galera-3-25.3.20/0000755000015300001660000000000013042122666013140 5ustar jenkinsjenkinsgalera-3-25.3.20/galera/0000755000015300001660000000000013042054732014371 5ustar jenkinsjenkinsgalera-3-25.3.20/galera/src/0000755000015300001660000000000013042054732015160 5ustar jenkinsjenkinsgalera-3-25.3.20/galera/src/galera_info.hpp0000644000015300001660000000070213042054732020136 0ustar jenkinsjenkins// Copyright (C) 2009 Codership Oy #ifndef __GALERA_INFO_H__ #define __GALERA_INFO_H__ #include "gcs.hpp" #include "wsrep_api.h" /* create view info out of configuration message */ extern wsrep_view_info_t* galera_view_info_create (const gcs_act_conf_t* conf, bool st_required); /* make a copy of view info object */ extern wsrep_view_info_t* galera_view_info_copy (const wsrep_view_info_t* vi); #endif // __GALERA_INFO_H__ galera-3-25.3.20/galera/src/wsdb.hpp0000644000015300001660000000560513042054732016636 0ustar jenkinsjenkins// // Copyright (C) 2010-2014 Codership Oy // #ifndef GALERA_WSDB_HPP #define GALERA_WSDB_HPP #include "trx_handle.hpp" #include "wsrep_api.h" #include "gu_unordered.hpp" namespace galera { class Wsdb { class Conn { public: Conn(wsrep_conn_id_t conn_id) : conn_id_(conn_id), trx_(0) { } Conn(const Conn& other) : conn_id_(other.conn_id_), trx_(other.trx_) { } ~Conn() { if (trx_ != 0) trx_->unref(); } void assign_trx(TrxHandle* trx) { if (trx_ != 0) trx_->unref(); trx_ = trx; } TrxHandle* get_trx() { return trx_; } private: void operator=(const Conn&); wsrep_conn_id_t conn_id_; TrxHandle* trx_; }; class TrxHash { public: size_t operator()(const wsrep_trx_id_t& key) const { return key; } }; typedef gu::UnorderedMap TrxMap; class ConnHash { public: size_t operator()(const wsrep_conn_id_t& key) const { return key; } }; typedef gu::UnorderedMap ConnMap; public: TrxHandle* get_trx(const TrxHandle::Params& params, const wsrep_uuid_t& source_id, wsrep_trx_id_t trx_id, bool create = false); void discard_trx(wsrep_trx_id_t trx_id); TrxHandle* get_conn_query(const TrxHandle::Params&, const wsrep_uuid_t&, wsrep_conn_id_t conn_id, bool create = false); void discard_conn(wsrep_conn_id_t conn_id); void discard_conn_query(wsrep_conn_id_t conn_id); Wsdb(); ~Wsdb(); void print(std::ostream& os) const; private: // Find existing trx handle in the map TrxHandle* find_trx(wsrep_trx_id_t trx_id); // Create new trx handle TrxHandle* create_trx(const TrxHandle::Params& params, const wsrep_uuid_t& source_id, wsrep_trx_id_t trx_id); Conn* get_conn(wsrep_conn_id_t conn_id, bool create); static const size_t trx_mem_limit_ = 1 << 20; TrxHandle::LocalPool trx_pool_; TrxMap trx_map_; gu::Mutex trx_mutex_; ConnMap conn_map_; gu::Mutex conn_mutex_; }; inline std::ostream& operator<<(std::ostream& os, const Wsdb& w) { w.print(os); return os; } } #endif // GALERA_WSDB_HPP galera-3-25.3.20/galera/src/key_entry_ng.hpp0000644000015300001660000000655313042054732020377 0ustar jenkinsjenkins// // Copyright (C) 2013 Codership Oy // #ifndef GALERA_KEY_ENTRY_NG_HPP #define GALERA_KEY_ENTRY_NG_HPP #include "trx_handle.hpp" namespace galera { class TrxHandle; class KeyEntryNG { public: KeyEntryNG(const KeySet::KeyPart& key) : refs_(), key_(key) { std::fill(&refs_[0], &refs_[KeySet::Key::P_LAST], reinterpret_cast(NULL)); } KeyEntryNG(const KeyEntryNG& other) : refs_(), key_(other.key_) { std::copy(&other.refs_[0], &other.refs_[KeySet::Key::P_LAST], &refs_[0]); } const KeySet::KeyPart& key() const { return key_; } void ref(KeySet::Key::Prefix p, const KeySet::KeyPart& k, TrxHandle* trx) { assert(0 == refs_[p] || 
refs_[p]->global_seqno() <= trx->global_seqno()); refs_[p] = trx; key_ = k; } void unref(KeySet::Key::Prefix p, TrxHandle* trx) { assert(refs_[p] != NULL); if (refs_[p] == trx) { refs_[p] = NULL; } else { assert(refs_[p]->global_seqno() > trx->global_seqno()); assert(0); } } bool referenced() const { bool ret(refs_[0] != NULL); for (int i(1); false == ret && i <= KeySet::Key::P_LAST; ++i) { ret = (refs_[i] != NULL); } return ret; } const TrxHandle* ref_trx(KeySet::Key::Prefix p) const { return refs_[p]; } size_t size() const { return sizeof(*this); } void swap(KeyEntryNG& other) throw() { using std::swap; gu::swap_array(refs_, other.refs_); swap(key_, other.key_); } KeyEntryNG& operator=(KeyEntryNG ke) { swap(ke); return *this; } ~KeyEntryNG() { assert(!referenced()); } private: TrxHandle* refs_[KeySet::Key::P_LAST + 1]; KeySet::KeyPart key_; #ifndef NDEBUG void assert_ref(KeySet::Key::Prefix, TrxHandle*) const; void assert_unref(KeySet::Key::Prefix, TrxHandle*) const; #endif /* NDEBUG */ }; inline void swap(KeyEntryNG& a, KeyEntryNG& b) { a.swap(b); } class KeyEntryHashNG { public: size_t operator()(const KeyEntryNG& ke) const { return ke.key().hash(); } }; class KeyEntryPtrHashNG { public: size_t operator()(const KeyEntryNG* const ke) const { return ke->key().hash(); } }; class KeyEntryEqualNG { public: bool operator()(const KeyEntryNG& left, const KeyEntryNG& right) const { return left.key().matches(right.key()); } }; class KeyEntryPtrEqualNG { public: bool operator()(const KeyEntryNG* const left, const KeyEntryNG* const right) const { return left->key().matches(right->key()); } }; } #endif // GALERA_KEY_ENTRY_HPP galera-3-25.3.20/galera/src/ist.cpp0000644000015300001660000005642513042054732016477 0ustar jenkinsjenkins// // Copyright (C) 2011-2014 Codership Oy // #include "ist.hpp" #include "ist_proto.hpp" #include "gu_logger.hpp" #include "gu_uri.hpp" #include "gu_debug_sync.hpp" #include "GCache.hpp" #include "galera_common.hpp" #include #include #include namespace { static std::string const CONF_KEEP_KEYS ("ist.keep_keys"); static bool const CONF_KEEP_KEYS_DEFAULT (true); } namespace galera { namespace ist { class AsyncSender : public Sender { public: AsyncSender(const gu::Config& conf, const std::string& peer, wsrep_seqno_t first, wsrep_seqno_t last, AsyncSenderMap& asmap, int version) : Sender (conf, asmap.gcache(), peer, version), conf_ (conf), peer_ (peer), first_ (first), last_ (last), asmap_ (asmap), thread_() { } const gu::Config& conf() { return conf_; } const std::string& peer() { return peer_; } wsrep_seqno_t first() { return first_; } wsrep_seqno_t last() { return last_; } AsyncSenderMap& asmap() { return asmap_; } pthread_t thread() { return thread_; } private: friend class AsyncSenderMap; const gu::Config& conf_; const std::string peer_; wsrep_seqno_t first_; wsrep_seqno_t last_; AsyncSenderMap& asmap_; pthread_t thread_; // GCC 4.8.5 on FreeBSD wants it AsyncSender(const AsyncSender&); AsyncSender& operator=(const AsyncSender&); }; } } std::string const galera::ist::Receiver::RECV_ADDR("ist.recv_addr"); std::string const galera::ist::Receiver::RECV_BIND("ist.recv_bind"); void galera::ist::register_params(gu::Config& conf) { conf.add(Receiver::RECV_ADDR); conf.add(Receiver::RECV_BIND); conf.add(CONF_KEEP_KEYS); } galera::ist::Receiver::Receiver(gu::Config& conf, TrxHandle::SlavePool& sp, const char* addr) : recv_addr_ (), recv_bind_ (), io_service_ (), acceptor_ (io_service_), ssl_ctx_ (io_service_, asio::ssl::context::sslv23), mutex_ (), cond_ (), consumers_ (), 
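        // the seqno members below stay at the undefined value (-1) until
        // prepare() installs the expected [first_seqno, last_seqno] range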
current_seqno_(-1), last_seqno_ (-1), conf_ (conf), trx_pool_ (sp), thread_ (), error_code_ (0), version_ (-1), use_ssl_ (false), running_ (false), ready_ (false) { std::string recv_addr; std::string recv_bind; try { recv_bind = conf_.get(RECV_BIND); // no return } catch (gu::NotSet& e) {} try /* check if receive address is explicitly set */ { recv_addr = conf_.get(RECV_ADDR); return; } catch (gu::NotSet& e) {} /* if not, check the alternative. TODO: try to find from system. */ if (addr) { try { recv_addr = gu::URI(std::string("tcp://") + addr).get_host(); conf_.set(RECV_ADDR, recv_addr); } catch (gu::NotSet& e) {} } } galera::ist::Receiver::~Receiver() { } extern "C" void* run_receiver_thread(void* arg) { galera::ist::Receiver* receiver(static_cast(arg)); receiver->run(); return 0; } static std::string IST_determine_recv_addr (gu::Config& conf) { std::string recv_addr; try { recv_addr = conf.get(galera::ist::Receiver::RECV_ADDR); } catch (gu::NotFound&) { try { recv_addr = conf.get(galera::BASE_HOST_KEY); } catch (gu::NotSet&) { gu_throw_error(EINVAL) << "Could not determine IST receinve address: '" << galera::ist::Receiver::RECV_ADDR << "' not set."; } } /* check if explicit scheme is present */ if (recv_addr.find("://") == std::string::npos) { bool ssl(false); try { std::string ssl_key = conf.get(gu::conf::ssl_key); if (ssl_key.length() != 0) ssl = true; } catch (gu::NotSet&) {} if (ssl) recv_addr.insert(0, "ssl://"); else recv_addr.insert(0, "tcp://"); } gu::URI ra_uri(recv_addr); if (!conf.has(galera::BASE_HOST_KEY)) conf.set(galera::BASE_HOST_KEY, ra_uri.get_host()); try /* check for explicit port, TODO: make it possible to use any free port (explicit 0?) */ { ra_uri.get_port(); } catch (gu::NotSet&) /* use gmcast listen port + 1 */ { int port(0); try { port = gu::from_string( // gu::URI(conf.get("gmcast.listen_addr")).get_port() conf.get(galera::BASE_PORT_KEY) ); } catch (...) { port = gu::from_string(galera::BASE_PORT_DEFAULT); } port += 1; recv_addr += ":" + gu::to_string(port); } log_info << "IST receiver addr using " << recv_addr; return recv_addr; } static std::string IST_determine_recv_bind(gu::Config& conf) { std::string recv_bind; recv_bind = conf.get(galera::ist::Receiver::RECV_BIND); /* check if explicit scheme is present */ if (recv_bind.find("://") == std::string::npos) { bool ssl(false); try { std::string ssl_key = conf.get(gu::conf::ssl_key); if (ssl_key.length() != 0) ssl = true; } catch (gu::NotSet&) { } if (ssl) recv_bind.insert(0, "ssl://"); else recv_bind.insert(0, "tcp://"); } gu::URI rb_uri(recv_bind); try /* check for explicit port, TODO: make it possible to use any free port (explicit 0?) */ { rb_uri.get_port(); } catch (gu::NotSet&) /* use gmcast listen port + 1 */ { int port(0); try { port = gu::from_string(conf.get(galera::BASE_PORT_KEY)); } catch (...) 
{ port = gu::from_string(galera::BASE_PORT_DEFAULT); } port += 1; recv_bind += ":" + gu::to_string(port); } log_info<< "IST receiver bind using " << recv_bind; return recv_bind; } std::string galera::ist::Receiver::prepare(wsrep_seqno_t first_seqno, wsrep_seqno_t last_seqno, int version) { ready_ = false; version_ = version; recv_addr_ = IST_determine_recv_addr(conf_); try { recv_bind_ = IST_determine_recv_bind(conf_); } catch (gu::NotSet&) { recv_bind_ = recv_addr_; } gu::URI const uri_addr(recv_addr_); gu::URI const uri_bind(recv_bind_); try { if (uri_addr.get_scheme() == "ssl") { log_info << "IST receiver using ssl"; use_ssl_ = true; // Protocol versions prior 7 had a bug on sender side // which made sender to return null cert in handshake. // Therefore peer cert verfification must be enabled // only at protocol version 7 or higher. gu::ssl_prepare_context(conf_, ssl_ctx_, version >= 7); } asio::ip::tcp::resolver resolver(io_service_); asio::ip::tcp::resolver::query query(gu::unescape_addr(uri_bind.get_host()), uri_bind.get_port(), asio::ip::tcp::resolver::query::flags(0)); asio::ip::tcp::resolver::iterator i(resolver.resolve(query)); acceptor_.open(i->endpoint().protocol()); acceptor_.set_option(asio::ip::tcp::socket::reuse_address(true)); gu::set_fd_options(acceptor_); acceptor_.bind(*i); acceptor_.listen(); // read recv_addr_ from acceptor_ in case zero port was specified recv_addr_ = uri_addr.get_scheme() + "://" + uri_addr.get_host() + ":" + gu::to_string(acceptor_.local_endpoint().port()); } catch (asio::system_error& e) { recv_addr_ = ""; gu_throw_error(e.code().value()) << "Failed to open IST listener at " << uri_bind.to_string() << "', asio error '" << e.what() << "'"; } current_seqno_ = first_seqno; last_seqno_ = last_seqno; int err; if ((err = pthread_create(&thread_, 0, &run_receiver_thread, this)) != 0) { recv_addr_ = ""; gu_throw_error(err) << "Unable to create receiver thread"; } running_ = true; log_info << "Prepared IST receiver, listening at: " << (uri_bind.get_scheme() + "://" + gu::escape_addr(acceptor_.local_endpoint().address()) + ":" + gu::to_string(acceptor_.local_endpoint().port())); return recv_addr_; } void galera::ist::Receiver::run() { asio::ip::tcp::socket socket(io_service_); asio::ssl::stream ssl_stream(io_service_, ssl_ctx_); try { if (use_ssl_ == true) { acceptor_.accept(ssl_stream.lowest_layer()); gu::set_fd_options(ssl_stream.lowest_layer()); ssl_stream.handshake(asio::ssl::stream::server); } else { acceptor_.accept(socket); gu::set_fd_options(socket); } } catch (asio::system_error& e) { gu_throw_error(e.code().value()) << "accept() failed" << "', asio error '" << e.what() << "': " << gu::extra_error_info(e.code()); } acceptor_.close(); int ec(0); try { Proto p(trx_pool_, version_, conf_.get(CONF_KEEP_KEYS, CONF_KEEP_KEYS_DEFAULT)); if (use_ssl_ == true) { p.send_handshake(ssl_stream); p.recv_handshake_response(ssl_stream); p.send_ctrl(ssl_stream, Ctrl::C_OK); } else { p.send_handshake(socket); p.recv_handshake_response(socket); p.send_ctrl(socket, Ctrl::C_OK); } while (true) { TrxHandle* trx; if (use_ssl_ == true) { trx = p.recv_trx(ssl_stream); } else { trx = p.recv_trx(socket); } if (trx != 0) { if (trx->global_seqno() != current_seqno_) { log_error << "unexpected trx seqno: " << trx->global_seqno() << " expected: " << current_seqno_; ec = EINVAL; goto err; } ++current_seqno_; } gu::Lock lock(mutex_); while (ready_ == false || consumers_.empty()) { lock.wait(cond_); } Consumer* cons(consumers_.top()); consumers_.pop(); cons->trx(trx); 
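            // wake the consumer blocked in recv() on this condition variable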
cons->cond().signal(); if (trx == 0) { log_debug << "eof received, closing socket"; break; } } } catch (asio::system_error& e) { log_error << "got error while reading ist stream: " << e.code(); ec = e.code().value(); } catch (gu::Exception& e) { ec = e.get_errno(); if (ec != EINTR) { log_error << "got exception while reading ist stream: " << e.what(); } } err: gu::Lock lock(mutex_); if (use_ssl_ == true) { ssl_stream.lowest_layer().close(); // ssl_stream.shutdown(); } else { socket.close(); } running_ = false; if (ec != EINTR && current_seqno_ - 1 < last_seqno_) { log_error << "IST didn't contain all write sets, expected last: " << last_seqno_ << " last received: " << current_seqno_ - 1; ec = EPROTO; } if (ec != EINTR) { error_code_ = ec; } while (consumers_.empty() == false) { consumers_.top()->cond().signal(); consumers_.pop(); } } void galera::ist::Receiver::ready() { gu::Lock lock(mutex_); ready_ = true; cond_.signal(); } int galera::ist::Receiver::recv(TrxHandle** trx) { Consumer cons; gu::Lock lock(mutex_); if (running_ == false) { if (error_code_ != 0) { gu_throw_error(error_code_) << "IST receiver reported error"; } return EINTR; } consumers_.push(&cons); cond_.signal(); lock.wait(cons.cond()); if (cons.trx() == 0) { if (error_code_ != 0) { gu_throw_error(error_code_) << "IST receiver reported error"; } return EINTR; } *trx = cons.trx(); return 0; } wsrep_seqno_t galera::ist::Receiver::finished() { if (recv_addr_ == "") { log_debug << "IST was not prepared before calling finished()"; } else { interrupt(); int err; if ((err = pthread_join(thread_, 0)) != 0) { log_warn << "Failed to join IST receiver thread: " << err; } acceptor_.close(); gu::Lock lock(mutex_); running_ = false; while (consumers_.empty() == false) { consumers_.top()->cond().signal(); consumers_.pop(); } recv_addr_ = ""; } return (current_seqno_ - 1); } void galera::ist::Receiver::interrupt() { gu::URI uri(recv_addr_); try { asio::ip::tcp::resolver::iterator i; try { asio::ip::tcp::resolver resolver(io_service_); asio::ip::tcp::resolver::query query(gu::unescape_addr(uri.get_host()), uri.get_port(), asio::ip::tcp::resolver::query::flags(0)); i = resolver.resolve(query); } catch (asio::system_error& e) { gu_throw_error(e.code().value()) << "failed to resolve host '" << uri.to_string() << "', asio error '" << e.what() << "'"; } if (use_ssl_ == true) { asio::ssl::stream ssl_stream(io_service_, ssl_ctx_); ssl_stream.lowest_layer().connect(*i); gu::set_fd_options(ssl_stream.lowest_layer()); ssl_stream.handshake(asio::ssl::stream::client); Proto p(trx_pool_, version_, conf_.get(CONF_KEEP_KEYS, CONF_KEEP_KEYS_DEFAULT)); p.recv_handshake(ssl_stream); p.send_ctrl(ssl_stream, Ctrl::C_EOF); p.recv_ctrl(ssl_stream); } else { asio::ip::tcp::socket socket(io_service_); socket.connect(*i); gu::set_fd_options(socket); Proto p(trx_pool_, version_, conf_.get(CONF_KEEP_KEYS, CONF_KEEP_KEYS_DEFAULT)); p.recv_handshake(socket); p.send_ctrl(socket, Ctrl::C_EOF); p.recv_ctrl(socket); } } catch (asio::system_error& e) { // ignore } } galera::ist::Sender::Sender(const gu::Config& conf, gcache::GCache& gcache, const std::string& peer, int version) : io_service_(), socket_ (io_service_), ssl_ctx_ (io_service_, asio::ssl::context::sslv23), ssl_stream_(0), conf_ (conf), gcache_ (gcache), version_ (version), use_ssl_ (false) { gu::URI uri(peer); try { asio::ip::tcp::resolver resolver(io_service_); asio::ip::tcp::resolver::query query(gu::unescape_addr(uri.get_host()), uri.get_port(), asio::ip::tcp::resolver::query::flags(0)); 
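        // resolve the peer address; the URI scheme checked below selects
        // between a plain TCP socket and an SSL stream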
asio::ip::tcp::resolver::iterator i(resolver.resolve(query)); if (uri.get_scheme() == "ssl") { use_ssl_ = true; } if (use_ssl_ == true) { log_info << "IST sender using ssl"; ssl_prepare_context(conf, ssl_ctx_); // ssl_stream must be created after ssl_ctx_ is prepared... ssl_stream_ = new asio::ssl::stream( io_service_, ssl_ctx_); ssl_stream_->lowest_layer().connect(*i); gu::set_fd_options(ssl_stream_->lowest_layer()); ssl_stream_->handshake(asio::ssl::stream::client); } else { socket_.connect(*i); gu::set_fd_options(socket_); } } catch (asio::system_error& e) { gu_throw_error(e.code().value()) << "IST sender, failed to connect '" << peer.c_str() << "': " << e.what(); } } galera::ist::Sender::~Sender() { if (use_ssl_ == true) { ssl_stream_->lowest_layer().close(); delete ssl_stream_; } else { socket_.close(); } gcache_.seqno_unlock(); } void galera::ist::Sender::send(wsrep_seqno_t first, wsrep_seqno_t last) { if (first > last) { gu_throw_error(EINVAL) << "sender send first greater than last: " << first << " > " << last ; } try { TrxHandle::SlavePool unused(1, 0, ""); Proto p(unused, version_, conf_.get(CONF_KEEP_KEYS, CONF_KEEP_KEYS_DEFAULT)); int32_t ctrl; if (use_ssl_ == true) { p.recv_handshake(*ssl_stream_); p.send_handshake_response(*ssl_stream_); ctrl = p.recv_ctrl(*ssl_stream_); } else { p.recv_handshake(socket_); p.send_handshake_response(socket_); ctrl = p.recv_ctrl(socket_); } if (ctrl < 0) { gu_throw_error(EPROTO) << "ist send failed, peer reported error: " << ctrl; } std::vector buf_vec( std::min(static_cast(last - first + 1), static_cast(1024))); ssize_t n_read; while ((n_read = gcache_.seqno_get_buffers(buf_vec, first)) > 0) { GU_DBUG_SYNC_WAIT("ist_sender_send_after_get_buffers") //log_info << "read " << first << " + " << n_read << " from gcache"; for (wsrep_seqno_t i(0); i < n_read; ++i) { // log_info << "sending " << buf_vec[i].seqno_g(); if (use_ssl_ == true) { p.send_trx(*ssl_stream_, buf_vec[i]); } else { p.send_trx(socket_, buf_vec[i]); } if (buf_vec[i].seqno_g() == last) { if (use_ssl_ == true) { p.send_ctrl(*ssl_stream_, Ctrl::C_EOF); } else { p.send_ctrl(socket_, Ctrl::C_EOF); } // wait until receiver closes the connection try { gu::byte_t b; size_t n; if (use_ssl_ == true) { n = asio::read(*ssl_stream_, asio::buffer(&b, 1)); } else { n = asio::read(socket_, asio::buffer(&b, 1)); } if (n > 0) { log_warn << "received " << n << " bytes, expected none"; } } catch (asio::system_error& e) { } return; } } first += n_read; // resize buf_vec to avoid scanning gcache past last size_t next_size(std::min(static_cast(last - first + 1), static_cast(1024))); if (buf_vec.size() != next_size) { buf_vec.resize(next_size); } } } catch (asio::system_error& e) { gu_throw_error(e.code().value()) << "ist send failed: " << e.code() << "', asio error '" << e.what() << "'"; } } extern "C" void* run_async_sender(void* arg) { galera::ist::AsyncSender* as(reinterpret_cast(arg)); log_info << "async IST sender starting to serve " << as->peer().c_str() << " sending " << as->first() << "-" << as->last(); wsrep_seqno_t join_seqno; try { as->send(as->first(), as->last()); join_seqno = as->last(); } catch (gu::Exception& e) { log_error << "async IST sender failed to serve " << as->peer().c_str() << ": " << e.what(); join_seqno = -e.get_errno(); } catch (...) 
{ log_error << "async IST sender, failed to serve " << as->peer().c_str(); throw; } try { as->asmap().remove(as, join_seqno); pthread_detach(as->thread()); delete as; } catch (gu::NotFound& nf) { log_debug << "async IST sender already removed"; } log_info << "async IST sender served"; return 0; } void galera::ist::AsyncSenderMap::run(const gu::Config& conf, const std::string& peer, wsrep_seqno_t first, wsrep_seqno_t last, int version) { gu::Critical crit(monitor_); AsyncSender* as(new AsyncSender(conf, peer, first, last, *this, version)); int err(pthread_create(&as->thread_, 0, &run_async_sender, as)); if (err != 0) { delete as; gu_throw_error(err) << "failed to start sender thread"; } senders_.insert(as); } void galera::ist::AsyncSenderMap::remove(AsyncSender* as, wsrep_seqno_t seqno) { gu::Critical crit(monitor_); std::set::iterator i(senders_.find(as)); if (i == senders_.end()) { throw gu::NotFound(); } senders_.erase(i); } void galera::ist::AsyncSenderMap::cancel() { gu::Critical crit(monitor_); while (senders_.empty() == false) { AsyncSender* as(*senders_.begin()); senders_.erase(*senders_.begin()); int err; as->cancel(); monitor_.leave(); if ((err = pthread_join(as->thread_, 0)) != 0) { log_warn << "pthread_join() failed: " << err; } monitor_.enter(); delete as; } } galera-3-25.3.20/galera/src/certification.hpp0000644000015300001660000001524113042054732020517 0ustar jenkinsjenkins// // Copyright (C) 2010-2014 Codership Oy // #ifndef GALERA_CERTIFICATION_HPP #define GALERA_CERTIFICATION_HPP #include "trx_handle.hpp" #include "key_entry_ng.hpp" #include "galera_service_thd.hpp" #include "gu_unordered.hpp" #include "gu_lock.hpp" #include "gu_config.hpp" #include #include #include namespace galera { class Certification { public: static std::string const PARAM_LOG_CONFLICTS; static void register_params(gu::Config&); typedef gu::UnorderedSet CertIndex; typedef gu::UnorderedSet CertIndexNG; private: typedef std::multiset DepsSet; typedef std::map TrxMap; public: typedef enum { TEST_OK, TEST_FAILED } TestResult; Certification(gu::Config& conf, ServiceThd& thd); ~Certification(); void assign_initial_position(wsrep_seqno_t seqno, int versiono); TestResult append_trx(TrxHandle*); TestResult test(TrxHandle*, bool = true); wsrep_seqno_t position() const { return position_; } wsrep_seqno_t get_safe_to_discard_seqno() const { gu::Lock lock(mutex_); return get_safe_to_discard_seqno_(); } wsrep_seqno_t purge_trxs_upto(wsrep_seqno_t const seqno, bool const handle_gcache) { gu::Lock lock(mutex_); const wsrep_seqno_t stds(get_safe_to_discard_seqno_()); // assert(seqno <= get_safe_to_discard_seqno()); // Note: setting trx committed is not done in total order so // safe to discard seqno may decrease. Enable assertion above when // this issue is fixed. return purge_trxs_upto_(std::min(seqno, stds), handle_gcache); } // Set trx corresponding to handle committed. Return purge seqno if // index purge is required, -1 otherwise. 
wsrep_seqno_t set_trx_committed(TrxHandle*); TrxHandle* get_trx(wsrep_seqno_t); // statistics section void stats_get(double& avg_cert_interval, double& avg_deps_dist, size_t& index_size) const { gu::Lock lock(stats_mutex_); avg_cert_interval = 0; avg_deps_dist = 0; if (n_certified_) { avg_cert_interval = double(cert_interval_) / n_certified_; avg_deps_dist = double(deps_dist_) / n_certified_; } index_size = index_size_; } void stats_reset() { gu::Lock lock(stats_mutex_); cert_interval_ = 0; deps_dist_ = 0; n_certified_ = 0; index_size_ = 0; } void set_log_conflicts(const std::string& str); private: TestResult do_test(TrxHandle*, bool); TestResult do_test_v1to2(TrxHandle*, bool); TestResult do_test_v3(TrxHandle*, bool); TestResult do_test_preordered(TrxHandle*); void purge_for_trx(TrxHandle*); void purge_for_trx_v1to2(TrxHandle*); void purge_for_trx_v3(TrxHandle*); // unprotected variants for internal use wsrep_seqno_t get_safe_to_discard_seqno_() const; wsrep_seqno_t purge_trxs_upto_(wsrep_seqno_t, bool sync); bool index_purge_required() { static unsigned int const KEYS_THRESHOLD (1 << 10); // 1K static unsigned int const BYTES_THRESHOLD(128 << 20); // 128M static unsigned int const TRXS_THRESHOLD (127); /* if either key count, byte count or trx count exceed their * threshold, zero up counts and return true. */ return ((key_count_ > KEYS_THRESHOLD || byte_count_ > BYTES_THRESHOLD || trx_count_ > TRXS_THRESHOLD) && (key_count_ = 0, byte_count_ = 0, trx_count_ = 0, true)); } class PurgeAndDiscard { public: PurgeAndDiscard(Certification& cert) : cert_(cert) { } void operator()(TrxMap::value_type& vt) const { { TrxHandle* trx(vt.second); TrxHandleLock lock(*trx); if (trx->is_committed() == false) { log_warn << "trx not committed in purge and discard: " << *trx; } if (trx->depends_seqno() > -1) { cert_.purge_for_trx(trx); } if (trx->refcnt() > 1) { log_debug << "trx " << trx->trx_id() << " refcnt " << trx->refcnt(); } } vt.second->unref(); } PurgeAndDiscard(const PurgeAndDiscard& other) : cert_(other.cert_) { } private: void operator=(const PurgeAndDiscard&); Certification& cert_; }; int version_; TrxMap trx_map_; CertIndex cert_index_; CertIndexNG cert_index_ng_; DepsSet deps_set_; ServiceThd& service_thd_; gu::Mutex mutex_; size_t trx_size_warn_count_; wsrep_seqno_t initial_position_; wsrep_seqno_t position_; wsrep_seqno_t safe_to_discard_seqno_; wsrep_seqno_t last_pa_unsafe_; wsrep_seqno_t last_preordered_seqno_; wsrep_trx_id_t last_preordered_id_; gu::Mutex stats_mutex_; size_t n_certified_; wsrep_seqno_t deps_dist_; wsrep_seqno_t cert_interval_; size_t index_size_; size_t key_count_; size_t byte_count_; size_t trx_count_; /* The only reason those are not static constants is because * there might be a need to thange them without recompilation. 
* see #454 */ int const max_length_; /* Purge trx_map_ when it exceeds this * NOTE: this effectively sets a limit * on trx certification interval */ unsigned int const max_length_check_; /* Mask how often to check */ bool log_conflicts_; }; } #endif // GALERA_CERTIFICATION_HPP galera-3-25.3.20/galera/src/galera_gcs.hpp0000644000015300001660000003221213042054732017760 0ustar jenkinsjenkins// // Copyright (C) 2010-2014 Codership Oy // #ifndef GALERA_GCS_HPP #define GALERA_GCS_HPP #include "write_set_ng.hpp" #include "wsrep_api.h" #include "gcs.hpp" #include #include #include #include #include #include // for gu::Mutex and gu::Cond #include #include #define GCS_IMPL Gcs namespace galera { class GcsI { public: GcsI() {} virtual ~GcsI() {} virtual ssize_t connect(const std::string& cluster_name, const std::string& cluster_url, bool bootstrap) = 0; virtual ssize_t set_initial_position(const wsrep_uuid_t& uuid, gcs_seqno_t seqno) = 0; virtual void close() = 0; virtual ssize_t recv(gcs_action& act) = 0; typedef WriteSetNG::GatherVector WriteSetVector; virtual ssize_t sendv(const WriteSetVector&, size_t, gcs_act_type_t, bool) = 0; virtual ssize_t send (const void*, size_t, gcs_act_type_t, bool) = 0; virtual ssize_t replv(const WriteSetVector&, gcs_action& act, bool) = 0; virtual ssize_t repl (gcs_action& act, bool) = 0; virtual gcs_seqno_t caused() = 0; virtual ssize_t schedule() = 0; virtual ssize_t interrupt(ssize_t) = 0; virtual ssize_t resume_recv() = 0; virtual ssize_t set_last_applied(gcs_seqno_t) = 0; virtual ssize_t request_state_transfer(int version, const void* req, ssize_t req_len, const std::string& sst_donor, const gu_uuid_t& ist_uuid, gcs_seqno_t ist_seqno, gcs_seqno_t* seqno_l) = 0; virtual ssize_t desync(gcs_seqno_t* seqno_l) = 0; virtual void join(gcs_seqno_t seqno) = 0; virtual gcs_seqno_t local_sequence() = 0; virtual void get_stats(gcs_stats*) const = 0; virtual void flush_stats() = 0; virtual void get_status(gu::Status&) const = 0; /*! @throws NotFound */ virtual void param_set (const std::string& key, const std::string& value) = 0; /*! 
@throws NotFound */ virtual char* param_get (const std::string& key) const = 0; virtual size_t max_action_size() const = 0; }; class Gcs : public GcsI { public: Gcs(gu::Config& config, gcache::GCache& cache, int repl_proto_ver = 0, int appl_proto_ver = 0, const char* node_name = 0, const char* node_incoming = 0) : conn_(gcs_create(reinterpret_cast(&config), reinterpret_cast(&cache), node_name, node_incoming, repl_proto_ver, appl_proto_ver)) { log_info << "Passing config to GCS: " << config; if (conn_ == 0) gu_throw_fatal << "could not create gcs connection"; } ~Gcs() { gcs_destroy(conn_); } ssize_t connect(const std::string& cluster_name, const std::string& cluster_url, bool const bootstrap) { return gcs_open(conn_, cluster_name.c_str(), cluster_url.c_str(), bootstrap); } ssize_t set_initial_position(const wsrep_uuid_t& uuid, gcs_seqno_t seqno) { return gcs_init(conn_, seqno, uuid.data); } void close() { gcs_close(conn_); } ssize_t recv(struct gcs_action& act) { return gcs_recv(conn_, &act); } ssize_t sendv(const WriteSetVector& actv, size_t act_len, gcs_act_type_t act_type, bool scheduled) { return gcs_sendv(conn_, &actv[0], act_len, act_type, scheduled); } ssize_t send(const void* act, size_t act_len, gcs_act_type_t act_type, bool scheduled) { return gcs_send(conn_, act, act_len, act_type, scheduled); } ssize_t replv(const WriteSetVector& actv, struct gcs_action& act, bool scheduled) { return gcs_replv(conn_, &actv[0], &act, scheduled); } ssize_t repl(struct gcs_action& act, bool scheduled) { return gcs_repl(conn_, &act, scheduled); } gcs_seqno_t caused() { return gcs_caused(conn_); } ssize_t schedule() { return gcs_schedule(conn_); } ssize_t interrupt(ssize_t handle) { return gcs_interrupt(conn_, handle); } ssize_t resume_recv() { return gcs_resume_recv(conn_); } ssize_t set_last_applied(gcs_seqno_t last_applied) { return gcs_set_last_applied(conn_, last_applied); } ssize_t request_state_transfer(int version, const void* req, ssize_t req_len, const std::string& sst_donor, const gu_uuid_t& ist_uuid, gcs_seqno_t ist_seqno, gcs_seqno_t* seqno_l) { return gcs_request_state_transfer(conn_, version, req, req_len, sst_donor.c_str(), &ist_uuid, ist_seqno, seqno_l); } ssize_t desync (gcs_seqno_t* seqno_l) { return gcs_desync(conn_, seqno_l); } void join (gcs_seqno_t seqno) { long const err(gcs_join(conn_, seqno)); if (err < 0) { gu_throw_error (-err) << "gcs_join(" << seqno << ") failed"; } } gcs_seqno_t local_sequence() { return gcs_local_sequence(conn_); } void get_stats(gcs_stats* stats) const { return gcs_get_stats(conn_, stats); } void flush_stats() { return gcs_flush_stats(conn_); } void get_status(gu::Status& status) const { gcs_get_status(conn_, status); } void param_set (const std::string& key, const std::string& value) { long ret = gcs_param_set (conn_, key.c_str(), value.c_str()); if (1 == ret) { throw gu::NotFound(); } else if (ret) { gu_throw_error(-ret) << "Setting '" << key << "' to '" << value << "' failed"; } } char* param_get (const std::string& key) const { gu_throw_error(ENOSYS) << "Not implemented: " << __FUNCTION__; return 0; } size_t max_action_size() const { return GCS_MAX_ACT_SIZE; } private: Gcs(const Gcs&); void operator=(const Gcs&); gcs_conn_t* conn_; }; class DummyGcs : public GcsI { public: DummyGcs(gu::Config& config, gcache::GCache& cache, int repl_proto_ver = 0, int appl_proto_ver = 0, const char* node_name = 0, const char* node_incoming = 0); DummyGcs(); // for unit tests ~DummyGcs(); ssize_t connect(const std::string& cluster_name, const std::string& 
cluster_url, bool bootstrap); ssize_t set_initial_position(const wsrep_uuid_t& uuid, gcs_seqno_t seqno); void close(); ssize_t recv(gcs_action& act); ssize_t sendv(const WriteSetVector&, size_t, gcs_act_type_t, bool) { return -ENOSYS; } ssize_t send(const void*, size_t, gcs_act_type_t, bool) { return -ENOSYS; } ssize_t replv(const WriteSetVector& actv, gcs_action& act, bool scheduled) { ssize_t ret(set_seqnos(act)); if (gu_likely(0 != gcache_ && ret > 0)) { assert (ret == act.size); gu::byte_t* ptr( reinterpret_cast(gcache_->malloc(act.size))); act.buf = ptr; ssize_t copied(0); for (int i(0); copied < act.size; ++i) { memcpy (ptr + copied, actv[i].ptr, actv[i].size); copied += actv[i].size; } assert (copied == act.size); } return ret; } ssize_t repl(gcs_action& act, bool scheduled) { ssize_t ret(set_seqnos(act)); if (gu_likely(0 != gcache_ && ret > 0)) { assert (ret == act.size); void* ptr(gcache_->malloc(act.size)); memcpy (ptr, act.buf, act.size); act.buf = ptr; } return ret; } gcs_seqno_t caused() { return global_seqno_; } ssize_t schedule() { return 1; } ssize_t interrupt(ssize_t handle); ssize_t resume_recv() { return 0; } ssize_t set_last_applied(gcs_seqno_t last_applied) { gu::Lock lock(mtx_); last_applied_ = last_applied; report_last_applied_ = true; cond_.signal(); return 0; } gcs_seqno_t last_applied() const { return last_applied_; } ssize_t request_state_transfer(int version, const void* req, ssize_t req_len, const std::string& sst_donor, const gu_uuid_t& ist_uuid, gcs_seqno_t ist_seqno, gcs_seqno_t* seqno_l) { *seqno_l = GCS_SEQNO_ILL; return -ENOSYS; } ssize_t desync (gcs_seqno_t* seqno_l) { *seqno_l = GCS_SEQNO_ILL; return -ENOTCONN; } void join(gcs_seqno_t seqno) { gu_throw_error(ENOTCONN); } gcs_seqno_t local_sequence() { gu::Lock lock(mtx_); return ++local_seqno_; } void get_stats(gcs_stats* stats) const { memset (stats, 0, sizeof(*stats)); } void flush_stats() {} void get_status(gu::Status& status) const {} void param_set (const std::string& key, const std::string& value) {} char* param_get (const std::string& key) const { return 0; } size_t max_action_size() const { return 0x7FFFFFFF; } private: typedef enum { S_CLOSED, S_OPEN, S_CONNECTED, S_SYNCED } conn_state_t; ssize_t generate_seqno_action (gcs_action& act, gcs_act_type_t type); ssize_t generate_cc (bool primary); gu::Config* gconf_; gcache::GCache* gcache_; gu::Mutex mtx_; gu::Cond cond_; gcs_seqno_t global_seqno_; gcs_seqno_t local_seqno_; gu_uuid_t uuid_; gcs_seqno_t last_applied_; conn_state_t state_; gu::Lock* schedule_; void* cc_; ssize_t cc_size_; std::string const my_name_; std::string const incoming_; int repl_proto_ver_; int appl_proto_ver_; bool report_last_applied_; ssize_t set_seqnos (gcs_action& act) { act.seqno_g = GCS_SEQNO_ILL; act.seqno_l = GCS_SEQNO_ILL; ssize_t ret(-EBADFD); { gu::Lock lock(mtx_); switch (state_) { case S_CONNECTED: case S_SYNCED: { ++global_seqno_; act.seqno_g = global_seqno_; ++local_seqno_; act.seqno_l = local_seqno_; ret = act.size; break; } case S_CLOSED: ret = -EBADFD; break; case S_OPEN: ret = -ENOTCONN; break; } } return ret; } DummyGcs (const DummyGcs&); DummyGcs& operator=(const DummyGcs&); }; } #endif // GALERA_GCS_HPP galera-3-25.3.20/galera/src/uuid.hpp0000644000015300001660000000301213042054732016633 0ustar jenkinsjenkins// // Copyright (C) 2010-2014 Codership Oy // #ifndef GALERA_UUID_HPP #define GALERA_UUID_HPP #include "wsrep_api.h" #include "gu_uuid.hpp" #include namespace galera { inline const gu_uuid_t& to_gu_uuid(const wsrep_uuid_t& uuid) { return 
*reinterpret_cast(&uuid); } inline gu_uuid_t& to_gu_uuid(wsrep_uuid_t& uuid) { return *reinterpret_cast(&uuid); } inline bool operator==(const wsrep_uuid_t& a, const wsrep_uuid_t& b) { return to_gu_uuid(a) == to_gu_uuid(b); } inline bool operator!=(const wsrep_uuid_t& a, const wsrep_uuid_t& b) { return !(a == b); } inline std::ostream& operator<<(std::ostream& os, const wsrep_uuid_t& uuid) { return os << to_gu_uuid(uuid); } inline std::istream& operator>>(std::istream& is, wsrep_uuid_t& uuid) { return is >> to_gu_uuid(uuid); } inline size_t serial_size(const wsrep_uuid_t& uuid) { return gu_uuid_serial_size(to_gu_uuid(uuid)); } inline size_t serialize(const wsrep_uuid_t& uuid, gu::byte_t* buf, size_t buflen, size_t offset) { return gu_uuid_serialize(to_gu_uuid(uuid), buf, buflen, offset); } inline size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset, wsrep_uuid_t& uuid) { return gu_uuid_unserialize(buf, buflen, offset, to_gu_uuid(uuid)); } } #endif // GALERA_UUID_HPP galera-3-25.3.20/galera/src/action_source.hpp0000644000015300001660000000056013042054732020527 0ustar jenkinsjenkins// // Copyright (C) 2010-2013 Codership Oy // #ifndef GALERA_ACTION_SOURCE_HPP #define GALERA_ACTION_SOURCE_HPP namespace galera { class ActionSource { public: ActionSource() { } virtual ~ActionSource() { } virtual ssize_t process(void* ctx, bool& exit_loop) = 0; }; } #endif // GALERA_ACTION_SOURCE_HPP galera-3-25.3.20/galera/src/galera_service_thd.cpp0000644000015300001660000000755413042054732021511 0ustar jenkinsjenkins/* * Copyright (C) 2010-2013 Codership Oy * * Using broadcasts instead of signals below to wake flush callers due to * theoretical possibility of more than 2 threads involved. */ #include "galera_service_thd.hpp" const uint32_t galera::ServiceThd::A_NONE = 0; static const uint32_t A_LAST_COMMITTED = 1U << 0; static const uint32_t A_RELEASE_SEQNO = 1U << 1; static const uint32_t A_FLUSH = 1U << 30; static const uint32_t A_EXIT = 1U << 31; void* galera::ServiceThd::thd_func (void* arg) { galera::ServiceThd* st = reinterpret_cast(arg); bool exit = false; while (!exit) { galera::ServiceThd::Data data; { gu::Lock lock(st->mtx_); if (A_NONE == st->data_.act_) lock.wait(st->cond_); data = st->data_; st->data_.act_ = A_NONE; // clear pending actions if (data.act_ & A_FLUSH) { if (A_FLUSH == data.act_) { // no other actions scheduled (all previous are "flushed") log_info << "Service thread queue flushed."; st->flush_.broadcast(); } else { // restore flush flag for the next iteration st->data_.act_ |= A_FLUSH; } } } exit = ((data.act_ & A_EXIT)); if (!exit) { if (data.act_ & A_LAST_COMMITTED) { ssize_t const ret (st->gcs_.set_last_applied(data.last_committed_)); if (gu_unlikely(ret < 0)) { log_warn << "Failed to report last committed " << data.last_committed_ << ", " << ret << " (" << strerror (-ret) << ')'; // @todo: figure out what to do in this case } else { log_debug << "Reported last committed: " << data.last_committed_; } } if (data.act_ & A_RELEASE_SEQNO) { try { st->gcache_.seqno_release(data.release_seqno_); } catch (std::exception& e) { log_warn << "Exception releasing seqno " << data.release_seqno_ << ": " << e.what(); } } } } return 0; } galera::ServiceThd::ServiceThd (GcsI& gcs, gcache::GCache& gcache) : gcache_ (gcache), gcs_ (gcs), thd_ (), mtx_ (), cond_ (), flush_ (), data_ () { gu_thread_create (&thd_, NULL, thd_func, this); } galera::ServiceThd::~ServiceThd () { { gu::Lock lock(mtx_); data_.act_ = A_EXIT; cond_.signal(); flush_.broadcast(); } gu_thread_join(thd_, NULL); 
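    // at this point thd_func() has observed A_EXIT and returned, so the
    // members used by the service thread can be destroyed safely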
} void galera::ServiceThd::flush() { gu::Lock lock(mtx_); if (!(data_.act_ & A_EXIT)) { if (data_.act_ == A_NONE) cond_.signal(); data_.act_ |= A_FLUSH; do { lock.wait(flush_); } while (data_.act_ & A_FLUSH); } } void galera::ServiceThd::reset() { gu::Lock lock(mtx_); data_.act_ = A_NONE; data_.last_committed_ = 0; } void galera::ServiceThd::report_last_committed(gcs_seqno_t seqno) { gu::Lock lock(mtx_); if (data_.last_committed_ < seqno) { data_.last_committed_ = seqno; if (data_.act_ == A_NONE) cond_.signal(); data_.act_ |= A_LAST_COMMITTED; } } void galera::ServiceThd::release_seqno(gcs_seqno_t seqno) { gu::Lock lock(mtx_); if (data_.release_seqno_ < seqno) { data_.release_seqno_ = seqno; if (data_.act_ == A_NONE) cond_.signal(); data_.act_ |= A_RELEASE_SEQNO; } } galera-3-25.3.20/galera/src/mapped_buffer.cpp0000644000015300001660000000735313042054732020473 0ustar jenkinsjenkins// // Copyright (C) 2010 Codership Oy // #define _FILE_OFFSET_BITS 64 #include "mapped_buffer.hpp" #include "gu_throw.hpp" #include "gu_logger.hpp" #include "gu_macros.h" #include #include #include #include #include // MAP_FAILED is defined as (void *) -1 #pragma GCC diagnostic ignored "-Wold-style-cast" using namespace std; using namespace gu; galera::MappedBuffer::MappedBuffer(const std::string& working_dir, size_t threshold) : working_dir_ (working_dir), file_ (), fd_ (-1), threshold_ (threshold), buf_ (0), buf_size_ (0), real_buf_size_(0) { } galera::MappedBuffer::~MappedBuffer() { if (fd_ != -1) { struct stat st; fstat(fd_, &st); log_debug << "file size " << st.st_size; } clear(); } void galera::MappedBuffer::reserve(size_t sz) { if (real_buf_size_ >= sz) { // no need for reallocation return; } if (sz > threshold_) { // buffer size exceeds in-memory threshold, have to mmap if (gu_unlikely(std::numeric_limits::max() - sz < threshold_)) { sz = std::numeric_limits::max(); } else { sz = (sz/threshold_ + 1)*threshold_; } if (gu_unlikely(sz > static_cast(std::numeric_limits::max()))) { gu_throw_error(EINVAL) << "size exceeds maximum of off_t"; } if (fd_ == -1) { file_ = working_dir_ + "/gmb_XXXXXX"; fd_ = mkstemp(&file_[0]); if (fd_ == -1) { gu_throw_error(errno) << "mkstemp(" << file_ << ") failed"; } if (ftruncate(fd_, sz) == -1) { gu_throw_error(errno) << "ftruncate() failed"; } byte_t* tmp(reinterpret_cast( mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd_, 0))); if (tmp == MAP_FAILED) { free(buf_); buf_ = 0; clear(); gu_throw_error(ENOMEM) << "mmap() failed"; } copy(buf_, buf_ + buf_size_, tmp); free(buf_); buf_ = tmp; } else { if (munmap(buf_, real_buf_size_) != 0) { gu_throw_error(errno) << "munmap() failed"; } if (ftruncate(fd_, sz) == -1) { gu_throw_error(errno) << "fruncate() failed"; } byte_t* tmp(reinterpret_cast( mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd_, 0))); if (tmp == MAP_FAILED) { buf_ = 0; clear(); gu_throw_error(ENOMEM) << "mmap() failed"; } buf_ = tmp; } } else { sz = min(threshold_, sz*2); byte_t* tmp(reinterpret_cast(realloc(buf_, sz))); if (tmp == 0) { gu_throw_error(ENOMEM) << "realloc failed"; } buf_ = tmp; } real_buf_size_ = sz; } void galera::MappedBuffer::resize(size_t sz) { reserve(sz); buf_size_ = sz; } void galera::MappedBuffer::clear() { if (fd_ != -1) { if (buf_ != 0) munmap(buf_, real_buf_size_); while (close(fd_) == EINTR) { } unlink(file_.c_str()); } else { free(buf_); } fd_ = -1; buf_ = 0; buf_size_ = 0; real_buf_size_ = 0; } galera-3-25.3.20/galera/src/galera_exception.hpp0000644000015300001660000000315213042054732021203 0ustar jenkinsjenkins// // 
Copyright (C) 2010 Codership Oy // #ifndef GALERA_EXCEPTION_HPP #define GALERA_EXCEPTION_HPP #include #include #include "wsrep_api.h" namespace galera { /*! * An exception to handle applier errors and avoid confusing wsrep error codes * with the standard ones */ class ApplyException : public gu::Exception { public: ApplyException (const std::string& msg, int err) : gu::Exception (msg, err) { if (err < 0) // sanity check { log_fatal << "Attempt to throw exception with a " << err << " code"; abort(); } } /* this is just int because we must handle any positive value */ int status () { return get_errno(); } }; static inline const char* wsrep_status_str(wsrep_status_t& status) { switch (status) { case WSREP_OK: return "WSREP_OK"; case WSREP_WARNING: return "WSREP_WARNING"; case WSREP_TRX_MISSING: return "WSREP_TRX_MISSING"; case WSREP_TRX_FAIL: return "WSREP_TRX_FAIL"; case WSREP_BF_ABORT: return "WSREP_BF_ABORT"; case WSREP_CONN_FAIL: return "WSREP_CONN_FAIL"; case WSREP_NODE_FAIL: return "WSREP_NODE_FAIL"; case WSREP_FATAL: return "WSREP_FATAL"; case WSREP_NOT_IMPLEMENTED: return "WSREP_NOT_IMPLEMENTED"; default: return "(unknown code)"; } } /*! * And exception to handle replication errors */ class ReplException : public gu::Exception { public: ReplException (const std::string& msg, int err) : gu::Exception (msg, err) {} }; } #endif /* GALERA_EXCEPTION_HPP */ galera-3-25.3.20/galera/src/certification.cpp0000644000015300001660000010277213042054732020520 0ustar jenkinsjenkins// // Copyright (C) 2010-2014 Codership Oy // #include "certification.hpp" #include "uuid.hpp" #include "gu_lock.hpp" #include "gu_throw.hpp" #include using namespace galera; static const bool cert_debug_on(false); #define cert_debug \ if (cert_debug_on == false) { } \ else log_info << "cert debug: " #define CERT_PARAM_LOG_CONFLICTS galera::Certification::PARAM_LOG_CONFLICTS static std::string const CERT_PARAM_PREFIX("cert."); std::string const galera::Certification::PARAM_LOG_CONFLICTS(CERT_PARAM_PREFIX + "log_conflicts"); static std::string const CERT_PARAM_MAX_LENGTH (CERT_PARAM_PREFIX + "max_length"); static std::string const CERT_PARAM_LENGTH_CHECK (CERT_PARAM_PREFIX + "length_check"); static std::string const CERT_PARAM_LOG_CONFLICTS_DEFAULT("no"); /*** It is EXTREMELY important that these constants are the same on all nodes. *** Don't change them ever!!! ***/ static std::string const CERT_PARAM_MAX_LENGTH_DEFAULT("16384"); static std::string const CERT_PARAM_LENGTH_CHECK_DEFAULT("127"); void galera::Certification::register_params(gu::Config& cnf) { cnf.add(CERT_PARAM_LOG_CONFLICTS, CERT_PARAM_LOG_CONFLICTS_DEFAULT); /* The defaults below are deliberately not reflected in conf: people * should not know about these dangerous setting unless they read RTFM. 
*/ cnf.add(CERT_PARAM_MAX_LENGTH); cnf.add(CERT_PARAM_LENGTH_CHECK); } /* a function to get around unset defaults in ctor initialization list */ static int max_length(const gu::Config& conf) { if (conf.is_set(CERT_PARAM_MAX_LENGTH)) return conf.get(CERT_PARAM_MAX_LENGTH); else return gu::Config::from_config(CERT_PARAM_MAX_LENGTH_DEFAULT); } /* a function to get around unset defaults in ctor initialization list */ static int length_check(const gu::Config& conf) { if (conf.is_set(CERT_PARAM_LENGTH_CHECK)) return conf.get(CERT_PARAM_LENGTH_CHECK); else return gu::Config::from_config(CERT_PARAM_LENGTH_CHECK_DEFAULT); } void galera::Certification::purge_for_trx_v1to2(TrxHandle* trx) { TrxHandle::CertKeySet& refs(trx->cert_keys_); // Unref all referenced and remove if was referenced only by us for (TrxHandle::CertKeySet::iterator i = refs.begin(); i != refs.end(); ++i) { KeyEntryOS* const kel(i->first); const bool full_key(i->second.first); const bool shared(i->second.second); CertIndex::iterator ci(cert_index_.find(kel)); assert(ci != cert_index_.end()); KeyEntryOS* const ke(*ci); if (shared == false && (ke->ref_trx() == trx || ke->ref_full_trx() == trx)) { ke->unref(trx, full_key); } if (shared == true && (ke->ref_shared_trx() == trx || ke->ref_full_shared_trx() == trx)) { ke->unref_shared(trx, full_key); } if (ke->ref_trx() == 0 && ke->ref_shared_trx() == 0) { assert(ke->ref_full_trx() == 0); assert(ke->ref_full_shared_trx() == 0); delete ke; cert_index_.erase(ci); } if (kel != ke) delete kel; } } void galera::Certification::purge_for_trx_v3(TrxHandle* trx) { const KeySetIn& keys(trx->write_set_in().keyset()); keys.rewind(); // Unref all referenced and remove if was referenced only by us for (long i = 0; i < keys.count(); ++i) { const KeySet::KeyPart& kp(keys.next()); KeySet::Key::Prefix const p(kp.prefix()); KeyEntryNG ke(kp); CertIndexNG::iterator const ci(cert_index_ng_.find(&ke)); // assert(ci != cert_index_ng_.end()); if (gu_unlikely(cert_index_ng_.end() == ci)) { log_warn << "Missing key"; continue; } KeyEntryNG* const kep(*ci); assert(kep->referenced()); if (kep->ref_trx(p) == trx) { kep->unref(p, trx); if (kep->referenced() == false) { cert_index_ng_.erase(ci); delete kep; } } } } void galera::Certification::purge_for_trx(TrxHandle* trx) { if (trx->new_version()) purge_for_trx_v3(trx); else purge_for_trx_v1to2(trx); } /*! for convenience returns true if conflict and false if not */ static inline bool certify_and_depend_v1to2(const galera::KeyEntryOS* const match, galera::TrxHandle* const trx, bool const full_key, bool const exclusive_key, bool const log_conflict) { // 1) if the key is full, match for any trx // 2) if the key is partial, match for trx with full key const galera::TrxHandle* const ref_trx(full_key == true ? match->ref_trx() : match->ref_full_trx()); if (cert_debug_on && ref_trx) { cert_debug << "exclusive match (" << (full_key == true ? "full" : "partial") << ") " << *trx << " <-----> " << *ref_trx; } wsrep_seqno_t const ref_seqno(ref_trx ? 
ref_trx->global_seqno() : -1); // trx should not have any references in index at this point assert(ref_trx != trx); if (gu_likely(0 != ref_trx)) { // cert conflict takes place if // 1) write sets originated from different nodes, are within cert range // 2) ref_trx is in isolation mode, write sets are within cert range if ((trx->source_id() != ref_trx->source_id() || (ref_trx->flags() & galera::TrxHandle::F_ISOLATION) != 0) && ref_seqno > trx->last_seen_seqno()) { if (gu_unlikely(log_conflict == true)) { log_info << "trx conflict for key " << match->get_key(ref_trx->version()) << ": " << *trx << " <--X--> " << *ref_trx; } return true; } } wsrep_seqno_t depends_seqno(ref_seqno); if (exclusive_key) // exclusive keys must depend on shared refs as well { const galera::TrxHandle* const ref_shared_trx(full_key == true ? match->ref_shared_trx() : match->ref_full_shared_trx()); assert(ref_shared_trx != trx); if (ref_shared_trx) { cert_debug << "shared match (" << (full_key == true ? "full" : "partial") << ") " << *trx << " <-----> " << *ref_shared_trx; depends_seqno = std::max(ref_shared_trx->global_seqno(), depends_seqno); } } trx->set_depends_seqno(std::max(trx->depends_seqno(), depends_seqno)); return false; } static bool certify_v1to2(galera::TrxHandle* trx, galera::Certification::CertIndex& cert_index, const galera::KeyOS& key, bool const store_keys, bool const log_conflicts) { typedef std::list KPS; KPS key_parts(key.key_parts()); KPS::const_iterator begin(key_parts.begin()), end; bool full_key(false); galera::TrxHandle::CertKeySet& key_list(trx->cert_keys()); for (end = begin; full_key == false; end != key_parts.end() ? ++end : end) { full_key = (end == key_parts.end()); galera::Certification::CertIndex::iterator ci; galera::KeyEntryOS ke(key.version(), begin, end, key.flags()); cert_debug << "key: " << ke.get_key() << " (" << (full_key == true ? "full" : "partial") << ")"; bool const shared_key(ke.get_key().flags() & galera::KeyOS::F_SHARED); if (store_keys && (key_list.find(&ke) != key_list.end())) { // avoid certification for duplicates // should be removed once we can eleminate dups on deserialization continue; } galera::KeyEntryOS* kep; if ((ci = cert_index.find(&ke)) == cert_index.end()) { if (store_keys) { kep = new galera::KeyEntryOS(ke); ci = cert_index.insert(kep).first; cert_debug << "created new entry"; } } else { cert_debug << "found existing entry"; // Note: For we skip certification for isolated trxs, only // cert index and key_list is populated. 
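            // i.e. writesets applied in total order isolation only populate
            // the index here and are never checked for conflicts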
if ((trx->flags() & galera::TrxHandle::F_ISOLATION) == 0 && certify_and_depend_v1to2(*ci, trx, full_key, !shared_key, log_conflicts)) { return false; } if (store_keys) { if (gu_likely( true == ke.get_key().equal_all((*ci)->get_key()))) { kep = *ci; } else { // duplicate with different flags - need to store a copy kep = new galera::KeyEntryOS(ke); } } } if (store_keys) { key_list.insert(std::make_pair(kep, std::make_pair(full_key, shared_key))); } } return true; } galera::Certification::TestResult galera::Certification::do_test_v1to2(TrxHandle* trx, bool store_keys) { cert_debug << "BEGIN CERTIFICATION v1to2: " << *trx; #ifndef NDEBUG // to check that cleanup after cert failure returns cert_index_ // to original size size_t prev_cert_index_size(cert_index_.size()); #endif // NDEBUG galera::TrxHandle::CertKeySet& key_list(trx->cert_keys_); long key_count(0); size_t offset(0); const gu::byte_t* buf(trx->write_set_buffer().first); const size_t buf_len(trx->write_set_buffer().second); while (offset < buf_len) { std::pair k(WriteSet::segment(buf, buf_len, offset)); // Scan over all keys offset = k.first; while (offset < k.first + k.second) { KeyOS key(trx->version()); offset = key.unserialize(buf, buf_len, offset); if (certify_v1to2(trx, cert_index_, key, store_keys, log_conflicts_) == false) { goto cert_fail; } ++key_count; } // Skip data part std::pair d(WriteSet::segment(buf, buf_len, offset)); offset = d.first + d.second; } trx->set_depends_seqno(std::max(trx->depends_seqno(), last_pa_unsafe_)); if (store_keys == true) { for (TrxHandle::CertKeySet::iterator i(key_list.begin()); i != key_list.end();) { KeyEntryOS* const kel(i->first); CertIndex::const_iterator ci(cert_index_.find(kel)); if (ci == cert_index_.end()) { gu_throw_fatal << "could not find key '" << kel->get_key() << "' from cert index"; } KeyEntryOS* const ke(*ci); const bool full_key(i->second.first); const bool shared_key(i->second.second); bool keep(false); if (shared_key == false) { if ((full_key == false && ke->ref_trx() != trx) || (full_key == true && ke->ref_full_trx() != trx)) { ke->ref(trx, full_key); keep = true; } } else { if ((full_key == false && ke->ref_shared_trx() != trx) || (full_key == true && ke->ref_full_shared_trx() != trx)) { ke->ref_shared(trx, full_key); keep = true; } } if (keep) { ++i; } else { // this should not happen with Map, but with List is possible i = key_list.erase(i); if (kel != ke) delete kel; } } if (trx->pa_unsafe()) last_pa_unsafe_ = trx->global_seqno(); key_count_ += key_count; } cert_debug << "END CERTIFICATION (success): " << *trx; return TEST_OK; cert_fail: cert_debug << "END CERTIFICATION (failed): " << *trx; if (store_keys == true) { // Clean up key entries allocated for this trx for (TrxHandle::CertKeySet::iterator i(key_list.begin()); i != key_list.end(); ++i) { KeyEntryOS* const kel(i->first); // Clean up cert_index_ from entries which were added by this trx CertIndex::iterator ci(cert_index_.find(kel)); if (ci != cert_index_.end()) { KeyEntryOS* ke(*ci); if (ke->ref_trx() == 0 && ke->ref_shared_trx() == 0) { // kel was added to cert_index_ by this trx - // remove from cert_index_ and fall through to delete if (ke->get_key().flags() != kel->get_key().flags()) { // two copies of keys in key list, shared and exclusive, // skip the one which was not used to create key entry assert(key_list.find(ke) != key_list.end()); continue; } assert(ke->ref_full_trx() == 0); assert(ke->ref_full_shared_trx() == 0); assert(kel == ke); cert_index_.erase(ci); } else if (ke == kel) { // kel was added 
and is referenced by another trx - skip it continue; } // else kel != ke : kel is a duplicate of ke with different // flags, fall through to delete } else { assert(0); // we actually should never be here, the key should // be either added to cert_index_ or be there already log_warn << "could not find key '" << kel->get_key() << "' from cert index"; } assert(kel->ref_trx() == 0); assert(kel->ref_shared_trx() == 0); assert(kel->ref_full_trx() == 0); assert(kel->ref_full_shared_trx() == 0); delete kel; } assert(cert_index_.size() == prev_cert_index_size); } return TEST_FAILED; } /*! for convenience returns true if conflict and false if not */ static inline bool certify_and_depend_v3(const galera::KeyEntryNG* const found, const galera::KeySet::KeyPart& key, galera::TrxHandle* const trx, bool const log_conflict) { const galera::TrxHandle* const ref_trx( found->ref_trx(galera::KeySet::Key::P_EXCLUSIVE)); if (cert_debug_on && ref_trx) { cert_debug << "exclusive match: " << *trx << " <-----> " << *ref_trx; } wsrep_seqno_t const ref_seqno(ref_trx ? ref_trx->global_seqno() : -1); // trx should not have any references in index at this point assert(ref_trx != trx); if (gu_likely(0 != ref_trx)) { // cert conflict takes place if // 1) write sets originated from different nodes, are within cert range // 2) ref_trx is in isolation mode, write sets are within cert range if ((trx->source_id() != ref_trx->source_id() || ref_trx->is_toi()) && ref_seqno > trx->last_seen_seqno()) { if (gu_unlikely(log_conflict == true)) { log_info << "trx conflict for key " << key << ": " << *trx << " <--X--> " << *ref_trx; } return true; } } wsrep_seqno_t depends_seqno(ref_seqno); galera::KeySet::Key::Prefix const pfx (key.prefix()); if (pfx == galera::KeySet::Key::P_EXCLUSIVE) // exclusive keys must depend on shared refs as well { const galera::TrxHandle* const ref_shared_trx( found->ref_trx(galera::KeySet::Key::P_SHARED)); assert(ref_shared_trx != trx); if (ref_shared_trx) { cert_debug << "shared match: " << *trx << " <-----> " << *ref_shared_trx; depends_seqno = std::max(ref_shared_trx->global_seqno(), depends_seqno); } } trx->set_depends_seqno(std::max(trx->depends_seqno(), depends_seqno)); return false; } /* returns true on collision, false otherwise */ static bool certify_v3(galera::Certification::CertIndexNG& cert_index_ng, const galera::KeySet::KeyPart& key, galera::TrxHandle* trx, bool const store_keys, bool const log_conflicts) { galera::KeyEntryNG ke(key); galera::Certification::CertIndexNG::iterator ci(cert_index_ng.find(&ke)); if (cert_index_ng.end() == ci) { if (store_keys) { galera::KeyEntryNG* const kep(new galera::KeyEntryNG(ke)); ci = cert_index_ng.insert(kep).first; cert_debug << "created new entry"; } return false; } else { cert_debug << "found existing entry"; galera::KeyEntryNG* const kep(*ci); // Note: For we skip certification for isolated trxs, only // cert index and key_list is populated. 
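        // as in the v1/v2 path: TOI writesets only populate the index and
        // never produce a certification conflict here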
return (!trx->is_toi() && certify_and_depend_v3(kep, key, trx, log_conflicts)); } } galera::Certification::TestResult galera::Certification::do_test_v3(TrxHandle* trx, bool store_keys) { cert_debug << "BEGIN CERTIFICATION v3: " << *trx; #ifndef NDEBUG // to check that cleanup after cert failure returns cert_index_ // to original size size_t prev_cert_index_size(cert_index_.size()); #endif // NDEBUG const KeySetIn& key_set(trx->write_set_in().keyset()); long const key_count(key_set.count()); long processed(0); key_set.rewind(); for (; processed < key_count; ++processed) { const KeySet::KeyPart& key(key_set.next()); if (certify_v3(cert_index_ng_, key, trx, store_keys, log_conflicts_)) { goto cert_fail; } } trx->set_depends_seqno(std::max(trx->depends_seqno(), last_pa_unsafe_)); if (store_keys == true) { assert (key_count == processed); key_set.rewind(); for (long i(0); i < key_count; ++i) { const KeySet::KeyPart& k(key_set.next()); KeyEntryNG ke(k); CertIndexNG::const_iterator ci(cert_index_ng_.find(&ke)); if (ci == cert_index_ng_.end()) { gu_throw_fatal << "could not find key '" << k << "' from cert index"; } KeyEntryNG* const kep(*ci); kep->ref(k.prefix(), k, trx); } if (trx->pa_unsafe()) last_pa_unsafe_ = trx->global_seqno(); key_count_ += key_count; } cert_debug << "END CERTIFICATION (success): " << *trx; return TEST_OK; cert_fail: cert_debug << "END CERTIFICATION (failed): " << *trx; assert (processed < key_count); if (store_keys == true) { /* Clean up key entries allocated for this trx */ key_set.rewind(); /* 'strictly less' comparison is essential in the following loop: * processed key failed cert and was not added to index */ for (long i(0); i < processed; ++i) { KeyEntryNG ke(key_set.next()); // Clean up cert_index_ from entries which were added by this trx CertIndexNG::iterator ci(cert_index_ng_.find(&ke)); if (gu_likely(ci != cert_index_ng_.end())) { KeyEntryNG* kep(*ci); if (kep->referenced() == false) { // kel was added to cert_index_ by this trx - // remove from cert_index_ and fall through to delete cert_index_ng_.erase(ci); } else continue; assert(kep->referenced() == false); delete kep; } else if(ke.key().shared()) { assert(0); // we actually should never be here, the key should // be either added to cert_index_ or be there already log_warn << "could not find shared key '" << ke.key() << "' from cert index"; } else { /* exclusive can duplicate shared */ } } assert(cert_index_.size() == prev_cert_index_size); } return TEST_FAILED; } galera::Certification::TestResult galera::Certification::do_test(TrxHandle* trx, bool store_keys) { assert(trx->source_id() != WSREP_UUID_UNDEFINED); if (trx->version() != version_) { log_warn << "trx protocol version: " << trx->version() << " does not match certification protocol version: " << version_; return TEST_FAILED; } if (gu_unlikely(trx->last_seen_seqno() < initial_position_ || trx->global_seqno() - trx->last_seen_seqno() > max_length_)) { if (trx->last_seen_seqno() < initial_position_) { if (cert_index_.empty() == false) { log_warn << "last seen seqno below limit for trx " << *trx; } else { log_debug << "last seen seqno below limit for trx " << *trx; } } if (trx->global_seqno() - trx->last_seen_seqno() > max_length_) { log_warn << "certification interval for trx " << *trx << " exceeds the limit of " << max_length_; } return TEST_FAILED; } TestResult res(TEST_FAILED); gu::Lock lock(mutex_); // why do we need that? - e.g. 
set_trx_committed() /* initialize parent seqno */ if ((trx->flags() & (TrxHandle::F_ISOLATION | TrxHandle::F_PA_UNSAFE)) || trx_map_.empty()) { trx->set_depends_seqno(trx->global_seqno() - 1); } else { trx->set_depends_seqno( trx_map_.begin()->second->global_seqno() - 1); } switch (version_) { case 1: case 2: res = do_test_v1to2(trx, store_keys); break; case 3: res = do_test_v3(trx, store_keys); break; default: gu_throw_fatal << "certification test for version " << version_ << " not implemented"; } if (store_keys == true && res == TEST_OK) { ++trx_count_; gu::Lock lock(stats_mutex_); ++n_certified_; deps_dist_ += (trx->global_seqno() - trx->depends_seqno()); cert_interval_ += (trx->global_seqno() - trx->last_seen_seqno() - 1); index_size_ = (cert_index_.size() + cert_index_ng_.size()); } byte_count_ += trx->size(); return res; } galera::Certification::TestResult galera::Certification::do_test_preordered(TrxHandle* trx) { /* Source ID is not always available for preordered events (e.g. event * producer didn't provide any) so for now we must accept undefined IDs. */ //assert(trx->source_id() != WSREP_UUID_UNDEFINED); assert(trx->new_version()); assert(trx->preordered()); /* we don't want to go any further unless the writeset checksum is ok */ trx->verify_checksum(); // throws /* if checksum failed we need to throw ASAP, let the caller catch it, * flush monitors, save state and abort. */ /* This is a primitive certification test for preordered actions: * it does not handle gaps and relies on general apply monitor for * parallel applying. Ideally there should be a certification object * per source. */ if (gu_unlikely(last_preordered_id_ && (last_preordered_id_ + 1 != trx->trx_id()))) { log_warn << "Gap in preordered stream: source_id '" << trx->source_id() << "', trx_id " << trx->trx_id() << ", previous id " << last_preordered_id_; assert(0); } trx->set_depends_seqno(last_preordered_seqno_ - trx->write_set_in().pa_range() + 1); // +1 compensates for subtracting from a previous seqno, rather than own. 
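    // Editor's note (illustrative numbers only, not from the original
    // source): with last_preordered_seqno_ == 100 and a writeset pa_range
    // of 3, the assignment above yields depends_seqno == 100 - 3 + 1 == 98,
    // i.e. the preordered event waits only for seqno 98 to be applied and
    // may be applied in parallel with the writesets that follow it.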
last_preordered_seqno_ = trx->global_seqno(); last_preordered_id_ = trx->trx_id(); return TEST_OK; } galera::Certification::Certification(gu::Config& conf, ServiceThd& thd) : version_ (-1), trx_map_ (), cert_index_ (), cert_index_ng_ (), deps_set_ (), service_thd_ (thd), mutex_ (), trx_size_warn_count_ (0), initial_position_ (-1), position_ (-1), safe_to_discard_seqno_ (-1), last_pa_unsafe_ (-1), last_preordered_seqno_ (position_), last_preordered_id_ (0), stats_mutex_ (), n_certified_ (0), deps_dist_ (0), cert_interval_ (0), index_size_ (0), key_count_ (0), byte_count_ (0), trx_count_ (0), max_length_ (max_length(conf)), max_length_check_ (length_check(conf)), log_conflicts_ (conf.get(CERT_PARAM_LOG_CONFLICTS)) {} galera::Certification::~Certification() { log_info << "cert index usage at exit " << cert_index_.size(); log_info << "cert trx map usage at exit " << trx_map_.size(); log_info << "deps set usage at exit " << deps_set_.size(); double avg_cert_interval(0); double avg_deps_dist(0); size_t index_size(0); stats_get(avg_cert_interval, avg_deps_dist, index_size); log_info << "avg deps dist " << avg_deps_dist; log_info << "avg cert interval " << avg_cert_interval; log_info << "cert index size " << index_size; gu::Lock lock(mutex_); for_each(trx_map_.begin(), trx_map_.end(), PurgeAndDiscard(*this)); service_thd_.release_seqno(position_); service_thd_.flush(); } void galera::Certification::assign_initial_position(wsrep_seqno_t seqno, int version) { switch (version) { // value -1 used in initialization when trx protocol version is not // available case -1: case 1: case 2: case 3: break; default: gu_throw_fatal << "certification/trx version " << version << " not supported"; } gu::Lock lock(mutex_); if (seqno >= position_) { std::for_each(trx_map_.begin(), trx_map_.end(), PurgeAndDiscard(*this)); assert(cert_index_.size() == 0); assert(cert_index_ng_.size() == 0); } else { log_warn << "moving position backwards: " << position_ << " -> " << seqno; std::for_each(cert_index_.begin(), cert_index_.end(), gu::DeleteObject()); std::for_each(cert_index_ng_.begin(), cert_index_ng_.end(), gu::DeleteObject()); std::for_each(trx_map_.begin(), trx_map_.end(), Unref2nd()); cert_index_.clear(); cert_index_ng_.clear(); } trx_map_.clear(); log_info << "Assign initial position for certification: " << seqno << ", protocol version: " << version; initial_position_ = seqno; position_ = seqno; safe_to_discard_seqno_ = seqno; last_pa_unsafe_ = seqno; last_preordered_seqno_ = position_; last_preordered_id_ = 0; version_ = version; } galera::Certification::TestResult galera::Certification::test(TrxHandle* trx, bool bval) { assert(trx->global_seqno() >= 0 && trx->local_seqno() >= 0); const TestResult ret (trx->preordered() ? 
do_test_preordered(trx) : do_test(trx, bval)); if (gu_unlikely(ret != TEST_OK)) { // make sure that last depends seqno is -1 for trxs that failed // certification trx->set_depends_seqno(WSREP_SEQNO_UNDEFINED); } return ret; } wsrep_seqno_t galera::Certification::get_safe_to_discard_seqno_() const { wsrep_seqno_t retval; if (deps_set_.empty() == true) { retval = safe_to_discard_seqno_; } else { retval = (*deps_set_.begin()) - 1; } return retval; } wsrep_seqno_t galera::Certification::purge_trxs_upto_(wsrep_seqno_t const seqno, bool const handle_gcache) { assert (seqno > 0); TrxMap::iterator purge_bound(trx_map_.upper_bound(seqno)); cert_debug << "purging index up to " << seqno; for_each(trx_map_.begin(), purge_bound, PurgeAndDiscard(*this)); trx_map_.erase(trx_map_.begin(), purge_bound); if (handle_gcache) service_thd_.release_seqno(seqno); if (0 == ((trx_map_.size() + 1) % 10000)) { log_debug << "trx map after purge: length: " << trx_map_.size() << ", requested purge seqno: " << seqno << ", real purge seqno: " << trx_map_.begin()->first - 1; } return seqno; } galera::Certification::TestResult galera::Certification::append_trx(TrxHandle* trx) { assert(trx->global_seqno() >= 0 && trx->local_seqno() >= 0); assert(trx->global_seqno() > position_); trx->ref(); { gu::Lock lock(mutex_); if (gu_unlikely(trx->global_seqno() != position_ + 1)) { // this is perfectly normal if trx is rolled back just after // replication, keeping the log though log_debug << "seqno gap, position: " << position_ << " trx seqno " << trx->global_seqno(); } if (gu_unlikely((trx->last_seen_seqno() + 1) < trx_map_.begin()->first)) { /* See #733 - for now it is false positive */ cert_debug << "WARNING: last_seen_seqno is below certification index: " << trx_map_.begin()->first << " > " << trx->last_seen_seqno(); } position_ = trx->global_seqno(); if (gu_unlikely(!(position_ & max_length_check_) && (trx_map_.size() > static_cast(max_length_)))) { log_debug << "trx map size: " << trx_map_.size() << " - check if status.last_committed is incrementing"; wsrep_seqno_t trim_seqno(position_ - max_length_); wsrep_seqno_t const stds (get_safe_to_discard_seqno_()); if (trim_seqno > stds) { log_warn << "Attempt to trim certification index at " << trim_seqno << ", above safe-to-discard: " << stds; trim_seqno = stds; } else { cert_debug << "purging index up to " << trim_seqno; } purge_trxs_upto_(trim_seqno, true); } } const TestResult retval(test(trx)); { gu::Lock lock(mutex_); if (trx_map_.insert( std::make_pair(trx->global_seqno(), trx)).second == false) gu_throw_fatal << "duplicate trx entry " << *trx; deps_set_.insert(trx->last_seen_seqno()); assert(deps_set_.size() <= trx_map_.size()); } trx->mark_certified(); return retval; } wsrep_seqno_t galera::Certification::set_trx_committed(TrxHandle* trx) { assert(trx->global_seqno() >= 0 && trx->local_seqno() >= 0 && trx->is_committed() == false); wsrep_seqno_t ret(-1); { gu::Lock lock(mutex_); if (trx->is_certified() == true) { // trxs with depends_seqno == -1 haven't gone through // append_trx DepsSet::iterator i(deps_set_.find(trx->last_seen_seqno())); assert(i != deps_set_.end()); if (deps_set_.size() == 1) safe_to_discard_seqno_ = *i; deps_set_.erase(i); } if (gu_unlikely(index_purge_required())) { ret = get_safe_to_discard_seqno_(); } } trx->mark_committed(); trx->clear(); return ret; } galera::TrxHandle* galera::Certification::get_trx(wsrep_seqno_t seqno) { gu::Lock lock(mutex_); TrxMap::iterator i(trx_map_.find(seqno)); if (i == trx_map_.end()) return 0; i->second->ref(); return 
i->second; } void galera::Certification::set_log_conflicts(const std::string& str) { try { bool const old(log_conflicts_); log_conflicts_ = gu::Config::from_config(str); if (old != log_conflicts_) { log_info << (log_conflicts_ ? "Enabled" : "Disabled") << " logging of certification conflicts."; } } catch (gu::NotFound& e) { gu_throw_error(EINVAL) << "Bad value '" << str << "' for boolean parameter '" << CERT_PARAM_LOG_CONFLICTS << '\''; } } galera-3-25.3.20/galera/src/gcs_action_source.hpp0000644000015300001660000000347613042054732021374 0ustar jenkinsjenkins// // Copyright (C) 2010-2014 Codership Oy // #ifndef GALERA_GCS_ACTION_SOURCE_HPP #define GALERA_GCS_ACTION_SOURCE_HPP #include "action_source.hpp" #include "galera_gcs.hpp" #include "replicator.hpp" #include "trx_handle.hpp" #include "GCache.hpp" #include "gu_atomic.hpp" namespace galera { class GcsActionSource : public galera::ActionSource { public: GcsActionSource(TrxHandle::SlavePool& sp, GCS_IMPL& gcs, Replicator& replicator, gcache::GCache& gcache) : trx_pool_ (sp ), gcs_ (gcs ), replicator_ (replicator), gcache_ (gcache ), received_ (0 ), received_bytes_(0 ) { } ~GcsActionSource() { log_info << trx_pool_; } ssize_t process(void*, bool& exit_loop); long long received() const { return received_(); } long long received_bytes() const { return received_bytes_(); } private: void dispatch(void*, const gcs_action&, bool& exit_loop); TrxHandle::SlavePool& trx_pool_; GCS_IMPL& gcs_; Replicator& replicator_; gcache::GCache& gcache_; gu::Atomic received_; gu::Atomic received_bytes_; }; class GcsActionTrx { public: GcsActionTrx(TrxHandle::SlavePool& sp, const struct gcs_action& act); ~GcsActionTrx(); TrxHandle* trx() const { return trx_; } private: GcsActionTrx(const GcsActionTrx&); void operator=(const GcsActionTrx&); TrxHandle* trx_; }; } #endif // GALERA_GCS_ACTION_SOURCE_HPP galera-3-25.3.20/galera/src/wsdb.cpp0000644000015300001660000001014113042054732016620 0ustar jenkinsjenkins/* * Copyright (C) 2010-2014 Codership Oy */ #include "wsdb.hpp" #include "trx_handle.hpp" #include "write_set.hpp" #include "gu_lock.hpp" #include "gu_throw.hpp" void galera::Wsdb::print(std::ostream& os) const { os << "trx map:\n"; for (galera::Wsdb::TrxMap::const_iterator i = trx_map_.begin(); i != trx_map_.end(); ++i) { os << i->first << " " << *i->second << "\n"; } os << "conn query map:\n"; for (galera::Wsdb::ConnMap::const_iterator i = conn_map_.begin(); i != conn_map_.end(); ++i) { os << i->first << " "; } os << "\n"; } galera::Wsdb::Wsdb() : trx_pool_ (TrxHandle::LOCAL_STORAGE_SIZE(), 512, "LocalTrxHandle"), trx_map_ (), trx_mutex_ (), conn_map_ (), conn_mutex_() {} galera::Wsdb::~Wsdb() { log_info << "wsdb trx map usage " << trx_map_.size() << " conn query map usage " << conn_map_.size(); log_info << trx_pool_; // With debug builds just print trx and query maps to stderr // and don't clean up to let valgrind etc to detect leaks. #ifndef NDEBUG std::cerr << *this; #else for_each(trx_map_.begin(), trx_map_.end(), Unref2nd()); #endif // !NDEBUG } inline galera::TrxHandle* galera::Wsdb::find_trx(wsrep_trx_id_t const trx_id) { gu::Lock lock(trx_mutex_); TrxMap::iterator const i(trx_map_.find(trx_id)); return (trx_map_.end() == i ? 
0 : i->second); } inline galera::TrxHandle* galera::Wsdb::create_trx(const TrxHandle::Params& params, const wsrep_uuid_t& source_id, wsrep_trx_id_t const trx_id) { TrxHandle* trx(TrxHandle::New(trx_pool_, params, source_id, -1, trx_id)); gu::Lock lock(trx_mutex_); std::pair i (trx_map_.insert(std::make_pair(trx_id, trx))); if (gu_unlikely(i.second == false)) gu_throw_fatal; return i.first->second; } galera::TrxHandle* galera::Wsdb::get_trx(const TrxHandle::Params& params, const wsrep_uuid_t& source_id, wsrep_trx_id_t const trx_id, bool const create) { TrxHandle* retval(find_trx(trx_id)); if (0 == retval && create) retval = create_trx(params, source_id, trx_id); if (retval != 0) retval->ref(); return retval; } galera::Wsdb::Conn* galera::Wsdb::get_conn(wsrep_conn_id_t const conn_id, bool const create) { gu::Lock lock(conn_mutex_); ConnMap::iterator i(conn_map_.find(conn_id)); if (conn_map_.end() == i) { if (create == true) { std::pair p (conn_map_.insert(std::make_pair(conn_id, Conn(conn_id)))); if (gu_unlikely(p.second == false)) gu_throw_fatal; return &p.first->second; } return 0; } return &(i->second); } galera::TrxHandle* galera::Wsdb::get_conn_query(const TrxHandle::Params& params, const wsrep_uuid_t& source_id, wsrep_trx_id_t const conn_id, bool const create) { Conn* const conn(get_conn(conn_id, create)); if (0 == conn) return 0; if (conn->get_trx() == 0 && create == true) { TrxHandle* trx (TrxHandle::New(trx_pool_, params, source_id, conn_id, -1)); conn->assign_trx(trx); } return conn->get_trx(); } void galera::Wsdb::discard_trx(wsrep_trx_id_t trx_id) { gu::Lock lock(trx_mutex_); TrxMap::iterator i; if ((i = trx_map_.find(trx_id)) != trx_map_.end()) { i->second->unref(); trx_map_.erase(i); } } void galera::Wsdb::discard_conn_query(wsrep_conn_id_t conn_id) { gu::Lock lock(conn_mutex_); ConnMap::iterator i; if ((i = conn_map_.find(conn_id)) != conn_map_.end()) { i->second.assign_trx(0); } } void galera::Wsdb::discard_conn(wsrep_conn_id_t conn_id) { gu::Lock lock(conn_mutex_); ConnMap::iterator i; if ((i = conn_map_.find(conn_id)) != conn_map_.end()) { conn_map_.erase(i); } } galera-3-25.3.20/galera/src/data_set.hpp0000644000015300001660000001146413042054732017463 0ustar jenkinsjenkins// // Copyright (C) 2013 Codership Oy // #ifndef GALERA_DATA_SET_HPP #define GALERA_DATA_SET_HPP #include "gu_rset.hpp" #include "gu_vlq.hpp" namespace galera { class DataSet { public: enum Version { EMPTY = 0, VER1 }; static Version const MAX_VERSION = VER1; static Version version (unsigned int ver) { if (gu_likely (ver <= MAX_VERSION)) return static_cast(ver); gu_throw_error (EINVAL) << "Unrecognized DataSet version: " << ver; } /*! Dummy class to instantiate DataSetOut */ class RecordOut {}; /*! 
A class to instantiate DataSetIn: provides methods necessary to * iterate over the records serialized into single input buffer */ class RecordIn { public: static size_t serial_size (const gu::byte_t* const buf, size_t const size) { /* There's a single record in a dataset */ return size; } size_t serial_size () const { return size_; } RecordIn (const gu::byte_t* buf, size_t size) : size_(size), buf_(buf) {} gu::Buf buf() { gu::Buf ret = { buf_, size_ }; return ret; } private: ssize_t size_; const gu::byte_t* buf_; }; /* class RecordIn */ }; /* class DataSet */ #if defined(__GNUG__) # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic push # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic ignored "-Weffc++" #endif class DataSetOut : public gu::RecordSetOut { public: DataSetOut () // empty ctor for slave TrxHandle : gu::RecordSetOut(), version_() {} DataSetOut (gu::byte_t* reserved, size_t reserved_size, const BaseName& base_name, DataSet::Version version) : gu::RecordSetOut ( reserved, reserved_size, base_name, check_type (version), ds_to_rs_version(version) ), version_(version) {} size_t append (const void* const src, size_t const size, bool const store) { /* append data as is, don't count as a new record */ gu::RecordSetOut::append (src, size, store, false); /* this will be deserialized using DataSet::RecordIn in DataSetIn */ return size; } DataSet::Version version () const { return count() ? version_ : DataSet::EMPTY; } typedef gu::RecordSet::GatherVector GatherVector; private: // depending on version we may pack data differently DataSet::Version const version_; static gu::RecordSet::CheckType check_type (DataSet::Version ver) { switch (ver) { case DataSet::EMPTY: break; /* Can't create EMPTY DataSetOut */ case DataSet::VER1: return gu::RecordSet::CHECK_MMH128; } throw; } static gu::RecordSet::Version ds_to_rs_version (DataSet::Version ver) { switch (ver) { case DataSet::EMPTY: break; /* Can't create EMPTY DataSetOut */ case DataSet::VER1: return gu::RecordSet::VER1; } throw; } }; class DataSetIn : public gu::RecordSetIn { public: DataSetIn (DataSet::Version ver, const gu::byte_t* buf, size_t size) : gu::RecordSetIn(buf, size, false), version_(ver) {} DataSetIn () : gu::RecordSetIn(), version_(DataSet::EMPTY) {} void init (DataSet::Version ver, const gu::byte_t* buf, size_t size) { gu::RecordSetIn::init(buf, size, false); version_ = ver; } gu::Buf next () const { return gu::RecordSetIn::next().buf(); } private: DataSet::Version version_; }; /* class DataSetIn */ #if defined(__GNUG__) # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic pop # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) #endif } /* namespace galera */ #endif // GALERA_DATA_SET_HPP galera-3-25.3.20/galera/src/replicator_smm.hpp0000644000015300001660000004576113042054732020726 0ustar jenkinsjenkins// // Copyright (C) 2010-2014 Codership Oy // //! 
@file replicator_smm.hpp // // @brief Galera Synchronous Multi-Master replicator // #ifndef GALERA_REPLICATOR_SMM_HPP #define GALERA_REPLICATOR_SMM_HPP #include "replicator.hpp" #include "gu_init.h" #include "GCache.hpp" #include "gcs.hpp" #include "monitor.hpp" #include "wsdb.hpp" #include "certification.hpp" #include "trx_handle.hpp" #include "write_set.hpp" #include "galera_service_thd.hpp" #include "fsm.hpp" #include "gcs_action_source.hpp" #include "ist.hpp" #include "gu_atomic.hpp" #include "saved_state.hpp" #include "gu_debug_sync.hpp" #include namespace galera { class ReplicatorSMM : public Replicator { public: typedef enum { SST_NONE, SST_WAIT, SST_REQ_FAILED, SST_FAILED } SstState; static const size_t N_STATES = S_DONOR + 1; ReplicatorSMM(const wsrep_init_args* args); ~ReplicatorSMM(); int trx_proto_ver() const { return trx_params_.version_; } int repl_proto_ver() const{ return protocol_version_; } wsrep_status_t connect(const std::string& cluster_name, const std::string& cluster_url, const std::string& state_donor, bool bootstrap); wsrep_status_t close(); wsrep_status_t async_recv(void* recv_ctx); TrxHandle* get_local_trx(wsrep_trx_id_t trx_id, bool create = false) { return wsdb_.get_trx(trx_params_, uuid_, trx_id, create); } void unref_local_trx(TrxHandle* trx) { assert(trx->refcnt() > 1); trx->unref(); } void discard_local_trx(TrxHandle* trx) { trx->release_write_set_out(); wsdb_.discard_trx(trx->trx_id()); } TrxHandle* local_conn_trx(wsrep_conn_id_t conn_id, bool create) { return wsdb_.get_conn_query(trx_params_, uuid_, conn_id, create); } void discard_local_conn_trx(wsrep_conn_id_t conn_id) { wsdb_.discard_conn_query(conn_id); } void discard_local_conn(wsrep_conn_id_t conn_id) { wsdb_.discard_conn(conn_id); } void apply_trx(void* recv_ctx, TrxHandle* trx); wsrep_status_t replicate(TrxHandle* trx, wsrep_trx_meta_t*); void abort_trx(TrxHandle* trx) ; wsrep_status_t pre_commit(TrxHandle* trx, wsrep_trx_meta_t*); wsrep_status_t replay_trx(TrxHandle* trx, void* replay_ctx); wsrep_status_t post_commit(TrxHandle* trx); wsrep_status_t post_rollback(TrxHandle* trx); wsrep_status_t causal_read(wsrep_gtid_t*); wsrep_status_t to_isolation_begin(TrxHandle* trx, wsrep_trx_meta_t*); wsrep_status_t to_isolation_end(TrxHandle* trx); wsrep_status_t preordered_collect(wsrep_po_handle_t& handle, const struct wsrep_buf* data, size_t count, bool copy); wsrep_status_t preordered_commit(wsrep_po_handle_t& handle, const wsrep_uuid_t& source, uint64_t flags, int pa_range, bool commit); wsrep_status_t sst_sent(const wsrep_gtid_t& state_id, int rcode); wsrep_status_t sst_received(const wsrep_gtid_t& state_id, const void* state, size_t state_len, int rcode); void process_trx(void* recv_ctx, TrxHandle* trx); void process_commit_cut(wsrep_seqno_t seq, wsrep_seqno_t seqno_l); void process_conf_change(void* recv_ctx, const wsrep_view_info_t& view, int repl_proto, State next_state, wsrep_seqno_t seqno_l); void process_state_req(void* recv_ctx, const void* req, size_t req_size, wsrep_seqno_t seqno_l, wsrep_seqno_t donor_seq); void process_join(wsrep_seqno_t seqno, wsrep_seqno_t seqno_l); void process_sync(wsrep_seqno_t seqno_l); const struct wsrep_stats_var* stats_get() const; void stats_reset(); void stats_free(struct wsrep_stats_var*); /*! @throws NotFound */ void set_param (const std::string& key, const std::string& value); /*! 
@throws NotFound */ void param_set (const std::string& key, const std::string& value); std::string param_get (const std::string& key) const; const gu::Config& params() const { return config_; } wsrep_seqno_t pause(); void resume(); void desync(); void resync(); struct InitConfig { InitConfig(gu::Config&, const char* node_address, const char *base_dir); }; private: ReplicatorSMM(const ReplicatorSMM&); void operator=(const ReplicatorSMM&); struct Param { static const std::string base_host; static const std::string base_port; static const std::string base_dir; static const std::string proto_max; static const std::string key_format; static const std::string commit_order; static const std::string causal_read_timeout; static const std::string max_write_set_size; }; typedef std::pair Default; struct Defaults { std::map map_; Defaults (); }; static const Defaults defaults; // both a list of parameters and a list of default values wsrep_seqno_t last_committed() { return co_mode_ != CommitOrder::BYPASS ? commit_monitor_.last_left() : apply_monitor_.last_left(); } void report_last_committed(wsrep_seqno_t purge_seqno) { if (gu_unlikely(purge_seqno != -1)) { service_thd_.report_last_committed(purge_seqno); } } wsrep_status_t cert(TrxHandle* trx); wsrep_status_t cert_and_catch(TrxHandle* trx); wsrep_status_t cert_for_aborted(TrxHandle* trx); void update_state_uuid (const wsrep_uuid_t& u); void update_incoming_list (const wsrep_view_info_t& v); /* aborts/exits the program in a clean way */ void abort() GU_NORETURN; class LocalOrder { public: LocalOrder(TrxHandle& trx) : seqno_(trx.local_seqno()), trx_(&trx) { } LocalOrder(wsrep_seqno_t seqno) : seqno_(seqno), trx_(0) { } void lock() { if (trx_ != 0) trx_->lock(); } void unlock() { if (trx_ != 0) trx_->unlock(); } wsrep_seqno_t seqno() const { return seqno_; } bool condition(wsrep_seqno_t last_entered, wsrep_seqno_t last_left) const { return (last_left + 1 == seqno_); } #ifdef GU_DBUG_ON void debug_sync(gu::Mutex& mutex) { if (trx_ != 0 && trx_->is_local()) { unlock(); mutex.unlock(); GU_DBUG_SYNC_WAIT("local_monitor_enter_sync"); mutex.lock(); lock(); } } #endif // GU_DBUG_ON private: LocalOrder(const LocalOrder&); wsrep_seqno_t seqno_; TrxHandle* trx_; }; class ApplyOrder { public: ApplyOrder(TrxHandle& trx) : trx_(trx) { } void lock() { trx_.lock(); } void unlock() { trx_.unlock(); } wsrep_seqno_t seqno() const { return trx_.global_seqno(); } bool condition(wsrep_seqno_t last_entered, wsrep_seqno_t last_left) const { return (trx_.is_local() == true || last_left >= trx_.depends_seqno()); } #ifdef GU_DBUG_ON void debug_sync(gu::Mutex& mutex) { if (trx_.is_local()) { unlock(); mutex.unlock(); GU_DBUG_SYNC_WAIT("apply_monitor_enter_sync"); mutex.lock(); lock(); } else { unlock(); mutex.unlock(); GU_DBUG_SYNC_WAIT("apply_monitor_slave_enter_sync"); mutex.lock(); lock(); } } #endif // GU_DBUG_ON private: ApplyOrder(const ApplyOrder&); TrxHandle& trx_; }; public: class CommitOrder { public: typedef enum { BYPASS = 0, OOOC = 1, LOCAL_OOOC = 2, NO_OOOC = 3 } Mode; static Mode from_string(const std::string& str) { int ret(gu::from_string(str)); switch (ret) { case BYPASS: case OOOC: case LOCAL_OOOC: case NO_OOOC: break; default: gu_throw_error(EINVAL) << "invalid value " << str << " for commit order mode"; } return static_cast(ret); } CommitOrder(TrxHandle& trx, Mode mode) : trx_ (trx ), mode_(mode) { } void lock() { trx_.lock(); } void unlock() { trx_.unlock(); } wsrep_seqno_t seqno() const { return trx_.global_seqno(); } bool condition(wsrep_seqno_t 
last_entered, wsrep_seqno_t last_left) const { switch (mode_) { case BYPASS: gu_throw_fatal << "commit order condition called in bypass mode"; case OOOC: return true; case LOCAL_OOOC: return trx_.is_local(); // in case of remote trx fall through case NO_OOOC: return (last_left + 1 == trx_.global_seqno()); } gu_throw_fatal << "invalid commit mode value " << mode_; } #ifdef GU_DBUG_ON void debug_sync(gu::Mutex& mutex) { if (trx_.is_local()) { unlock(); mutex.unlock(); GU_DBUG_SYNC_WAIT("commit_monitor_enter_sync"); mutex.lock(); lock(); } } #endif // GU_DBUG_ON private: CommitOrder(const CommitOrder&); TrxHandle& trx_; const Mode mode_; }; class StateRequest { public: virtual const void* req () const = 0; virtual ssize_t len () const = 0; virtual const void* sst_req () const = 0; virtual ssize_t sst_len () const = 0; virtual const void* ist_req () const = 0; virtual ssize_t ist_len () const = 0; virtual ~StateRequest() {} }; private: // state machine class Transition { public: Transition(State const from, State const to) : from_(from), to_(to) { } State from() const { return from_; } State to() const { return to_; } bool operator==(Transition const& other) const { return (from_ == other.from_ && to_ == other.to_); } class Hash { public: size_t operator()(Transition const& tr) const { return (gu::HashValue(static_cast(tr.from_)) ^ gu::HashValue(static_cast(tr.to_))); } }; private: State from_; State to_; }; void build_stats_vars (std::vector& stats); void establish_protocol_versions (int version); bool state_transfer_required(const wsrep_view_info_t& view_info); void prepare_for_IST (void*& req, ssize_t& req_len, const wsrep_uuid_t& group_uuid, wsrep_seqno_t group_seqno); void recv_IST(void* recv_ctx); StateRequest* prepare_state_request (const void* sst_req, ssize_t sst_req_len, const wsrep_uuid_t& group_uuid, wsrep_seqno_t group_seqno); void send_state_request (const StateRequest* req); void request_state_transfer (void* recv_ctx, const wsrep_uuid_t& group_uuid, wsrep_seqno_t group_seqno, const void* sst_req, ssize_t sst_req_len); wsrep_seqno_t donate_sst(void* recv_ctx, const StateRequest& streq, const wsrep_gtid_t& state_id, bool bypass); /* local state seqno for internal use (macro mock up) */ wsrep_seqno_t STATE_SEQNO(void) { return apply_monitor_.last_left(); } class InitLib /* Library initialization routines */ { public: InitLib (gu_log_cb_t cb) { gu_init(cb); } }; InitLib init_lib_; gu::Config config_; InitConfig init_config_; // registers configurable parameters and defaults struct ParseOptions { ParseOptions(Replicator& repl, gu::Config&, const char* opts); } parse_options_; // parse option string supplied on initialization class InitSSL { public: InitSSL(gu::Config& conf) { gu::ssl_init_options(conf); } } init_ssl_; // initialize global SSL parameters static int const MAX_PROTO_VER; /* * |------------------------------------------------------ * | protocol_version_ | trx version | str_proto_ver_ | * |------------------------------------------------------ * | 1 | 1 | 0 | * | 2 | 1 | 1 | * | 3 | 2 | 1 | * | 4 | 2 | 1 | * | 5 | 3 | 1 | * | 6 | 3 | 2 | * | 7 | 3 | 2 | * ------------------------------------------------------- */ int str_proto_ver_;// state transfer request protocol int protocol_version_;// general repl layer proto int proto_max_; // maximum allowed proto version FSM state_; SstState sst_state_; // configurable params const CommitOrder::Mode co_mode_; // commit order mode // persistent data location std::string state_file_; SavedState st_; // boolean telling if the node 
is safe to use for bootstrapping // a new primary component bool safe_to_bootstrap_; // currently installed trx parameters TrxHandle::Params trx_params_; // identifiers wsrep_uuid_t uuid_; wsrep_uuid_t const state_uuid_; const char state_uuid_str_[37]; wsrep_seqno_t cc_seqno_; // seqno of last CC wsrep_seqno_t pause_seqno_; // local seqno of last pause call // application callbacks void* app_ctx_; wsrep_view_cb_t view_cb_; wsrep_apply_cb_t apply_cb_; wsrep_commit_cb_t commit_cb_; wsrep_unordered_cb_t unordered_cb_; wsrep_sst_donate_cb_t sst_donate_cb_; wsrep_synced_cb_t synced_cb_; // SST std::string sst_donor_; wsrep_uuid_t sst_uuid_; wsrep_seqno_t sst_seqno_; gu::Mutex sst_mutex_; gu::Cond sst_cond_; int sst_retry_sec_; // services gcache::GCache gcache_; GCS_IMPL gcs_; ServiceThd service_thd_; // action sources TrxHandle::SlavePool slave_pool_; ActionSource* as_; GcsActionSource gcs_as_; ist::Receiver ist_receiver_; ist::AsyncSenderMap ist_senders_; // trx processing Wsdb wsdb_; Certification cert_; // concurrency control Monitor local_monitor_; Monitor apply_monitor_; Monitor commit_monitor_; gu::datetime::Period causal_read_timeout_; // counters gu::Atomic receivers_; gu::Atomic replicated_; gu::Atomic replicated_bytes_; gu::Atomic keys_count_; gu::Atomic keys_bytes_; gu::Atomic data_bytes_; gu::Atomic unrd_bytes_; gu::Atomic local_commits_; gu::Atomic local_rollbacks_; gu::Atomic local_cert_failures_; gu::Atomic local_replays_; gu::Atomic causal_reads_; gu::Atomic preordered_id_; // temporary preordered ID // non-atomic stats std::string incoming_list_; mutable gu::Mutex incoming_mutex_; mutable std::vector wsrep_stats_; }; std::ostream& operator<<(std::ostream& os, ReplicatorSMM::State state); } #endif /* GALERA_REPLICATOR_SMM_HPP */ galera-3-25.3.20/galera/src/saved_state.hpp0000644000015300001660000000307313042054732020176 0ustar jenkinsjenkins// // Copyright (C) 2012 Codership Oy // #ifndef GALERA_SAVED_STATE_HPP #define GALERA_SAVED_STATE_HPP #include "gu_atomic.hpp" #include "gu_mutex.hpp" #include "gu_lock.hpp" #include "wsrep_api.h" #include #include namespace galera { class SavedState { public: SavedState (const std::string& file); ~SavedState (); void get (wsrep_uuid_t& u, wsrep_seqno_t& s, bool& safe_to_bootstrap); void set (const wsrep_uuid_t& u, wsrep_seqno_t s, bool safe_to_bootstrap); void mark_unsafe(); void mark_safe(); void mark_corrupt(); void stats(long& marks, long& locks, long& writes) { marks = total_marks_(); locks = total_locks_; writes = total_writes_; } private: FILE* fs_; wsrep_uuid_t uuid_; wsrep_seqno_t seqno_; bool safe_to_bootstrap_; gu::Atomic unsafe_; bool corrupt_; /* this mutex is needed because mark_safe() and mark_corrupt() will be * called outside local monitor, so race is possible */ gu::Mutex mtx_; wsrep_uuid_t written_uuid_; ssize_t current_len_; gu::Atomic total_marks_; long total_locks_; long total_writes_; void write_and_flush (const wsrep_uuid_t& u, const wsrep_seqno_t s, bool safe_to_bootstrap); SavedState (const SavedState&); SavedState& operator=(const SavedState&); }; /* class SavedState */ } /* namespace galera */ #endif /* GALERA_SAVED_STATE_HPP */ galera-3-25.3.20/galera/src/ist.hpp0000644000015300001660000001134013042054732016467 0ustar jenkinsjenkins// // Copyright (C) 2011-2014 Codership Oy // #ifndef GALERA_IST_HPP #define GALERA_IST_HPP #include #include "wsrep_api.h" #include "galera_gcs.hpp" #include "trx_handle.hpp" #include "gu_config.hpp" #include "gu_lock.hpp" #include "gu_monitor.hpp" #include "gu_asio.hpp" #include 
#include namespace gcache { class GCache; } namespace galera { class TrxHandle; namespace ist { void register_params(gu::Config& conf); class Receiver { public: static std::string const RECV_ADDR; static std::string const RECV_BIND; Receiver(gu::Config& conf, TrxHandle::SlavePool&, const char* addr); ~Receiver(); std::string prepare(wsrep_seqno_t, wsrep_seqno_t, int); void ready(); int recv(TrxHandle** trx); wsrep_seqno_t finished(); void run(); private: void interrupt(); std::string recv_addr_; std::string recv_bind_; asio::io_service io_service_; asio::ip::tcp::acceptor acceptor_; asio::ssl::context ssl_ctx_; gu::Mutex mutex_; gu::Cond cond_; class Consumer { public: Consumer() : cond_(), trx_(0) { } ~Consumer() { } gu::Cond& cond() { return cond_; } void trx(TrxHandle* trx) { trx_ = trx; } TrxHandle* trx() const { return trx_; } private: // Non-copyable Consumer(const Consumer&); Consumer& operator=(const Consumer&); gu::Cond cond_; TrxHandle* trx_; }; std::stack consumers_; wsrep_seqno_t current_seqno_; wsrep_seqno_t last_seqno_; gu::Config& conf_; TrxHandle::SlavePool& trx_pool_; pthread_t thread_; int error_code_; int version_; bool use_ssl_; bool running_; bool ready_; // GCC 4.8.5 on FreeBSD wants this Receiver(const Receiver&); Receiver& operator=(const Receiver&); }; class Sender { public: Sender(const gu::Config& conf, gcache::GCache& gcache, const std::string& peer, int version); virtual ~Sender(); void send(wsrep_seqno_t first, wsrep_seqno_t last); void cancel() { if (use_ssl_ == true) { ssl_stream_->lowest_layer().close(); } else { socket_.close(); } } private: asio::io_service io_service_; asio::ip::tcp::socket socket_; asio::ssl::context ssl_ctx_; asio::ssl::stream* ssl_stream_; const gu::Config& conf_; gcache::GCache& gcache_; int version_; bool use_ssl_; Sender(const Sender&); void operator=(const Sender&); }; class AsyncSender; class AsyncSenderMap { public: AsyncSenderMap(GCS_IMPL& gcs, gcache::GCache& gcache) : senders_(), monitor_(), gcache_(gcache) { } void run(const gu::Config& conf, const std::string& peer, wsrep_seqno_t, wsrep_seqno_t, int); void remove(AsyncSender*, wsrep_seqno_t); void cancel(); gcache::GCache& gcache() { return gcache_; } private: std::set senders_; // use monitor instead of mutex, it provides cancellation point gu::Monitor monitor_; gcache::GCache& gcache_; }; } // namespace ist } // namespace galera #endif // GALERA_IST_HPP galera-3-25.3.20/galera/src/write_set.cpp0000644000015300001660000000637413042054732017703 0ustar jenkinsjenkins// // Copyright (C) 2010-2013 Codership Oy // #include "write_set.hpp" #include "gu_serialize.hpp" #include "gu_logger.hpp" size_t galera::WriteSet::serialize(gu::byte_t* buf, size_t buf_len, size_t offset) const { offset = gu::serialize4(keys_, buf, buf_len, offset); offset = gu::serialize4(data_, buf, buf_len, offset); return offset; } size_t galera::WriteSet::unserialize(const gu::byte_t* buf, size_t buf_len, size_t offset) { keys_.clear(); offset = gu::unserialize4(buf, buf_len, offset, keys_); offset = gu::unserialize4(buf, buf_len, offset, data_); return offset; } size_t galera::WriteSet::serial_size() const { return (gu::serial_size4(keys_) + gu::serial_size4(data_)); } std::pair galera::WriteSet::segment(const gu::byte_t* buf, size_t buf_len, size_t offset) { uint32_t data_len; offset = gu::unserialize4(buf, buf_len, offset, data_len); if (gu_unlikely(offset + data_len > buf_len)) { #ifdef NDEBUG gu_throw_error(EMSGSIZE); #else gu_throw_error(EMSGSIZE) << "offset: " << offset << ", data_len: " << data_len 
<< ", buf_len: " << buf_len; #endif /* NDEBUG */ } return std::pair(offset, data_len); } size_t galera::WriteSet::keys(const gu::byte_t* buf, size_t buf_len, size_t offset, int version, KeySequence& ks) { std::pair seg(segment(buf, buf_len, offset)); offset = seg.first; const size_t seg_end(seg.first + seg.second); assert(seg_end <= buf_len); while (offset < seg_end) { KeyOS key(version); if ((offset = key.unserialize(buf, buf_len, offset)) == 0) { gu_throw_fatal << "failed to unserialize key"; } ks.push_back(key); } assert(offset == seg_end); return offset; } void galera::WriteSet::append_key(const KeyData& kd) { KeyOS key (kd.proto_ver, kd.parts, kd.parts_num, (kd.shared() ? galera::KeyOS::F_SHARED : 0) ); const size_t hash(key.hash()); std::pair range(key_refs_.equal_range(hash)); for (KeyRefMap::const_iterator i(range.first); i != range.second; ++i) { KeyOS cmp(version_); (void)cmp.unserialize(&keys_[0], keys_.size(), i->second); if (key == cmp && key.flags() == cmp.flags()) return; } size_t key_size(key.serial_size()); size_t offset(keys_.size()); keys_.resize(offset + key_size); (void)key.serialize(&keys_[0], keys_.size(), offset); (void)key_refs_.insert(std::make_pair(hash, offset)); } void galera::WriteSet::get_keys(KeySequence& s) const { size_t offset(0); while (offset < keys_.size()) { KeyOS key(version_); if ((offset = key.unserialize(&keys_[0], keys_.size(), offset)) == 0) { gu_throw_fatal << "failed to unserialize key"; } s.push_back(key); } assert(offset == keys_.size()); } galera-3-25.3.20/galera/src/write_set_ng.hpp0000644000015300001660000006615313042054732020375 0ustar jenkinsjenkins// // Copyright (C) 2013 Codership Oy // /* * Planned writeset composition (not to scale): * * [WS header][ key set ][ data set ][ unordered set ] * * WS header contains common info: total size, set versions etc. * Key set and data set are always present, unordered set is optional. 
*/ #ifndef GALERA_WRITE_SET_NG_HPP #define GALERA_WRITE_SET_NG_HPP #include "wsrep_api.h" #include "key_set.hpp" #include "data_set.hpp" #include "gu_serialize.hpp" #include "gu_vector.hpp" #include #include #include #include namespace galera { class WriteSetNG { public: static int const MAX_SIZE = 0x7fffffff; static int const MAX_PA_RANGE = 0x0000ffff; enum Version { VER3 = 3 }; /* Max header version that we can understand */ static Version const MAX_VERSION = VER3; /* Parses beginning of the header to detect writeset version and * returns it as raw integer for backward compatibility * static Version version(int v) will convert it to enum */ static int version(const void* const buf, size_t const buflen) { if (gu_likely(buflen >= 4)) { const gu::byte_t* const b(static_cast(buf)); if (b[0] == Header::MAGIC_BYTE && b[1] >= ((VER3 << 4) | VER3) && b[2] >= 32 /* header size will hardly ever go below 32 */) { int const min_ver(b[1] & 0x0f); int const max_ver(b[1] >> 4); if (min_ver <= max_ver) /* sanity check */ { /* supported situations: return max supported version */ if (max_ver < MAX_VERSION) return max_ver; if (min_ver <= MAX_VERSION) return MAX_VERSION; /* unsupported situation: minimum required version is * greater than maximum known */ return min_ver; } } else if (0 == b[1] && 0 == b[2] && b[3] <= 2) { /* header from 2.x and before */ return b[3]; } /* unrecognized header, fall through to error */ } return -1; } static Version version(int v) { switch (v) { case VER3: return VER3; } gu_throw_error (EPROTO) << "Unrecognized writeset version: " << v; } /* These flags should be fixed to wire protocol version and so * technically can't be initialized to WSREP_FLAG_xxx macros as the * latter may arbitrarily change. */ enum Flags { F_COMMIT = 1 << 0, F_ROLLBACK = 1 << 1, F_TOI = 1 << 2, F_PA_UNSAFE = 1 << 3, F_COMMUTATIVE = 1 << 4, F_NATIVE = 1 << 5 }; /* this takes care of converting wsrep API flags to on-the-wire flags */ static uint32_t wsrep_flags_to_ws_flags (uint32_t const flags); typedef gu::RecordSet::GatherVector GatherVector; /* TODO: separate metadata access from physical representation in * future versions */ class Header { public: static unsigned char const MAGIC_BYTE = 'G'; static Version version(const gu::Buf& buf) { /* the following will throw if version is not supported */ return WriteSetNG::version (WriteSetNG::version(buf.ptr, buf.size)); } static unsigned char size(Version ver) { switch (ver) { case VER3: return V3_SIZE; } log_fatal << "Unknown writeset version: " << ver; abort(); // want to dump core right here } /* This is for WriteSetOut */ explicit Header (Version ver) : local_(), ptr_(local_), ver_(ver), size_(size(ver)), chksm_() { assert (size_t(size_) <= sizeof(local_)); } size_t gather (KeySet::Version kver, DataSet::Version const dver, bool unord, bool annot, uint16_t flags, const wsrep_uuid_t& source, const wsrep_conn_id_t& conn, const wsrep_trx_id_t& trx, GatherVector& out); /* records last_seen, timestamp and CRC before replication */ void set_last_seen (const wsrep_seqno_t& ls); /* records partial seqno, pa_range, timestamp and CRC before * replication (for preordered events)*/ void set_preordered (uint16_t pa_range) { uint16_t* const pa(reinterpret_cast (ptr_ + V3_PA_RANGE_OFF)); *pa = gu::htog(pa_range); set_last_seen (0); } /* This is for WriteSetIn */ explicit Header (const gu::Buf& buf) : local_(), ptr_ (reinterpret_cast( const_cast(buf.ptr))), ver_ (version(buf)), size_ (check_size(ver_, ptr_, buf.size)), chksm_(ver_, ptr_, size_) {} Header () : 
local_(), ptr_(NULL), ver_(), size_(0), chksm_() {} /* for late WriteSetIn initialization */ void read_buf (const gu::Buf& buf) { ver_ = version(buf); ptr_ = reinterpret_cast(const_cast(buf.ptr)); size_ = check_size (ver_, ptr_, buf.size); Checksum::verify(ver_, ptr_, size_); } Version version() const { return ver_; } unsigned char size() const { return size_; } const gu::byte_t* ptr() const { return ptr_; } KeySet::Version keyset_ver() const { return KeySet::version((ptr_[V3_SETS_OFF] & 0xf0) >> 4); } bool has_keys() const { return keyset_ver() != KeySet::EMPTY; } bool has_unrd() const { return (ptr_[V3_SETS_OFF] & V3_UNORD_FLAG); } bool has_annt() const { return (ptr_[V3_SETS_OFF] & V3_ANNOT_FLAG); } DataSet::Version dataset_ver() const { return DataSet::version((ptr_[V3_SETS_OFF] & 0x0c) >> 2); } DataSet::Version unrdset_ver() const { return has_unrd() ? dataset_ver() : DataSet::EMPTY; } DataSet::Version anntset_ver() const { return has_annt() ? dataset_ver() : DataSet::EMPTY; } uint16_t flags() const { return gu::gtoh( *(reinterpret_cast(ptr_ + V3_FLAGS_OFF)) ); } uint16_t pa_range() const { return gu::gtoh( *(reinterpret_cast(ptr_ + V3_PA_RANGE_OFF)) ); } wsrep_seqno_t last_seen() const { assert (pa_range() == 0); return seqno_priv(); } wsrep_seqno_t seqno() const { assert (pa_range() > 0); return seqno_priv(); } long long timestamp() const { return gu::gtoh( *(reinterpret_cast(ptr_+ V3_TIMESTAMP_OFF)) ); } const wsrep_uuid_t& source_id() const { return *(reinterpret_cast (ptr_ + V3_SOURCE_ID_OFF)); } wsrep_trx_id_t conn_id() const { return gu::gtoh( *(reinterpret_cast(ptr_ + V3_CONN_ID_OFF)) ); } wsrep_trx_id_t trx_id() const { return gu::gtoh( *(reinterpret_cast(ptr_ + V3_TRX_ID_OFF)) ); } const gu::byte_t* payload() const { return ptr_ + size(); } uint64_t get_checksum() const { const void* const checksum_ptr (reinterpret_cast(ptr_) + size_ - V3_CHECKSUM_SIZE); return gu::gtoh( *(static_cast(checksum_ptr))); } /* to set seqno and parallel applying range after certification */ void set_seqno(const wsrep_seqno_t& seqno, uint16_t pa_range); gu::Buf copy(bool include_keys, bool include_unrd) const; private: static ssize_t check_size (Version const ver, const gu::byte_t* const buf, ssize_t const bufsize) { assert (bufsize > 4); ssize_t const hsize(buf[V3_HEADER_SIZE_OFF]); if (gu_unlikely(hsize > bufsize)) { gu_throw_error (EMSGSIZE) << "Input buffer size " << bufsize << " smaller than header size " << hsize; } return hsize; } static int const V3_CHECKSUM_SIZE = 8; class Checksum { public: typedef uint64_t type_t; /* produce value (corrected for endianness) */ static void compute (const void* ptr, size_t size, type_t& value) { gu::FastHash::digest (ptr, size, value); value = gu::htog(value); } static void verify (Version ver, const void* ptr, ssize_t size); Checksum () {} Checksum (Version ver, const void* ptr, ssize_t size) { verify (ver, ptr, size); } private: GU_COMPILE_ASSERT(sizeof(type_t) == V3_CHECKSUM_SIZE, uhoh); }; static unsigned char const V3_ANNOT_FLAG = 0x01; static unsigned char const V3_UNORD_FLAG = 0x02; /* Fist 8 bytes of header: 0: 'G' - "magic" byte 1: bits 4-7: header version bits 0-3: minimum compatible version 2: header size (payload offset) 3: bits 4-7: keyset version bits 2-3: dataset version bit 1: has unordered set bit 0: has annotation 4-5: flags 6-7: PA range all multibyte integers are in little-endian encoding */ static int const V3_MAGIC_OFF = 0; static int const V3_HEADER_VERS_OFF = V3_MAGIC_OFF + 1; static int const V3_HEADER_SIZE_OFF = 
V3_HEADER_VERS_OFF + 1; static int const V3_SETS_OFF = V3_HEADER_SIZE_OFF + 1; static int const V3_FLAGS_OFF = V3_SETS_OFF + 1; static int const V3_PA_RANGE_OFF = V3_FLAGS_OFF + 2; static int const V3_LAST_SEEN_OFF = V3_PA_RANGE_OFF + 2; static int const V3_SEQNO_OFF = V3_LAST_SEEN_OFF; // seqno takes place of last seen static int const V3_TIMESTAMP_OFF = V3_LAST_SEEN_OFF + 8; static int const V3_SOURCE_ID_OFF = V3_TIMESTAMP_OFF + 8; static int const V3_CONN_ID_OFF = V3_SOURCE_ID_OFF + 16; static int const V3_TRX_ID_OFF = V3_CONN_ID_OFF + 8; static int const V3_CRC_OFF = V3_TRX_ID_OFF + 8; static int const V3_SIZE = V3_CRC_OFF + 8; // 64 struct Offsets { int const header_ver_; int const header_size_; int const sets_; int const flags_; int const pa_range_; int const last_seen_; int const seqno_; int const timestamp_; int const source_id_; int const conn_id_; int const trx_id_; int const crc_; Offsets(int, int, int, int, int, int, int, int, int, int, int, int); }; static Offsets const V3; static int const MAX_HEADER_SIZE = V3_SIZE; mutable gu::byte_t local_[MAX_HEADER_SIZE]; gu::byte_t* ptr_; Version ver_; gu::byte_t size_; Checksum chksm_; wsrep_seqno_t seqno_priv() const { return gu::gtoh( *(reinterpret_cast(ptr_+ V3_LAST_SEEN_OFF)) ); } static void update_checksum(gu::byte_t* const ptr, size_t const size) { Checksum::type_t cval; Checksum::compute (ptr, size, cval); *reinterpret_cast(ptr + size) = cval; } }; /* class Header */ private: static bool const WRITESET_FLAGS_MATCH_API_FLAGS = (WSREP_FLAG_COMMIT == F_COMMIT && WSREP_FLAG_ROLLBACK == F_ROLLBACK && WSREP_FLAG_ISOLATION == F_TOI && WSREP_FLAG_PA_UNSAFE == F_PA_UNSAFE && WSREP_FLAG_COMMUTATIVE == F_COMMUTATIVE && WSREP_FLAG_NATIVE == F_NATIVE); /* this assert should be removed when wsrep API flags become * explicitly incompatible with wirteset flags */ GU_COMPILE_ASSERT(WRITESET_FLAGS_MATCH_API_FLAGS, flags_incompatible); template static inline uint32_t wsrep_flags_to_ws_flags_tmpl (uint32_t const flags) { uint32_t ret(0); if (flags & WSREP_FLAG_COMMIT) ret |= F_COMMIT; if (flags & WSREP_FLAG_ROLLBACK) ret |= F_ROLLBACK; if (flags & WSREP_FLAG_ISOLATION) ret |= F_TOI; if (flags & WSREP_FLAG_PA_UNSAFE) ret |= F_PA_UNSAFE; if (flags & WSREP_FLAG_COMMUTATIVE) ret |= F_COMMUTATIVE; if (flags & WSREP_FLAG_NATIVE) ret |= F_NATIVE; return ret; } }; /* class WriteSetNG */ /* specialization for the case when WS flags fully match API flags */ template <> inline uint32_t WriteSetNG::wsrep_flags_to_ws_flags_tmpl(uint32_t const flags) { return flags; } inline uint32_t WriteSetNG::wsrep_flags_to_ws_flags (uint32_t const flags) { return wsrep_flags_to_ws_flags_tmpl (flags); } class WriteSetOut { public: typedef gu::RecordSetOutBase::BaseName BaseName; WriteSetOut (const std::string& dir_name, wsrep_trx_id_t id, KeySet::Version kver, gu::byte_t* reserved, size_t reserved_size, uint16_t flags = 0, WriteSetNG::Version ver = WriteSetNG::MAX_VERSION, DataSet::Version dver = DataSet::MAX_VERSION, DataSet::Version uver = DataSet::MAX_VERSION, size_t max_size = WriteSetNG::MAX_SIZE) : header_(ver), base_name_(dir_name, id), /* 1/8 of reserved (aligned by 8) goes to key set */ kbn_ (base_name_), keys_ (reserved, (reserved_size >>= 6, reserved_size <<= 3, reserved_size), kbn_, kver), /* 5/8 of reserved goes to data set */ dbn_ (base_name_), data_ (reserved + reserved_size, reserved_size*5, dbn_, dver), /* 2/8 of reserved goes to unordered set */ ubn_ (base_name_), unrd_ (reserved + reserved_size*6, reserved_size*2, ubn_, uver), /* annotation set is not 
allocated unless requested */ abn_ (base_name_), annt_ (NULL), left_ (max_size - keys_.size() - data_.size() - unrd_.size() - header_.size()), flags_ (flags) {} ~WriteSetOut() { delete annt_; } void append_key(const KeyData& k) { left_ -= keys_.append(k); } void append_data(const void* data, size_t data_len, bool store) { left_ -= data_.append(data, data_len, store); } void append_unordered(const void* data, size_t data_len, bool store) { left_ -= unrd_.append(data, data_len, store); } void append_annotation(const void* data, size_t data_len, bool store) { if (NULL == annt_) { annt_ = new DataSetOut(NULL, 0, abn_, DataSet::MAX_VERSION); left_ -= annt_->size(); } left_ -= annt_->append(data, data_len, store); } void set_flags(uint16_t flags) { flags_ = flags; } void add_flags(uint16_t flags) { flags_ |= flags; } void mark_toi() { flags_ |= WriteSetNG::F_TOI; } void mark_pa_unsafe() { flags_ |= WriteSetNG::F_PA_UNSAFE; } bool is_empty() const { return ((data_.count() + keys_.count() + unrd_.count() + (annt_ ? annt_->count() : 0)) == 0); } /* !!! This returns header without checksum! * * Use set_last_seen() to finalize it. */ size_t gather(const wsrep_uuid_t& source, const wsrep_conn_id_t& conn, const wsrep_trx_id_t& trx, WriteSetNG::GatherVector& out) { check_size(); out->reserve (out->size() + keys_.page_count() + data_.page_count() + unrd_.page_count() + 1 /* global header */); size_t out_size (header_.gather (keys_.version(), data_.version(), unrd_.version() != DataSet::EMPTY, NULL != annt_, flags_, source, conn, trx, out)); out_size += keys_.gather(out); out_size += data_.gather(out); out_size += unrd_.gather(out); if (NULL != annt_) out_size += annt_->gather(out); return out_size; } void set_last_seen (const wsrep_seqno_t& ls) { header_.set_last_seen(ls); } void set_preordered (ssize_t pa_range) { assert (pa_range >= 0); /* By current convention pa_range is off by 1 from wsrep API def. * 0 meaning failed certification. 
*/ pa_range++; /* cap PA range by maximum we can represent */ if (gu_unlikely(pa_range > WriteSetNG::MAX_PA_RANGE)) pa_range = WriteSetNG::MAX_PA_RANGE; header_.set_preordered(pa_range + 1); } private: struct BaseNameCommon { const std::string& dir_name_; unsigned long long const id_; BaseNameCommon(const std::string& dir_name, unsigned long long id) : dir_name_(dir_name), id_ (id) {} }; template class BaseNameImpl : public BaseName { const BaseNameCommon& data_; public: BaseNameImpl (const BaseNameCommon& data) : data_(data) {} void print(std::ostream& os) const { os << data_.dir_name_ << "/0x" << std::hex << std::setfill('0') << std::setw(8) << data_.id_ << suffix_; } }; /* class BaseNameImpl */ static const char keys_suffix[]; static const char data_suffix[]; static const char unrd_suffix[]; static const char annt_suffix[]; WriteSetNG::Header header_; BaseNameCommon base_name_; BaseNameImpl kbn_; KeySetOut keys_; BaseNameImpl dbn_; DataSetOut data_; BaseNameImpl ubn_; DataSetOut unrd_; BaseNameImpl abn_; DataSetOut* annt_; ssize_t left_; uint16_t flags_; void check_size() { if (gu_unlikely(left_ < 0)) gu_throw_error (EMSGSIZE) << "Maximum writeset size exceeded by " << -left_; } WriteSetOut (const WriteSetOut&); WriteSetOut& operator= (const WriteSetOut); }; /* class WriteSetOut */ class WriteSetIn { public: WriteSetIn (const gu::Buf& buf, ssize_t const st = SIZE_THRESHOLD) : header_(buf), size_ (buf.size), keys_ (), data_ (), unrd_ (), annt_ (NULL), check_thr_id_(), check_thr_(false), check_ (false) { init (st); } WriteSetIn () : header_(), size_ (0), keys_ (), data_ (), unrd_ (), annt_ (NULL), check_thr_id_(), check_thr_(false), check_ (false) {} /* WriteSetIn(buf) == WriteSetIn() + read_buf(buf) */ void read_buf (const gu::Buf& buf, ssize_t const st = SIZE_THRESHOLD) { assert (0 == size_); assert (false == check_); header_.read_buf (buf); size_ = buf.size; init (st); } void read_buf (const gu::byte_t* const ptr, ssize_t const len) { assert (ptr != NULL); assert (len >= 0); gu::Buf tmp = { ptr, len }; read_buf (tmp); } ~WriteSetIn () { if (gu_unlikely(check_thr_)) { /* checksum was performed in a parallel thread */ pthread_join (check_thr_id_, NULL); } delete annt_; } size_t size() const { return size_; } uint16_t flags() const { return header_.flags(); } bool is_toi() const { return flags() & WriteSetNG::F_TOI; } bool pa_unsafe() const { return flags() & WriteSetNG::F_PA_UNSAFE; } int pa_range() const { return header_.pa_range(); } bool certified() const { return header_.pa_range(); } wsrep_seqno_t last_seen() const { return header_.last_seen(); } wsrep_seqno_t seqno() const { return header_.seqno(); } long long timestamp() const { return header_.timestamp(); } const wsrep_uuid_t& source_id() const { return header_.source_id(); } wsrep_conn_id_t conn_id() const { return header_.conn_id(); } wsrep_trx_id_t trx_id() const { return header_.trx_id(); } const KeySetIn& keyset() const { return keys_; } const DataSetIn& dataset() const { return data_; } const DataSetIn& unrdset() const { return unrd_; } bool annotated() const { return (annt_ != NULL); } void write_annotation(std::ostream& os) const; /* This should be called right after certification verdict is obtained * and before it is finalized. 
*/ void verify_checksum() const /* throws */ { if (gu_unlikely(check_thr_)) { /* checksum was performed in a parallel thread */ pthread_join (check_thr_id_, NULL); check_thr_ = false; checksum_fin(); } } uint64_t get_checksum() const { /* since data segment is the only thing that definitely stays * unchanged through WS lifetime, it is the WS signature */ return (data_.get_checksum()); } void set_seqno(const wsrep_seqno_t& seqno, ssize_t pa_range) { assert (seqno > 0); assert (pa_range >= 0); /* cap PA range by maximum we can represent */ if (gu_unlikely(pa_range > WriteSetNG::MAX_PA_RANGE)) pa_range = WriteSetNG::MAX_PA_RANGE; header_.set_seqno (seqno, pa_range); } typedef gu::Vector GatherVector; /* can return pointer to internal storage: out can be used only * within object scope. */ size_t gather(GatherVector& out, bool include_keys, bool include_unrd) const; private: WriteSetNG::Header header_; ssize_t size_; KeySetIn keys_; DataSetIn data_; DataSetIn unrd_; DataSetIn* annt_; pthread_t check_thr_id_; bool mutable check_thr_; bool check_; static size_t const SIZE_THRESHOLD = 1 << 22; /* 4Mb */ void checksum (); /* checksums writeset, stores result in check_ */ void checksum_fin() const { if (gu_unlikely(!check_)) { gu_throw_error(EINVAL) << "Writeset checksum failed"; } } static void* checksum_thread (void* arg) { WriteSetIn* ws(reinterpret_cast(arg)); ws->checksum(); return NULL; } /* late initialization after default constructor */ void init (ssize_t size_threshold); WriteSetIn (const WriteSetIn&); WriteSetIn& operator=(WriteSetIn); }; } /* namespace galera */ #endif // GALERA_WRITE_SET_HPP galera-3-25.3.20/galera/src/key_set.hpp0000644000015300001660000005032513042054732017341 0ustar jenkinsjenkins// // Copyright (C) 2013 Codership Oy // #ifndef GALERA_KEY_SET_HPP #define GALERA_KEY_SET_HPP #include "gu_rset.hpp" #include "gu_unordered.hpp" #include "gu_logger.hpp" #include "gu_hexdump.hpp" #include "key_data.hpp" namespace galera { /* forward declarations for KeySet::KeyPart */ class KeySetOut; class KeySet { public: enum Version { EMPTY = 0, FLAT8, /* 8-byte hash (flat) */ FLAT8A, /* 8-byte hash (flat), annotated */ FLAT16, /* 16-byte hash (flat) */ FLAT16A, /* 16-byte hash (flat), annotated */ // TREE8, /* 8-byte hash + full serialized key */ MAX_VERSION = FLAT16A }; static Version version (unsigned int ver) { if (gu_likely (ver <= MAX_VERSION)) return static_cast(ver); throw_version(ver); } static Version version (const std::string& ver); class Key { public: enum Prefix { P_SHARED = 0, P_EXCLUSIVE, P_LAST = P_EXCLUSIVE }; }; /* class Key */ /* This class describes what commonly would be referred to as a "key". * It is called KeyPart because it does not fully represent a multi-part * key, but only nth part out of N total. * To fully represent a 3-part key p1:p2:p3 one would need 3 such objects: * for parts p1, p1:p2, p1:p2:p3 */ class KeyPart { public: static size_t const TMP_STORE_SIZE = 4096; static size_t const MAX_HASH_SIZE = 16; struct TmpStore { gu::byte_t buf[TMP_STORE_SIZE]; }; union HashData { gu::byte_t buf[MAX_HASH_SIZE]; uint64_t align; }; /* This ctor creates a serialized representation of a key in tmp store * from a key hash and optional annotation. 
*/ KeyPart (TmpStore& tmp, const HashData& hash, Version const ver, bool const exclusive, const wsrep_buf_t* parts, /* for annotation */ int const part_num ) : data_(tmp.buf) { assert(ver > EMPTY && ver <= MAX_VERSION); /* 16 if ver in { FLAT16, FLAT16A }, 8 otherwise */ int const key_size (8 << (static_cast(ver - FLAT16) <= 1)); memcpy (tmp.buf, hash.buf, key_size); /* use lower bits for header: */ /* clear header bits */ gu::byte_t b = tmp.buf[0] & (~HEADER_MASK); /* set prefix */ if (exclusive) { b |= (Key::P_EXCLUSIVE & PREFIX_MASK); } /* set version */ b |= (ver & VERSION_MASK) << PREFIX_BITS; tmp.buf[0] = b; if (annotated(ver)) { store_annotation(parts, part_num, tmp.buf + key_size,sizeof(tmp.buf) - key_size); } } /* This ctor uses pointer to a permanently stored serialized key part */ KeyPart (const gu::byte_t* const buf, size_t const size) : data_(buf) { if (gu_likely(size >= 8 && serial_size() <= size)) return; throw_buffer_too_short (serial_size(), size); } explicit KeyPart (const gu::byte_t* ptr = NULL) : data_(ptr) {} Key::Prefix prefix() const { gu::byte_t const p(data_[0] & PREFIX_MASK); if (gu_likely(p <= Key::P_LAST)) return static_cast(p); throw_bad_prefix(p); } bool shared() const { return prefix() == Key::P_SHARED; } bool exclusive() const { return prefix() == Key::P_EXCLUSIVE; } static Version version(const gu::byte_t* const buf) { return Version( buf ? (buf[0] >> PREFIX_BITS) & VERSION_MASK : EMPTY); } Version version() const { return KeyPart::version(data_); } KeyPart (const KeyPart& k) : data_(k.data_) {} KeyPart& operator= (const KeyPart& k) { data_ = k.data_; return *this; } /* for hash table */ bool matches (const KeyPart& kp) const { assert (NULL != this->data_); assert (NULL != kp.data_); bool ret(true); // collision by default #if GU_WORDSIZE == 64 const uint64_t* lhs(reinterpret_cast(data_)); const uint64_t* rhs(reinterpret_cast(kp.data_)); #else const uint32_t* lhs(reinterpret_cast(data_)); const uint32_t* rhs(reinterpret_cast(kp.data_)); #endif /* WORDSIZE */ switch (std::min(version(), kp.version())) { case EMPTY: assert(0); throw_match_empty_key(version(), kp.version()); case FLAT16: case FLAT16A: #if GU_WORDSIZE == 64 ret = (lhs[1] == rhs[1]); #else ret = (lhs[2] == rhs[2] && lhs[3] == rhs[3]); #endif /* WORDSIZE */ case FLAT8: case FLAT8A: /* shift is to clear up the header */ #if GU_WORDSIZE == 64 ret = ret && ((gtoh64(lhs[0]) >> HEADER_BITS) == (gtoh64(rhs[0]) >> HEADER_BITS)); #else ret = ret && (lhs[1] == rhs[1] && (gtoh32(lhs[0]) >> HEADER_BITS) == (gtoh32(rhs[0]) >> HEADER_BITS)); #endif /* WORDSIZE */ } return ret; } size_t hash () const { /* Now this leaves uppermost bits always 0. * How bad is it in practice? Is it reasonable to assume that only * lower bits are used in unordered set? 
*/ size_t ret(gu::gtoh(reinterpret_cast(data_)[0]) >> HEADER_BITS); return ret; // (ret ^ (ret << HEADER_BITS)) to cover 0 bits } static size_t serial_size (const gu::byte_t* const buf, size_t const size) { Version const ver(version(buf)); return serial_size (ver, buf, size); } size_t serial_size () const { return KeyPart::serial_size(data_, -1U); } void print (std::ostream& os) const; void swap (KeyPart& other) { using std::swap; swap(data_, other.data_); } const gu::byte_t* ptr() const { return data_; } protected: friend class KeySetOut; /* update data pointer */ void update_ptr(const gu::byte_t* ptr) const { data_ = ptr; } /* update storage of KeyPart already inserted in unordered set */ void store(gu::RecordSetOut& rs) const { data_ = rs.append(data_, serial_size(), true, true).first; // log_info << "Stored key of size: " << serial_size(); } private: static unsigned int const PREFIX_BITS = 2; static gu::byte_t const PREFIX_MASK = (1 << PREFIX_BITS) - 1; static unsigned int const VERSION_BITS = 3; static gu::byte_t const VERSION_MASK = (1 << VERSION_BITS) - 1; static unsigned int const HEADER_BITS = PREFIX_BITS + VERSION_BITS; static gu::byte_t const HEADER_MASK = (1 << HEADER_BITS) - 1; mutable /* to be able to store const object */ const gu::byte_t* data_; // it never owns the buffer static size_t base_size (Version const ver, const gu::byte_t* const buf, size_t const size) { switch (ver) { case FLAT16: case FLAT16A: return 16; case FLAT8: case FLAT8A: return 8; case EMPTY: assert(0); } abort(); } static bool annotated (Version const ver) { return (ver == FLAT16A || ver == FLAT8A); } typedef uint16_t ann_size_t; static size_t serial_size (Version const ver, const gu::byte_t* const buf, size_t const size = -1U) { size_t ret(base_size(ver, buf, size)); assert (ret <= size); if (annotated(ver)) { assert (ret + 2 <= size); ret +=gu::gtoh(*reinterpret_cast(buf + ret)); assert (ret <= size); } return ret; } static size_t store_annotation (const wsrep_buf_t* parts, int part_num, gu::byte_t* buf, int size); static void print_annotation (std::ostream& os, const gu::byte_t* buf); static void throw_buffer_too_short (size_t expected, size_t got) GU_NORETURN; static void throw_bad_prefix (gu::byte_t p) GU_NORETURN; static void throw_match_empty_key (Version my, Version other) GU_NORETURN; }; /* class KeyPart */ class KeyPartHash { public: size_t operator() (const KeyPart& k) const { return k.hash(); } }; class KeyPartEqual { public: bool operator() (const KeyPart& l, const KeyPart& r) const { return (l.matches(r)); } }; /* functor KeyPartEqual */ static void throw_version(int) GU_NORETURN; }; /* class KeySet */ inline void swap (KeySet::KeyPart& a, KeySet::KeyPart& b) { a.swap(b); } inline std::ostream& operator << (std::ostream& os, const KeySet::KeyPart& kp) { kp.print (os); return os; } #if defined(__GNUG__) # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic push # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic ignored "-Weffc++" #endif class KeySetOut : public gu::RecordSetOut { public: typedef gu::UnorderedSet < KeySet::KeyPart, KeySet::KeyPartHash, KeySet::KeyPartEqual > /* This #if decides whether we use straight gu::UnorderedSet for appended * key parts (0), or go for an optimized version (1). Don't remove it. 
*/ #if 0 KeyParts; #else KeyPartSet; /* This is a naive mock up of an "unordered set" that first tries to use * preallocated set of buckets and falls back to a "real" heap-based * unordered set from STL/TR1 when preallocated one is exhausted. * The goal is to make sure that at least 3 keys can be inserted without * the need for dynamic allocation. * In practice, with 64 "buckets" and search depth of 3, the average * number of inserted keys before there is a need to go for heap is 25. * 128 buckets will give you 45 and 256 - around 80. */ class KeyParts { public: KeyParts() : first_(), second_(NULL), first_size_(0) { ::memset(first_, 0, sizeof(first_)); } ~KeyParts() { delete second_; } /* This iterator class is declared for compatibility with * unordered_set. We may actually use a more simple interface here. */ class iterator { public: iterator(const KeySet::KeyPart* kp) : kp_(kp) {} /* This is sort-of a dirty hack to ensure that first_ array * of KeyParts class can be treated like a POD array. * It uses the fact that the only non-static member of * KeySet::KeyPart is gu::byte_t* and so does direct casts between * pointers. I wish someone could make it cleaner. */ iterator(const gu::byte_t** kp) : kp_(reinterpret_cast(kp)) {} const KeySet::KeyPart* operator -> () const { return kp_; } const KeySet::KeyPart& operator * () const { return *kp_; } bool operator == (const iterator& i) const { return (kp_ == i.kp_); } bool operator != (const iterator& i) const { return (kp_ != i.kp_); } private: const KeySet::KeyPart* kp_; }; const iterator end() { return iterator(static_cast(NULL)); } const iterator find(const KeySet::KeyPart& kp) { unsigned int idx(kp.hash()); for (unsigned int i(0); i < FIRST_DEPTH; ++i, ++idx) { idx &= FIRST_MASK; if (0 !=first_[idx] && KeySet::KeyPart(first_[idx]).matches(kp)) { return iterator(&first_[idx]); } } if (second_ && second_->size() > 0) { KeyPartSet::iterator i2(second_->find(kp)); if (i2 != second_->end()) return iterator(&(*i2)); } return end(); } std::pair insert(const KeySet::KeyPart& kp) { unsigned int idx(kp.hash()); for (unsigned int i(0); i < FIRST_DEPTH; ++i, ++idx) { idx &= FIRST_MASK; if (0 == first_[idx]) { first_[idx] = kp.ptr(); ++first_size_; return std::pair(iterator(&first_[idx]), true); } if (KeySet::KeyPart(first_[idx]).matches(kp)) { return std::pair(iterator(&first_[idx]),false); } } if (!second_) { second_ = new KeyPartSet(); // log_info << "Requesting heap at load factor " // << first_size_ << '/' << FIRST_SIZE << " = " // << (double(first_size_)/FIRST_SIZE); } std::pair res = second_->insert(kp); return std::pair(iterator(&(*res.first)), res.second); } iterator erase(iterator it) { unsigned int idx(it->hash()); for (unsigned int i(0); i < FIRST_DEPTH; ++i, ++idx) { idx &= FIRST_MASK; if (first_[idx] && KeySet::KeyPart(first_[idx]).matches(*it)) { first_[idx] = 0; --first_size_; return iterator(&first_[(idx + 1) & FIRST_MASK]); } } if (second_ && second_->size() > 0) { KeyPartSet::iterator it2(second_->erase(second_->find(*it))); if (it2 != second_->end()) return iterator(&(*it2)); } return end(); } size_t size() const { return (first_size_ + second_->size()); } private: static unsigned int const FIRST_MASK = 0x3f; // 63 static unsigned int const FIRST_SIZE = FIRST_MASK + 1; static unsigned int const FIRST_DEPTH = 3; const gu::byte_t* first_[FIRST_SIZE]; KeyPartSet* second_; unsigned int first_size_; }; #endif /* 1 */ class KeyPart { public: KeyPart (KeySet::Version const ver = KeySet::FLAT16) : hash_ (), part_ (0), value_(0), size_ (0), 
ver_ (ver), own_ (false) { assert (ver_); } /* to throw in KeyPart() ctor in case it is a duplicate */ class DUPLICATE {}; KeyPart (KeyParts& added, KeySetOut& store, const KeyPart* parent, const KeyData& kd, int const part_num); KeyPart (const KeyPart& k) : hash_ (k.hash_), part_ (k.part_), value_(k.value_), size_ (k.size_), ver_ (k.ver_), own_ (k.own_) { assert (ver_); k.own_ = false; } friend void swap (KeyPart& l, KeyPart& r) { using std::swap; swap (l.hash_, r.hash_ ); swap (l.part_, r.part_ ); swap (l.value_, r.value_); swap (l.size_, r.size_ ); swap (l.ver_, r.ver_ ); swap (l.own_, r.own_ ); } KeyPart& operator= (KeyPart k) { swap(*this, k); return *this; } bool match (const void* const v, size_t const s) const { return (size_ == s && !(::memcmp (value_, v, size_))); } bool exclusive () const { return (part_ && part_->exclusive()); } bool shared () const { return !exclusive(); } void acquire() { gu::byte_t* tmp = new gu::byte_t[size_]; std::copy(value_, value_ + size_, tmp); value_ = tmp; own_ = true; } void release() { if (own_) { // log_debug << "released: " << gu::Hexdump(value_, size_, true); delete[] value_; value_ = 0; } own_ = false; } ~KeyPart() { release(); } void print (std::ostream& os) const; typedef gu::RecordSet::GatherVector GatherVector; private: gu::Hash hash_; const KeySet::KeyPart* part_; mutable const gu::byte_t* value_; unsigned int size_; KeySet::Version ver_; mutable bool own_; }; /* class KeySetOut::KeyPart */ KeySetOut () // empty ctor for slave TrxHandle : gu::RecordSetOut(), added_(), prev_ (), new_ (), version_() {} KeySetOut (gu::byte_t* reserved, size_t reserved_size, const BaseName& base_name, KeySet::Version const version) : gu::RecordSetOut ( reserved, reserved_size, base_name, check_type (version), ks_to_rs_version(version) ), added_(), prev_ (), new_ (), version_(version) { assert (version_ != KeySet::EMPTY); KeyPart zero(version_); prev_().push_back(zero); } ~KeySetOut () {} size_t append (const KeyData& kd); KeySet::Version version () { return count() ? 
version_ : KeySet::EMPTY; } private: // depending on version we may pack data differently KeyParts added_; gu::Vector prev_; gu::Vector new_; KeySet::Version version_; static gu::RecordSet::CheckType check_type (KeySet::Version ver) { switch (ver) { case KeySet::EMPTY: break; /* Can't create EMPTY KeySetOut */ default: return gu::RecordSet::CHECK_MMH128; } KeySet::throw_version(ver); } static gu::RecordSet::Version ks_to_rs_version (KeySet::Version ver) { switch (ver) { case KeySet::EMPTY: break; /* Can't create EMPTY KeySetOut */ default: return gu::RecordSet::VER1; } KeySet::throw_version(ver); } }; /* class KeySetOut */ inline std::ostream& operator << (std::ostream& os, const KeySetOut::KeyPart& kp) { kp.print (os); return os; } class KeySetIn : public gu::RecordSetIn { public: KeySetIn (KeySet::Version ver, const gu::byte_t* buf, size_t size) : gu::RecordSetIn(buf, size, false), version_(ver) {} KeySetIn () : gu::RecordSetIn(), version_(KeySet::EMPTY) {} void init (KeySet::Version ver, const gu::byte_t* buf, size_t size) { gu::RecordSetIn::init(buf, size, false); version_ = ver; } KeySet::KeyPart const next () const { return gu::RecordSetIn::next(); } private: KeySet::Version version_; }; /* class KeySetIn */ #if defined(__GNUG__) # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic pop # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) #endif } /* namespace galera */ #endif // GALERA_KEY_SET_HPP galera-3-25.3.20/galera/src/replicator.cpp0000644000015300001660000000115213042054732020027 0ustar jenkinsjenkins// // Copyright (C) 2010-2014 Codership Oy // #include "replicator.hpp" namespace galera { std::string const Replicator::Param::debug_log = "debug"; #ifdef GU_DBUG_ON std::string const Replicator::Param::dbug = "dbug"; std::string const Replicator::Param::signal = "signal"; #endif // GU_DBUG_ON void Replicator::register_params(gu::Config& conf) { conf.add(Param::debug_log, "no"); #ifdef GU_DBUG_ON conf.add(Param::dbug, ""); conf.add(Param::signal, ""); #endif // GU_DBUG_ON } const char* const Replicator::TRIVIAL_SST(WSREP_STATE_TRANSFER_TRIVIAL); } /* namespace galera */ galera-3-25.3.20/galera/src/ist_proto.hpp0000644000015300001660000005126113042054732017720 0ustar jenkinsjenkins// // Copyright (C) 2011-2014 Codership Oy // #ifndef GALERA_IST_PROTO_HPP #define GALERA_IST_PROTO_HPP #include "trx_handle.hpp" #include "GCache.hpp" #include "gu_logger.hpp" #include "gu_serialize.hpp" #include "gu_vector.hpp" // // Message class must have non-virtual destructor until // support up to version 3 is removed as serialization/deserialization // depends on the size of the class. // #if defined(__GNUG__) # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic push # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic ignored "-Weffc++" #endif // // Sender Receiver // connect() -----> accept() // <----- send_handshake() // send_handshake_response() -----> // <----- send_ctrl(OK) // send_trx() -----> // -----> // send_ctrl(EOF) -----> // <----- close() // close() // // Note about protocol/message versioning: // Version is determined by GCS and IST protocol is initialized in total // order. Therefore it is not necessary to negotiate version at IST level, // it should be enough to check that message version numbers match. 
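// Editorial illustration (not part of the original source): for protocol
// versions > 3 the Message::serialize()/unserialize() pair below packs the
// header into a fixed 12-byte layout -- one byte each for version, type,
// flags and ctrl, followed by an 8-byte length.  The sketch below mirrors
// that layout in self-contained form; the namespace and function names are
// illustrative only, and byte ordering is left in host order here, whereas
// the real code delegates it to gu::serialize1()/gu::serialize8().
#include <stddef.h>
#include <stdint.h>
#include <string.h>

namespace ist_header_sketch
{
    struct Header
    {
        uint8_t  version;
        uint8_t  type;
        uint8_t  flags;
        int8_t   ctrl;
        uint64_t len;
    };

    static size_t const HEADER_SIZE = 12;

    inline size_t pack (const Header& h, unsigned char* buf)
    {
        buf[0] = h.version;
        buf[1] = h.type;
        buf[2] = h.flags;
        buf[3] = static_cast<unsigned char>(h.ctrl);
        ::memcpy (buf + 4, &h.len, sizeof(h.len)); // host byte order here
        return HEADER_SIZE;
    }

    inline size_t unpack (const unsigned char* buf, Header& h)
    {
        h.version = buf[0];
        h.type    = buf[1];
        h.flags   = buf[2];
        h.ctrl    = static_cast<int8_t>(buf[3]);
        ::memcpy (&h.len, buf + 4, sizeof(h.len));
        return HEADER_SIZE;
    }
}
// For T_TRX messages the payload following this header begins with the two
// 8-byte seqno fields written by send_trx() further below.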
// namespace galera { namespace ist { class Message { public: typedef enum { T_NONE = 0, T_HANDSHAKE = 1, T_HANDSHAKE_RESPONSE = 2, T_CTRL = 3, T_TRX = 4 } Type; Message(int version = -1, Type type = T_NONE, uint8_t flags = 0, int8_t ctrl = 0, uint64_t len = 0) : version_(version), type_ (type ), flags_ (flags ), ctrl_ (ctrl ), len_ (len ) { } ~Message() { } int version() const { return version_; } Type type() const { return type_ ; } uint8_t flags() const { return flags_ ; } int8_t ctrl() const { return ctrl_ ; } uint64_t len() const { return len_ ; } size_t serial_size() const { if (version_ > 3) { // header: version 1 byte, type 1 byte, flags 1 byte, // ctrl field 1 byte return 4 + sizeof(len_); } else { return sizeof(*this); } } size_t serialize(gu::byte_t* buf, size_t buflen, size_t offset)const { #ifndef NDEBUG size_t orig_offset(offset); #endif // NDEBUG if (version_ > 3) { offset = gu::serialize1(uint8_t(version_), buf, buflen, offset); offset = gu::serialize1(uint8_t(type_), buf, buflen, offset); offset = gu::serialize1(flags_, buf, buflen, offset); offset = gu::serialize1(ctrl_, buf, buflen, offset); offset = gu::serialize8(len_, buf, buflen, offset); } else { if (buflen < offset + sizeof(*this)) { gu_throw_error(EMSGSIZE) << "buffer too short"; } *reinterpret_cast(buf + offset) = *this; offset += sizeof(*this); } assert((version_ > 3 && offset - orig_offset == 12) || (offset - orig_offset == sizeof(*this))); return offset; } size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset) { assert(version_ >= 0); #ifndef NDEBUG size_t orig_offset(offset); #endif // NDEBUG uint8_t u8; if (version_ > 3) { offset = gu::unserialize1(buf, buflen, offset, u8); } else { u8 = *reinterpret_cast(buf + offset); } if (u8 != version_) { gu_throw_error(EPROTO) << "invalid protocol version " << int(u8) << ", expected " << version_; } if (u8 > 3) { version_ = u8; offset = gu::unserialize1(buf, buflen, offset, u8); type_ = static_cast(u8); offset = gu::unserialize1(buf, buflen, offset, flags_); offset = gu::unserialize1(buf, buflen, offset, ctrl_); offset = gu::unserialize8(buf, buflen, offset, len_); } else { if (buflen < offset + sizeof(*this)) { gu_throw_error(EMSGSIZE) <<" buffer too short for version " << version_ << ": " << buflen << " " << offset << " " << sizeof(*this); } *this = *reinterpret_cast(buf + offset); offset += sizeof(*this); } assert((version_ > 3 && offset - orig_offset == 12) || (offset - orig_offset == sizeof(*this))); return offset; } private: int version_; // unfortunately for compatibility with older // versions we must leave it as int (4 bytes) Type type_; uint8_t flags_; int8_t ctrl_; uint64_t len_; }; class Handshake : public Message { public: Handshake(int version = -1) : Message(version, Message::T_HANDSHAKE, 0, 0, 0) { } }; class HandshakeResponse : public Message { public: HandshakeResponse(int version = -1) : Message(version, Message::T_HANDSHAKE_RESPONSE, 0, 0, 0) { } }; class Ctrl : public Message { public: enum { // negative values reserved for error codes C_OK = 0, C_EOF = 1 }; Ctrl(int version = -1, int8_t code = 0) : Message(version, Message::T_CTRL, 0, code, 0) { } }; class Trx : public Message { public: Trx(int version = -1, uint64_t len = 0) : Message(version, Message::T_TRX, 0, 0, len) { } }; class Proto { public: Proto(TrxHandle::SlavePool& sp, int version, bool keep_keys) : trx_pool_ (sp), raw_sent_ (0), real_sent_(0), version_ (version), keep_keys_(keep_keys) { } ~Proto() { if (raw_sent_ > 0) { log_info << "ist proto finished, raw sent: " << 
raw_sent_ << " real sent: " << real_sent_ << " frac: " << (raw_sent_ == 0 ? 0. : static_cast(real_sent_)/raw_sent_); } } template void send_handshake(ST& socket) { Handshake hs(version_); gu::Buffer buf(hs.serial_size()); size_t offset(hs.serialize(&buf[0], buf.size(), 0)); size_t n(asio::write(socket, asio::buffer(&buf[0], buf.size()))); if (n != offset) { gu_throw_error(EPROTO) << "error sending handshake"; } } template void recv_handshake(ST& socket) { Message msg(version_); gu::Buffer buf(msg.serial_size()); size_t n(asio::read(socket, asio::buffer(&buf[0], buf.size()))); if (n != buf.size()) { gu_throw_error(EPROTO) << "error receiving handshake"; } (void)msg.unserialize(&buf[0], buf.size(), 0); log_debug << "handshake msg: " << msg.version() << " " << msg.type() << " " << msg.len(); switch (msg.type()) { case Message::T_HANDSHAKE: break; case Message::T_CTRL: switch (msg.ctrl()) { case Ctrl::C_EOF: gu_throw_error(EINTR); default: gu_throw_error(EPROTO) << "unexpected ctrl code: " << msg.ctrl(); } break; default: gu_throw_error(EPROTO) << "unexpected message type: " << msg.type(); } if (msg.version() != version_) { gu_throw_error(EPROTO) << "mismatching protocol version: " << msg.version() << " required: " << version_; } // TODO: Figure out protocol versions to use } template void send_handshake_response(ST& socket) { HandshakeResponse hsr(version_); gu::Buffer buf(hsr.serial_size()); size_t offset(hsr.serialize(&buf[0], buf.size(), 0)); size_t n(asio::write(socket, asio::buffer(&buf[0], buf.size()))); if (n != offset) { gu_throw_error(EPROTO) << "error sending handshake response"; } } template void recv_handshake_response(ST& socket) { Message msg(version_); gu::Buffer buf(msg.serial_size()); size_t n(asio::read(socket, asio::buffer(&buf[0], buf.size()))); if (n != buf.size()) { gu_throw_error(EPROTO) << "error receiving handshake"; } (void)msg.unserialize(&buf[0], buf.size(), 0); log_debug << "handshake response msg: " << msg.version() << " " << msg.type() << " " << msg.len(); switch (msg.type()) { case Message::T_HANDSHAKE_RESPONSE: break; case Message::T_CTRL: switch (msg.ctrl()) { case Ctrl::C_EOF: gu_throw_error(EINTR) << "interrupted by ctrl"; default: gu_throw_error(EPROTO) << "unexpected ctrl code: " << msg.ctrl(); } default: gu_throw_error(EINVAL) << "unexpected message type: " << msg.type(); } } template void send_ctrl(ST& socket, int8_t code) { Ctrl ctrl(version_, code); gu::Buffer buf(ctrl.serial_size()); size_t offset(ctrl.serialize(&buf[0], buf.size(), 0)); size_t n(asio::write(socket, asio::buffer(&buf[0],buf.size()))); if (n != offset) { gu_throw_error(EPROTO) << "error sending ctrl message"; } } template int8_t recv_ctrl(ST& socket) { Message msg(version_); gu::Buffer buf(msg.serial_size()); size_t n(asio::read(socket, asio::buffer(&buf[0], buf.size()))); if (n != buf.size()) { gu_throw_error(EPROTO) << "error receiving handshake"; } (void)msg.unserialize(&buf[0], buf.size(), 0); log_debug << "msg: " << msg.version() << " " << msg.type() << " " << msg.len(); switch (msg.type()) { case Message::T_CTRL: break; default: gu_throw_error(EPROTO) << "unexpected message type: " << msg.type(); } return msg.ctrl(); } template void send_trx(ST& socket, const gcache::GCache::Buffer& buffer) { const bool rolled_back(buffer.seqno_d() == -1); galera::WriteSetIn ws; boost::array cbs; size_t payload_size; /* size of the 2nd cbs buffer */ size_t sent; if (gu_unlikely(rolled_back)) { payload_size = 0; } else { if (keep_keys_ || version_ < WS_NG_VERSION) { payload_size = buffer.size(); 
const void* const ptr(buffer.ptr()); cbs[1] = asio::const_buffer(ptr, payload_size); cbs[2] = asio::const_buffer(ptr, 0); } else { gu::Buf tmp = { buffer.ptr(), buffer.size() }; ws.read_buf (tmp, 0); WriteSetIn::GatherVector out; payload_size = ws.gather (out, false, false); assert (2 == out->size()); cbs[1] = asio::const_buffer(out[0].ptr, out[0].size); cbs[2] = asio::const_buffer(out[1].ptr, out[1].size); } } size_t const trx_meta_size( 8 /* serial_size(buffer.seqno_g()) */ + 8 /* serial_size(buffer.seqno_d()) */ ); Trx trx_msg(version_, trx_meta_size + payload_size); gu::Buffer buf(trx_msg.serial_size() + trx_meta_size); size_t offset(trx_msg.serialize(&buf[0], buf.size(), 0)); offset = gu::serialize8(buffer.seqno_g(), &buf[0], buf.size(), offset); offset = gu::serialize8(buffer.seqno_d(), &buf[0], buf.size(), offset); cbs[0] = asio::const_buffer(&buf[0], buf.size()); if (gu_likely(payload_size)) { sent = asio::write(socket, cbs); } else { sent = asio::write(socket, asio::buffer(cbs[0])); } log_debug << "sent " << sent << " bytes"; } template galera::TrxHandle* recv_trx(ST& socket) { Message msg(version_); gu::Buffer buf(msg.serial_size()); size_t n(asio::read(socket, asio::buffer(&buf[0], buf.size()))); if (n != buf.size()) { gu_throw_error(EPROTO) << "error receiving trx header"; } (void)msg.unserialize(&buf[0], buf.size(), 0); log_debug << "received header: " << n << " bytes, type " << msg.type() << " len " << msg.len(); switch (msg.type()) { case Message::T_TRX: { // TODO: ideally we want to make seqno_g and cert verdict // be a part of msg object above, so that we can skip this // read. The overhead is tiny given that vast majority of // messages will be trx writesets. wsrep_seqno_t seqno_g, seqno_d; buf.resize(sizeof(seqno_g) + sizeof(seqno_d)); n = asio::read(socket, asio::buffer(&buf[0], buf.size())); if (n != buf.size()) { gu_throw_error(EPROTO) << "error reading trx meta data"; } size_t offset(gu::unserialize8(&buf[0], buf.size(), 0, seqno_g)); offset = gu::unserialize8(&buf[0], buf.size(), offset, seqno_d); galera::TrxHandle* trx(galera::TrxHandle::New(trx_pool_)); if (seqno_d == WSREP_SEQNO_UNDEFINED) { if (offset != msg.len()) { gu_throw_error(EINVAL) << "message size " << msg.len() << " does not match expected size " << offset; } } else { MappedBuffer& wbuf(trx->write_set_collection()); size_t const wsize(msg.len() - offset); wbuf.resize(wsize); n = asio::read(socket, asio::buffer(&wbuf[0], wbuf.size())); if (gu_unlikely(n != wbuf.size())) { gu_throw_error(EPROTO) << "error reading write set data"; } trx->unserialize(&wbuf[0], wbuf.size(), 0); } if (seqno_d == WSREP_SEQNO_UNDEFINED || trx->version() < 3) { trx->set_received(0, -1, seqno_g); trx->set_depends_seqno(seqno_d); } else { trx->set_received_from_ws(); assert(trx->global_seqno() == seqno_g); assert(trx->depends_seqno() >= seqno_d); } trx->mark_certified(); log_debug << "received trx body: " << *trx; return trx; } case Message::T_CTRL: switch (msg.ctrl()) { case Ctrl::C_EOF: return 0; default: if (msg.ctrl() >= 0) { gu_throw_error(EPROTO) << "unexpected ctrl code: " << msg.ctrl(); } else { gu_throw_error(-msg.ctrl()) <<"peer reported error"; } } default: gu_throw_error(EPROTO) << "unexpected message type: " << msg.type(); } gu_throw_fatal; throw; return 0; // keep compiler happy } private: TrxHandle::SlavePool& trx_pool_; uint64_t raw_sent_; uint64_t real_sent_; int version_; bool keep_keys_; }; } } #if defined(__GNUG__) # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic pop # 
endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) #endif #endif // GALERA_IST_PROTO_HPP galera-3-25.3.20/galera/src/replicator_str.cpp0000644000015300001660000006161513042054732020731 0ustar jenkinsjenkins// // Copyright (C) 2010-2015 Codership Oy // #include "replicator_smm.hpp" #include "uuid.hpp" #include namespace galera { bool ReplicatorSMM::state_transfer_required(const wsrep_view_info_t& view_info) { if (view_info.state_gap) { assert(view_info.view >= 0); if (state_uuid_ == view_info.state_id.uuid) // common history { wsrep_seqno_t const group_seqno(view_info.state_id.seqno); wsrep_seqno_t const local_seqno(STATE_SEQNO()); if (state_() >= S_JOINING) /* See #442 - S_JOINING should be a valid state here */ { return (local_seqno < group_seqno); } else { if (local_seqno > group_seqno) { close(); gu_throw_fatal << "Local state seqno (" << local_seqno << ") is greater than group seqno (" <(req_ + offset))); } void* req (ssize_t offset) const { if (len(offset) > 0) return req_ + offset + sizeof(uint32_t); else return 0; } ssize_t const len_; char* const req_; bool const own_; }; std::string const StateRequest_v1::MAGIC("STRv1"); #ifndef INT32_MAX #define INT32_MAX 0x7fffffff #endif StateRequest_v1::StateRequest_v1 ( const void* const sst_req, ssize_t const sst_req_len, const void* const ist_req, ssize_t const ist_req_len) : len_(MAGIC.length() + 1 + sizeof(uint32_t) + sst_req_len + sizeof(uint32_t) + ist_req_len), req_(reinterpret_cast(malloc(len_))), own_(true) { if (!req_) gu_throw_error (ENOMEM) << "Could not allocate state request v1"; if (sst_req_len > INT32_MAX || sst_req_len < 0) gu_throw_error (EMSGSIZE) << "SST request length (" << sst_req_len << ") unrepresentable"; if (ist_req_len > INT32_MAX || ist_req_len < 0) gu_throw_error (EMSGSIZE) << "IST request length (" << sst_req_len << ") unrepresentable"; char* ptr(req_); strcpy (ptr, MAGIC.c_str()); ptr += MAGIC.length() + 1; uint32_t* tmp(reinterpret_cast(ptr)); *tmp = htogl(sst_req_len); ptr += sizeof(uint32_t); memcpy (ptr, sst_req, sst_req_len); ptr += sst_req_len; tmp = reinterpret_cast(ptr); *tmp = htogl(ist_req_len); ptr += sizeof(uint32_t); memcpy (ptr, ist_req, ist_req_len); assert ((ptr - req_) == (len_ - ist_req_len)); } // takes ownership over str buffer StateRequest_v1::StateRequest_v1 (const void* str, ssize_t str_len) : len_(str_len), req_(reinterpret_cast(const_cast(str))), own_(false) { if (sst_offset() + 2*sizeof(uint32_t) > size_t(len_)) { assert(0); gu_throw_error (EINVAL) << "State transfer request is too short: " << len_ << ", must be at least: " << (sst_offset() + 2*sizeof(uint32_t)); } if (strncmp (req_, MAGIC.c_str(), MAGIC.length())) { assert(0); gu_throw_error (EINVAL) << "Wrong magic signature in state request v1."; } if (sst_offset() + sst_len() + 2*sizeof(uint32_t) > size_t(len_)) { gu_throw_error (EINVAL) << "Malformed state request v1: sst length: " << sst_len() << ", total length: " << len_; } if (ist_offset() + ist_len() + sizeof(uint32_t) != size_t(len_)) { gu_throw_error (EINVAL) << "Malformed state request v1: parsed field " "length " << sst_len() << " is not equal to total request length " << len_; } } static ReplicatorSMM::StateRequest* read_state_request (const void* const req, size_t const req_len) { const char* const str(reinterpret_cast(req)); if (req_len > StateRequest_v1::MAGIC.length() && !strncmp(str, StateRequest_v1::MAGIC.c_str(), StateRequest_v1::MAGIC.length())) { return (new StateRequest_v1(req, req_len)); } else { return (new StateRequest_v0(req, req_len)); } } 
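// Editorial illustration (not part of the original source): the dispatch in
// read_state_request() above keys purely on whether the raw request starts
// with the NUL-terminated "STRv1" magic; a v1 request then carries
// MAGIC '\0' | uint32 sst_len | sst | uint32 ist_len | ist, while anything
// else is treated as a bare v0 SST request.  A minimal sketch of the same
// sniffing step, with illustrative names only (it relies on ::strncmp, which
// this translation unit already uses just above):
namespace str_sketch
{
    enum Version { VER_0, VER_1 };

    inline Version sniff_version (const void* const req, size_t const req_len)
    {
        static char const MAGIC[] = "STRv1";
        size_t const magic_len (sizeof(MAGIC) - 1); // 5 bytes, without NUL

        const char* const str (static_cast<const char*>(req));

        if (req_len > magic_len && 0 == ::strncmp (str, MAGIC, magic_len))
            return VER_1;

        return VER_0;
    }
}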
class IST_request { public: IST_request() : peer_(), uuid_(), last_applied_(), group_seqno_() { } IST_request(const std::string& peer, const wsrep_uuid_t& uuid, wsrep_seqno_t last_applied, wsrep_seqno_t group_seqno) : peer_(peer), uuid_(uuid), last_applied_(last_applied), group_seqno_(group_seqno) { } const std::string& peer() const { return peer_ ; } const wsrep_uuid_t& uuid() const { return uuid_ ; } wsrep_seqno_t last_applied() const { return last_applied_; } wsrep_seqno_t group_seqno() const { return group_seqno_; } private: friend std::ostream& operator<<(std::ostream&, const IST_request&); friend std::istream& operator>>(std::istream&, IST_request&); std::string peer_; wsrep_uuid_t uuid_; wsrep_seqno_t last_applied_; wsrep_seqno_t group_seqno_; }; std::ostream& operator<<(std::ostream& os, const IST_request& istr) { return (os << istr.uuid_ << ":" << istr.last_applied_ << "-" << istr.group_seqno_ << "|" << istr.peer_); } std::istream& operator>>(std::istream& is, IST_request& istr) { char c; return (is >> istr.uuid_ >> c >> istr.last_applied_ >> c >> istr.group_seqno_ >> c >> istr.peer_); } static void get_ist_request(const ReplicatorSMM::StateRequest* str, IST_request* istr) { assert(str->ist_len()); std::string ist_str(reinterpret_cast(str->ist_req()), str->ist_len()); std::istringstream is(ist_str); is >> *istr; } static bool sst_is_trivial (const void* const req, size_t const len) { /* Check that the first string in request == ReplicatorSMM::TRIVIAL_SST */ size_t const trivial_len = strlen(ReplicatorSMM::TRIVIAL_SST) + 1; return (len >= trivial_len && !memcmp (req, ReplicatorSMM::TRIVIAL_SST, trivial_len)); } wsrep_seqno_t ReplicatorSMM::donate_sst(void* const recv_ctx, const StateRequest& streq, const wsrep_gtid_t& state_id, bool const bypass) { wsrep_cb_status const err(sst_donate_cb_(app_ctx_, recv_ctx, streq.sst_req(), streq.sst_len(), &state_id, 0, 0, bypass)); /* The fix to codership/galera#284 may break backward comatibility due to * different (now correct) interpretation of retrun value. Default to old * interpretation which is forward compatible with the new one. */ #if NO_BACKWARD_COMPATIBILITY wsrep_seqno_t const ret (WSREP_CB_SUCCESS == err ? state_id.seqno : -ECANCELED); #else wsrep_seqno_t const ret (int(err) >= 0 ? state_id.seqno : int(err)); #endif /* NO_BACKWARD_COMPATIBILITY */ if (ret < 0) { log_error << "SST " << (bypass ? "bypass " : "") << "failed: " << err; } return ret; } void ReplicatorSMM::process_state_req(void* recv_ctx, const void* req, size_t req_size, wsrep_seqno_t const seqno_l, wsrep_seqno_t const donor_seq) { assert(recv_ctx != 0); assert(seqno_l > -1); assert(req != 0); LocalOrder lo(seqno_l); gu_trace(local_monitor_.enter(lo)); apply_monitor_.drain(donor_seq); if (co_mode_ != CommitOrder::BYPASS) commit_monitor_.drain(donor_seq); state_.shift_to(S_DONOR); StateRequest* const streq (read_state_request (req, req_size)); // somehow the following does not work, string is initialized beyond // the first \0: //std::string const req_str(reinterpret_cast(streq->sst_req()), // streq->sst_len()); // have to resort to C ways. 
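// Editorial illustration (not part of the original source): the complaint in
// the comment above is that std::string(ptr, len) copies exactly len bytes,
// including anything after an embedded NUL, whereas the intent is to stop at
// the first NUL -- hence the strndup() below.  Assuming a POSIX strnlen(),
// an equivalent single-copy construction would look like this (illustrative
// sketch only, kept out of the build):
#if 0
        const char* const p
            (reinterpret_cast<const char*>(streq->sst_req()));
        std::string const req_str (p, ::strnlen (p, streq->sst_len()));
#endif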
char* const tmp(strndup(reinterpret_cast(streq->sst_req()), streq->sst_len())); std::string const req_str(tmp); free (tmp); bool const skip_state_transfer (sst_is_trivial(streq->sst_req(), streq->sst_len()) /* compatibility with older garbd, to be removed in * the next release (2.1)*/ || req_str == std::string(WSREP_STATE_TRANSFER_NONE) ); wsrep_seqno_t rcode (0); bool join_now = true; if (!skip_state_transfer) { if (streq->ist_len()) { IST_request istr; get_ist_request(streq, &istr); if (istr.uuid() == state_uuid_) { log_info << "IST request: " << istr; try { gcache_.seqno_lock(istr.last_applied() + 1); } catch(gu::NotFound& nf) { log_info << "IST first seqno " << istr.last_applied() + 1 << " not found from cache, falling back to SST"; // @todo: close IST channel explicitly goto full_sst; } if (streq->sst_len()) // if joiner is waiting for SST, notify it { wsrep_gtid_t const state_id = { istr.uuid(), istr.last_applied() }; rcode = donate_sst(recv_ctx, *streq, state_id, true); // we will join in sst_sent. join_now = false; } if (rcode >= 0) { try { // Note: End of IST range must be cc_seqno_ instead // of istr.group_seqno() in case there are CCs between // sending and delivering STR. If there are no // intermediate CCs, cc_seqno_ == istr.group_seqno(). // Then duplicate message concern in #746 will be // releaved. ist_senders_.run(config_, istr.peer(), istr.last_applied() + 1, cc_seqno_, protocol_version_); } catch (gu::Exception& e) { log_error << "IST failed: " << e.what(); rcode = -e.get_errno(); } } else { log_error << "Failed to bypass SST"; } goto out; } } full_sst: if (streq->sst_len()) { assert(0 == rcode); wsrep_gtid_t const state_id = { state_uuid_, donor_seq }; rcode = donate_sst(recv_ctx, *streq, state_id, false); // we will join in sst_sent. join_now = false; } else { log_warn << "SST request is null, SST canceled."; rcode = -ECANCELED; } } out: delete streq; local_monitor_.leave(lo); if (join_now || rcode < 0) { gcs_.join(rcode < 0 ? rcode : donor_seq); } } void ReplicatorSMM::prepare_for_IST (void*& ptr, ssize_t& len, const wsrep_uuid_t& group_uuid, wsrep_seqno_t const group_seqno) { if (state_uuid_ != group_uuid) { gu_throw_error (EPERM) << "Local state UUID (" << state_uuid_ << ") does not match group state UUID (" << group_uuid << ')'; } wsrep_seqno_t const local_seqno(STATE_SEQNO()); if (local_seqno < 0) { gu_throw_error (EPERM) << "Local state seqno is undefined"; } assert(local_seqno < group_seqno); std::ostringstream os; std::string recv_addr = ist_receiver_.prepare( local_seqno + 1, group_seqno, protocol_version_); os << IST_request(recv_addr, state_uuid_, local_seqno, group_seqno); char* str = strdup (os.str().c_str()); // cppcheck-suppress nullPointer if (!str) gu_throw_error (ENOMEM) << "Failed to allocate IST buffer."; len = strlen(str) + 1; ptr = str; } ReplicatorSMM::StateRequest* ReplicatorSMM::prepare_state_request (const void* const sst_req, ssize_t const sst_req_len, const wsrep_uuid_t& group_uuid, wsrep_seqno_t const group_seqno) { try { switch (str_proto_ver_) { case 0: return new StateRequest_v0 (sst_req, sst_req_len); case 1: case 2: { void* ist_req(0); ssize_t ist_req_len(0); try { gu_trace(prepare_for_IST (ist_req, ist_req_len, group_uuid, group_seqno)); } catch (gu::Exception& e) { log_warn << "Failed to prepare for incremental state transfer: " << e.what() << ". 
IST will be unavailable."; } StateRequest* ret = new StateRequest_v1 (sst_req, sst_req_len, ist_req, ist_req_len); free (ist_req); return ret; } default: gu_throw_fatal << "Unsupported STR protocol: " << str_proto_ver_; } } catch (std::exception& e) { log_fatal << "State request preparation failed, aborting: " << e.what(); } catch (...) { log_fatal << "State request preparation failed, aborting: unknown exception"; } abort(); } static bool retry_str(int ret) { return (ret == -EAGAIN || ret == -ENOTCONN); } void ReplicatorSMM::send_state_request (const StateRequest* const req) { long ret; long tries = 0; gu_uuid_t ist_uuid = {{0, }}; gcs_seqno_t ist_seqno = GCS_SEQNO_ILL; if (req->ist_len()) { IST_request istr; get_ist_request(req, &istr); ist_uuid = to_gu_uuid(istr.uuid()); ist_seqno = istr.last_applied(); } do { tries++; gcs_seqno_t seqno_l; ret = gcs_.request_state_transfer(str_proto_ver_, req->req(), req->len(), sst_donor_, ist_uuid, ist_seqno, &seqno_l); if (ret < 0) { if (!retry_str(ret)) { log_error << "Requesting state transfer failed: " << ret << "(" << strerror(-ret) << ")"; } else if (1 == tries) { log_info << "Requesting state transfer failed: " << ret << "(" << strerror(-ret) << "). " << "Will keep retrying every " << sst_retry_sec_ << " second(s)"; } } if (seqno_l != GCS_SEQNO_ILL) { /* Check that we're not running out of space in monitor. */ if (local_monitor_.would_block(seqno_l)) { long const seconds = sst_retry_sec_ * tries; log_error << "We ran out of resources, seemingly because " << "we've been unsuccessfully requesting state " << "transfer for over " << seconds << " seconds. " << "Please check that there is " << "at least one fully synced member in the group. " << "Application must be restarted."; ret = -EDEADLK; } else { // we are already holding local monitor LocalOrder lo(seqno_l); local_monitor_.self_cancel(lo); } } } while (retry_str(ret) && (usleep(sst_retry_sec_ * 1000000), true)); if (ret >= 0) { if (1 == tries) { log_info << "Requesting state transfer: success, donor: " << ret; } else { log_info << "Requesting state transfer: success after " << tries << " tries, donor: " << ret; } } else { sst_state_ = SST_REQ_FAILED; st_.set(state_uuid_, STATE_SEQNO(), safe_to_bootstrap_); st_.mark_safe(); if (state_() > S_CLOSING) { log_fatal << "State transfer request failed unrecoverably: " << -ret << " (" << strerror(-ret) << "). Most likely " << "it is due to inability to communicate with the " << "cluster primary component. 
Restart required."; abort(); } else { // connection is being closed, send failure is expected } } } void ReplicatorSMM::request_state_transfer (void* recv_ctx, const wsrep_uuid_t& group_uuid, wsrep_seqno_t const group_seqno, const void* const sst_req, ssize_t const sst_req_len) { assert(sst_req_len >= 0); StateRequest* const req(prepare_state_request(sst_req, sst_req_len, group_uuid, group_seqno)); gu::Lock lock(sst_mutex_); st_.mark_unsafe(); send_state_request (req); state_.shift_to(S_JOINING); sst_state_ = SST_WAIT; /* while waiting for state transfer to complete is a good point * to reset gcache, since it may involve some IO too */ gcache_.seqno_reset(to_gu_uuid(group_uuid), group_seqno); if (sst_req_len != 0) { if (sst_is_trivial(sst_req, sst_req_len)) { sst_uuid_ = group_uuid; sst_seqno_ = group_seqno; } else { lock.wait(sst_cond_); } if (sst_uuid_ != group_uuid) { log_fatal << "Application received wrong state: " << "\n\tReceived: " << sst_uuid_ << "\n\tRequired: " << group_uuid; sst_state_ = SST_FAILED; log_fatal << "Application state transfer failed. This is " << "unrecoverable condition, restart required."; st_.set(sst_uuid_, sst_seqno_, safe_to_bootstrap_); st_.mark_safe(); abort(); } else { update_state_uuid (sst_uuid_); apply_monitor_.set_initial_position(-1); apply_monitor_.set_initial_position(sst_seqno_); if (co_mode_ != CommitOrder::BYPASS) { commit_monitor_.set_initial_position(-1); commit_monitor_.set_initial_position(sst_seqno_); } log_debug << "Installed new state: " << state_uuid_ << ":" << sst_seqno_; } } else { assert (state_uuid_ == group_uuid); } st_.mark_safe(); if (req->ist_len() > 0) { // IST is prepared only with str proto ver 1 and above if (STATE_SEQNO() < group_seqno) { log_info << "Receiving IST: " << (group_seqno - STATE_SEQNO()) << " writesets, seqnos " << STATE_SEQNO() << "-" << group_seqno; ist_receiver_.ready(); recv_IST(recv_ctx); sst_seqno_ = ist_receiver_.finished(); // Note: apply_monitor_ must be drained to avoid race between // IST appliers and GCS appliers, GCS action source may // provide actions that have already been applied. apply_monitor_.drain(sst_seqno_); log_info << "IST received: " << state_uuid_ << ":" << sst_seqno_; } else { (void)ist_receiver_.finished(); } } delete req; } void ReplicatorSMM::recv_IST(void* recv_ctx) { while (true) { TrxHandle* trx(0); int err; try { if ((err = ist_receiver_.recv(&trx)) == 0) { assert(trx != 0); TrxHandleLock lock(*trx); // Verify checksum before applying. This is also required // to synchronize with possible background checksum thread. 
trx->verify_checksum(); if (trx->depends_seqno() == -1) { ApplyOrder ao(*trx); apply_monitor_.self_cancel(ao); if (co_mode_ != CommitOrder::BYPASS) { CommitOrder co(*trx, co_mode_); commit_monitor_.self_cancel(co); } } else { // replicating and certifying stages have been // processed on donor, just adjust states here trx->set_state(TrxHandle::S_REPLICATING); trx->set_state(TrxHandle::S_CERTIFYING); apply_trx(recv_ctx, trx); GU_DBUG_SYNC_WAIT("recv_IST_after_apply_trx"); } } else { return; } trx->unref(); } catch (gu::Exception& e) { log_fatal << "receiving IST failed, node restart required: " << e.what(); if (trx) { log_fatal << "failed trx: " << *trx; } st_.mark_corrupt(); gcs_.close(); gu_abort(); } } } } /* namespace galera */ galera-3-25.3.20/galera/src/key_entry_os.hpp0000644000015300001660000001061013042054732020401 0ustar jenkinsjenkins// // Copyright (C) 2012 Codership Oy // #ifndef GALERA_KEY_ENTRY_OS_HPP #define GALERA_KEY_ENTRY_OS_HPP #include "key_os.hpp" namespace galera { class TrxHandle; class KeyEntryOS { public: KeyEntryOS(const KeyOS& row_key) : key_(row_key), ref_trx_(0), ref_full_trx_(0), ref_shared_trx_(0), ref_full_shared_trx_(0) {} template KeyEntryOS(int version, Ci begin, Ci end, uint8_t flags) : key_(version, begin, end, flags), ref_trx_(0), ref_full_trx_(0), ref_shared_trx_(0), ref_full_shared_trx_(0) {} KeyEntryOS(const KeyEntryOS& other) : key_(other.key_), ref_trx_(other.ref_trx_), ref_full_trx_(other.ref_full_trx_), ref_shared_trx_(other.ref_shared_trx_), ref_full_shared_trx_(other.ref_full_shared_trx_) {} ~KeyEntryOS() { assert(ref_trx_ == 0); assert(ref_full_trx_ == 0); assert(ref_shared_trx_ == 0); assert(ref_full_shared_trx_ == 0); } const KeyOS& get_key() const { return key_; } const KeyOS& get_key(int version) const { return key_; } void ref(TrxHandle* trx, bool full_key) { #ifndef NDEBUG assert_ref(trx, full_key); #endif /* NDEBUG */ ref_trx_ = trx; if (full_key == true) { ref_full_trx_ = trx; } } void unref(TrxHandle* trx, bool full_key) { assert(ref_trx_ != 0); if (ref_trx_ == trx) ref_trx_ = 0; if (full_key == true && ref_full_trx_ == trx) { ref_full_trx_ = 0; } else { #ifndef NDEBUG assert_unref(trx); #endif /* NDEBUG */ } } void ref_shared(TrxHandle* trx, bool full_key) { #ifndef NDEBUG assert_ref_shared(trx, full_key); #endif /* NDEBUG */ ref_shared_trx_ = trx; if (full_key == true) { ref_full_shared_trx_ = trx; } } void unref_shared(TrxHandle* trx, bool full_key) { assert(ref_shared_trx_ != 0); if (ref_shared_trx_ == trx) ref_shared_trx_ = 0; if (full_key == true && ref_full_shared_trx_ == trx) { ref_full_shared_trx_ = 0; } else { #ifndef NDEBUG assert_unref_shared(trx); #endif /* NDEBUG */ } } const TrxHandle* ref_trx() const { return ref_trx_; } const TrxHandle* ref_full_trx() const { return ref_full_trx_; } const TrxHandle* ref_shared_trx() const { return ref_shared_trx_; } const TrxHandle* ref_full_shared_trx() const { return ref_full_shared_trx_; } size_t size() const { return key_.size() + sizeof(*this); } private: void operator=(const KeyEntryOS&); KeyOS key_; TrxHandle* ref_trx_; TrxHandle* ref_full_trx_; TrxHandle* ref_shared_trx_; TrxHandle* ref_full_shared_trx_; #ifndef NDEBUG void assert_ref(TrxHandle*, bool) const; void assert_unref(TrxHandle*) const; void assert_ref_shared(TrxHandle*, bool) const; void assert_unref_shared(TrxHandle*) const; #endif /* NDEBUG */ }; class KeyEntryPtrHash { public: size_t operator()(const KeyEntryOS* const ke) const { return ke->get_key().hash(); } }; class KeyEntryPtrHashAll { public: size_t 
operator()(const KeyEntryOS* const ke) const { return ke->get_key().hash_with_flags(); } }; class KeyEntryPtrEqual { public: bool operator()(const KeyEntryOS* const left, const KeyEntryOS* const right) const { return left->get_key() == right->get_key(); } }; class KeyEntryPtrEqualAll { public: bool operator()(const KeyEntryOS* const left, const KeyEntryOS* const right) const { return left->get_key().equal_all(right->get_key()); } }; } #endif // GALERA_KEY_ENTRY_HPP galera-3-25.3.20/galera/src/trx_handle.cpp0000644000015300001660000002712313042054732020021 0ustar jenkinsjenkins// // Copyright (C) 2010-2013 Codership Oy // #include "trx_handle.hpp" #include "uuid.hpp" #include "galera_exception.hpp" #include "gu_serialize.hpp" const galera::TrxHandle::Params galera::TrxHandle::Defaults(".", -1, KeySet::MAX_VERSION); std::ostream& galera::operator<<(std::ostream& os, TrxHandle::State s) { switch (s) { case TrxHandle::S_EXECUTING: return (os << "EXECUTING"); case TrxHandle::S_MUST_ABORT: return (os << "MUST_ABORT"); case TrxHandle::S_ABORTING: return (os << "ABORTING"); case TrxHandle::S_REPLICATING: return (os << "REPLICATING"); case TrxHandle::S_CERTIFYING: return (os << "CERTIFYING"); case TrxHandle::S_MUST_CERT_AND_REPLAY: return (os << "MUST_CERT_AND_REPLAY"); case TrxHandle::S_MUST_REPLAY_AM: return (os << "MUST_REPLAY_AM"); case TrxHandle::S_MUST_REPLAY_CM: return (os << "MUST_REPLAY_CM"); case TrxHandle::S_MUST_REPLAY: return (os << "MUST_REPLAY"); case TrxHandle::S_REPLAYING: return (os << "REPLAYING"); case TrxHandle::S_APPLYING: return (os << "APPLYING"); case TrxHandle::S_COMMITTING: return (os << "COMMITTING"); case TrxHandle::S_COMMITTED: return (os << "COMMITTED"); case TrxHandle::S_ROLLED_BACK: return (os << "ROLLED_BACK"); } gu_throw_fatal << "invalid state " << static_cast(s); } std::ostream& galera::operator<<(std::ostream& os, const TrxHandle& th) { os << "source: " << th.source_id_ << " version: " << th.version_ << " local: " << th.local_ << " state: " << th.state_() << " flags: " << th.write_set_flags_ << " conn_id: " << int64_t(th.conn_id_) << " trx_id: " << int64_t(th.trx_id_) // for readability << " seqnos (l: " << th.local_seqno_ << ", g: " << th.global_seqno_ << ", s: " << th.last_seen_seqno_ << ", d: " << th.depends_seqno_ << ", ts: " << th.timestamp_ << ")"; if (th.write_set_in().annotated()) { os << "\nAnnotation:\n"; th.write_set_in().write_annotation(os); os << std::endl; } return os; } galera::TrxHandle::Fsm::TransMap galera::TrxHandle::trans_map_; static class TransMapBuilder { public: void add(galera::TrxHandle::State from, galera::TrxHandle::State to) { using galera::TrxHandle; using std::make_pair; typedef TrxHandle::Transition Transition; typedef TrxHandle::Fsm::TransAttr TransAttr; TrxHandle::Fsm::TransMap& trans_map(TrxHandle::trans_map_); trans_map.insert_unique(make_pair(Transition(from, to), TransAttr())); } TransMapBuilder() { using galera::TrxHandle; add(TrxHandle::S_EXECUTING, TrxHandle::S_MUST_ABORT); add(TrxHandle::S_EXECUTING, TrxHandle::S_REPLICATING); add(TrxHandle::S_EXECUTING, TrxHandle::S_ROLLED_BACK); add(TrxHandle::S_MUST_ABORT, TrxHandle::S_MUST_CERT_AND_REPLAY); add(TrxHandle::S_MUST_ABORT, TrxHandle::S_MUST_REPLAY_AM); add(TrxHandle::S_MUST_ABORT, TrxHandle::S_MUST_REPLAY_CM); add(TrxHandle::S_MUST_ABORT, TrxHandle::S_MUST_REPLAY); add(TrxHandle::S_MUST_ABORT, TrxHandle::S_MUST_ABORT); add(TrxHandle::S_MUST_ABORT, TrxHandle::S_ABORTING); add(TrxHandle::S_ABORTING, TrxHandle::S_ROLLED_BACK); add(TrxHandle::S_REPLICATING, 
TrxHandle::S_CERTIFYING); add(TrxHandle::S_REPLICATING, TrxHandle::S_MUST_CERT_AND_REPLAY); add(TrxHandle::S_REPLICATING, TrxHandle::S_MUST_ABORT); add(TrxHandle::S_CERTIFYING, TrxHandle::S_MUST_ABORT); add(TrxHandle::S_CERTIFYING, TrxHandle::S_APPLYING); add(TrxHandle::S_CERTIFYING, TrxHandle::S_MUST_CERT_AND_REPLAY); add(TrxHandle::S_CERTIFYING, TrxHandle::S_MUST_REPLAY_AM); // trx replay add(TrxHandle::S_APPLYING, TrxHandle::S_MUST_ABORT); add(TrxHandle::S_APPLYING, TrxHandle::S_COMMITTING); add(TrxHandle::S_COMMITTING, TrxHandle::S_COMMITTED); add(TrxHandle::S_COMMITTING, TrxHandle::S_MUST_ABORT); add(TrxHandle::S_MUST_CERT_AND_REPLAY, TrxHandle::S_CERTIFYING); add(TrxHandle::S_MUST_CERT_AND_REPLAY, TrxHandle::S_ABORTING); add(TrxHandle::S_MUST_REPLAY_AM, TrxHandle::S_MUST_REPLAY_CM); add(TrxHandle::S_MUST_REPLAY_CM, TrxHandle::S_MUST_REPLAY); add(TrxHandle::S_MUST_REPLAY, TrxHandle::S_REPLAYING); add(TrxHandle::S_REPLAYING, TrxHandle::S_COMMITTED); } } trans_map_builder_; size_t galera::TrxHandle::Mac::serialize(gu::byte_t* buf, size_t buflen, size_t offset) const { // header: // type: 1 byte // len: 1 byte return gu::serialize2(uint16_t(0), buf, buflen, offset); } size_t galera::TrxHandle::Mac::unserialize(const gu::byte_t* buf, size_t buflen, size_t offset) { uint16_t hdr; offset = gu::unserialize2(buf, buflen, offset, hdr); switch ((hdr >> 8) & 0xff) { case 0: break; default: log_warn << "unrecognized mac type" << ((hdr >> 8) & 0xff); } // skip over the body offset += (hdr & 0xff); return offset; } size_t galera::TrxHandle::Mac::serial_size() const { return 2; // sizeof(uint16_t); // Hm, isn't is somewhat short for mac? } size_t galera::TrxHandle::serialize(gu::byte_t* buf, size_t buflen, size_t offset)const { if (new_version()) { assert(0); } // we don't use serialize for that uint32_t hdr((version_ << 24) | (write_set_flags_ & 0xff)); offset = gu::serialize4(hdr, buf, buflen, offset); offset = galera::serialize(source_id_, buf, buflen, offset); offset = gu::serialize8(conn_id_, buf, buflen, offset); offset = gu::serialize8(trx_id_, buf, buflen, offset); offset = gu::serialize8(last_seen_seqno_, buf, buflen, offset); offset = gu::serialize8(timestamp_, buf, buflen, offset); if (has_annotation()) { offset = gu::serialize4(annotation_, buf, buflen, offset); } if (has_mac()) { offset = mac_.serialize(buf, buflen, offset); } return offset; } size_t galera::TrxHandle::unserialize(const gu::byte_t* const buf, size_t const buflen, size_t offset) { try { version_ = WriteSetNG::version(buf, buflen); switch (version_) { case 0: case 1: case 2: write_set_flags_ = buf[0]; write_set_.set_version(version_); offset = 4; offset = galera::unserialize(buf, buflen, offset, source_id_); offset = gu::unserialize8(buf, buflen, offset, conn_id_); offset = gu::unserialize8(buf, buflen, offset, trx_id_); assert(WSREP_SEQNO_UNDEFINED == last_seen_seqno_); offset = gu::unserialize8(buf, buflen, offset, last_seen_seqno_); assert(last_seen_seqno_ >= 0); offset = gu::unserialize8(buf, buflen, offset, timestamp_); if (has_annotation()) { offset = gu::unserialize4(buf, buflen, offset, annotation_); } if (has_mac()) { offset = mac_.unserialize(buf, buflen, offset); } set_write_set_buffer(buf + offset, buflen - offset); break; case 3: write_set_in_.read_buf (buf, buflen); write_set_flags_ = wsng_flags_to_trx_flags(write_set_in_.flags()); source_id_ = write_set_in_.source_id(); conn_id_ = write_set_in_.conn_id(); trx_id_ = write_set_in_.trx_id(); #ifndef NDEBUG write_set_in_.verify_checksum(); if (local_) 
assert(write_set_in_.last_seen() == last_seen_seqno_); else assert(WSREP_SEQNO_UNDEFINED == last_seen_seqno_); #endif if (write_set_in_.certified()) { assert(!local_); assert(WSREP_SEQNO_UNDEFINED == last_seen_seqno_); write_set_flags_ |= F_PREORDERED; } else { last_seen_seqno_ = write_set_in_.last_seen(); assert(last_seen_seqno_ >= 0); } timestamp_ = write_set_in_.timestamp(); break; default: gu_throw_error(EPROTONOSUPPORT); } return buflen; } catch (gu::Exception& e) { GU_TRACE(e); log_fatal << "Writeset deserialization failed: " << e.what() << std::endl << "WS flags: " << write_set_flags_ << std::endl << "Trx proto: " << version_ << std::endl << "Trx source: " << source_id_ << std::endl << "Trx conn_id: " << conn_id_ << std::endl << "Trx trx_id: " << trx_id_ << std::endl << "Trx last_seen: " << last_seen_seqno_; throw; } } size_t galera::TrxHandle::serial_size() const { assert (new_version() == false); return (4 // hdr + galera::serial_size(source_id_) + 8 // serial_size(trx.conn_id_) + 8 // serial_size(trx.trx_id_) + 8 // serial_size(trx.last_seen_seqno_) + 8 // serial_size(trx.timestamp_) + (has_annotation() ? gu::serial_size4(annotation_) : 0) + (has_mac() ? mac_.serial_size() : 0)); } void galera::TrxHandle::apply (void* recv_ctx, wsrep_apply_cb_t apply_cb, const wsrep_trx_meta_t& meta) const { wsrep_cb_status_t err(WSREP_CB_SUCCESS); if (new_version()) { const DataSetIn& ws(write_set_in_.dataset()); ws.rewind(); // make sure we always start from the beginning for (ssize_t i = 0; WSREP_CB_SUCCESS == err && i < ws.count(); ++i) { gu::Buf buf = ws.next(); err = apply_cb (recv_ctx, buf.ptr, buf.size, trx_flags_to_wsrep_flags(flags()), &meta); } } else { const gu::byte_t* buf(write_set_buffer().first); const size_t buf_len(write_set_buffer().second); size_t offset(0); while (offset < buf_len && WSREP_CB_SUCCESS == err) { // Skip key segment std::pair k( galera::WriteSet::segment(buf, buf_len, offset)); offset = k.first + k.second; // Data part std::pair d( galera::WriteSet::segment(buf, buf_len, offset)); offset = d.first + d.second; err = apply_cb (recv_ctx, buf + d.first, d.second, trx_flags_to_wsrep_flags(flags()), &meta); } assert(offset == buf_len); } if (gu_unlikely(err > 0)) { std::ostringstream os; os << "Failed to apply app buffer: seqno: " << global_seqno() << ", status: " << err; galera::ApplyException ae(os.str(), err); GU_TRACE(ae); throw ae; } return; } /* we don't care about any failures in applying unordered events */ void galera::TrxHandle::unordered(void* recv_ctx, wsrep_unordered_cb_t cb) const { if (new_version() && NULL != cb && write_set_in_.unrdset().count() > 0) { const DataSetIn& unrd(write_set_in_.unrdset()); for (int i(0); i < unrd.count(); ++i) { const gu::Buf data = unrd.next(); cb(recv_ctx, data.ptr, data.size); } } } galera-3-25.3.20/galera/src/galera_service_thd.hpp0000644000015300001660000000343313042054732021506 0ustar jenkinsjenkins/* * Copyright (C) 2010-2013 Codership Oy */ #ifndef GALERA_SERVICE_THD_HPP #define GALERA_SERVICE_THD_HPP #include "galera_gcs.hpp" #include #include // gu::Mutex and gu::Cond namespace galera { class ServiceThd { public: ServiceThd (GcsI& gcs, gcache::GCache& gcache); ~ServiceThd (); /*! flush all ongoing operations (before processing CC) */ void flush (); /*! reset to initial state before gcs (re)connect */ void reset(); /* !!! * The following methods must be invoked only within a monitor, * so that monitors drain during CC ensures that no outdated * actions are scheduled with the service thread after that. * !!! */ /*! 
schedule seqno to be reported as last committed */ void report_last_committed (gcs_seqno_t seqno); /*! release write sets up to and including seqno */ void release_seqno (gcs_seqno_t seqno); private: static const uint32_t A_NONE; struct Data { gcs_seqno_t last_committed_; gcs_seqno_t release_seqno_; uint32_t act_; Data() : last_committed_(0), release_seqno_ (0), act_ (A_NONE) {} }; gcache::GCache& gcache_; GcsI& gcs_; gu_thread_t thd_; gu::Mutex mtx_; gu::Cond cond_; // service request condition gu::Cond flush_; // flush condition Data data_; static void* thd_func (void*); ServiceThd (const ServiceThd&); ServiceThd& operator= (const ServiceThd&); }; } #endif /* GALERA_SERVICE_THD_HPP */ galera-3-25.3.20/galera/src/data_set.cpp0000644000015300001660000000013013042054732017442 0ustar jenkinsjenkins// // Copyright (C) 2013 Codership Oy // #include "data_set.hpp" galera-3-25.3.20/galera/src/galera_info.cpp0000644000015300001660000000531413042054732020135 0ustar jenkinsjenkins// Copyright (C) 2009-2013 Codership Oy #include "galera_info.hpp" #include #include static size_t view_info_size (int members) { return (sizeof(wsrep_view_info_t) + members * sizeof(wsrep_member_info_t)); } /* create view info out of configuration message */ wsrep_view_info_t* galera_view_info_create (const gcs_act_conf_t* conf, bool st_required) { wsrep_view_info_t* ret = static_cast( malloc(view_info_size(conf ? conf->memb_num : 0))); if (ret) { if (conf) { const char* str = conf->data; int m; wsrep_uuid_t uuid; memcpy(uuid.data, conf->uuid, sizeof(uuid.data)); wsrep_seqno_t seqno = conf->seqno != GCS_SEQNO_ILL ? conf->seqno : WSREP_SEQNO_UNDEFINED; wsrep_gtid_t gtid = { uuid, seqno }; ret->state_id = gtid; ret->view = conf->conf_id; ret->status = conf->conf_id != -1 ? WSREP_VIEW_PRIMARY : WSREP_VIEW_NON_PRIMARY; ret->state_gap = st_required; ret->my_idx = conf->my_idx; ret->memb_num = conf->memb_num; ret->proto_ver = conf->appl_proto_ver; for (m = 0; m < ret->memb_num; m++) { wsrep_member_info_t* member = &ret->members[m]; size_t id_len = strlen(str); gu_uuid_scan (str, id_len, reinterpret_cast(&member->id)); str = str + id_len + 1; strncpy(member->name, str, sizeof(member->name) - 1); member->name[sizeof(member->name) - 1] = '\0'; str = str + strlen(str) + 1; strncpy(member->incoming, str, sizeof(member->incoming) - 1); member->incoming[sizeof(member->incoming) - 1] = '\0'; str = str + strlen(str) + 1; str += sizeof(gcs_seqno_t); // skip cached seqno. 
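// Editorial illustration (not part of the original source): each member
// record in conf->data, as walked above, consists of three NUL-terminated
// strings (node id, node name, incoming address) followed by a raw cached
// gcs_seqno_t that this function skips.  A minimal sketch of stepping over
// one such record, assuming that same layout; the helper name is
// illustrative only and the block is kept out of the build:
#if 0
            static const char* skip_member_record (const char* p)
            {
                p += strlen (p) + 1;          // node id string
                p += strlen (p) + 1;          // node name
                p += strlen (p) + 1;          // incoming address
                p += sizeof(gcs_seqno_t);     // cached seqno, unused here
                return p;
            }
#endif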
} } else { memset(&ret->state_id, 0, sizeof(ret->state_id)); ret->view = -1; ret->status = WSREP_VIEW_NON_PRIMARY; ret->state_gap = false; ret->my_idx = -1; ret->memb_num = 0; ret->proto_ver = -1; } } return ret; } /* make a copy of view info object */ wsrep_view_info_t* galera_view_info_copy (const wsrep_view_info_t* vi) { size_t ret_size = view_info_size (vi->memb_num); wsrep_view_info_t* ret = static_cast(malloc (ret_size)); if (ret) { memcpy (ret, vi, ret_size); } return ret; } galera-3-25.3.20/galera/src/replicator_smm.cpp0000644000015300001660000015626213042054732020720 0ustar jenkinsjenkins// // Copyright (C) 2010-2014 Codership Oy // #include "galera_common.hpp" #include "replicator_smm.hpp" #include "galera_exception.hpp" #include "uuid.hpp" #include "galera_info.hpp" #include #include #include #include static void apply_trx_ws(void* recv_ctx, wsrep_apply_cb_t apply_cb, wsrep_commit_cb_t commit_cb, const galera::TrxHandle& trx, const wsrep_trx_meta_t& meta) { using galera::TrxHandle; static const size_t max_apply_attempts(4); size_t attempts(1); do { try { if (trx.is_toi()) { log_debug << "Executing TO isolated action: " << trx; } gu_trace(trx.apply(recv_ctx, apply_cb, meta)); if (trx.is_toi()) { log_debug << "Done executing TO isolated action: " << trx.global_seqno(); } break; } catch (galera::ApplyException& e) { if (trx.is_toi()) { log_warn << "Ignoring error for TO isolated action: " << trx; break; } else { int const err(e.status()); if (err > 0) { wsrep_bool_t unused(false); wsrep_cb_status const rcode( commit_cb( recv_ctx, TrxHandle::trx_flags_to_wsrep_flags(trx.flags()), &meta, &unused, false)); if (WSREP_CB_SUCCESS != rcode) { gu_throw_fatal << "Rollback failed. Trx: " << trx; } ++attempts; if (attempts <= max_apply_attempts) { log_warn << e.what() << "\nRetrying " << attempts << "th time"; } } else { GU_TRACE(e); throw; } } } } while (attempts <= max_apply_attempts); if (gu_unlikely(attempts > max_apply_attempts)) { std::ostringstream msg; msg << "Failed to apply trx " << trx.global_seqno() << " " << max_apply_attempts << " times"; throw galera::ApplyException(msg.str(), WSREP_CB_FAILURE); } return; } std::ostream& galera::operator<<(std::ostream& os, ReplicatorSMM::State state) { switch (state) { case ReplicatorSMM::S_DESTROYED: return (os << "DESTROYED"); case ReplicatorSMM::S_CLOSED: return (os << "CLOSED"); case ReplicatorSMM::S_CLOSING: return (os << "CLOSING"); case ReplicatorSMM::S_CONNECTED: return (os << "CONNECTED"); case ReplicatorSMM::S_JOINING: return (os << "JOINING"); case ReplicatorSMM::S_JOINED: return (os << "JOINED"); case ReplicatorSMM::S_SYNCED: return (os << "SYNCED"); case ReplicatorSMM::S_DONOR: return (os << "DONOR"); } gu_throw_fatal << "invalid state " << static_cast(state); } ////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////// // Public ////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////// galera::ReplicatorSMM::ReplicatorSMM(const struct wsrep_init_args* args) : init_lib_ (reinterpret_cast(args->logger_cb)), config_ (), init_config_ (config_, args->node_address, args->data_dir), parse_options_ (*this, config_, args->options), init_ssl_ (config_), str_proto_ver_ (-1), protocol_version_ (-1), proto_max_ (gu::from_string(config_.get(Param::proto_max))), state_ (S_CLOSED), sst_state_ (SST_NONE), co_mode_ (CommitOrder::from_string( config_.get(Param::commit_order))), 
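    // The members below hold the durable node state (the grastate.dat file
    // under base_dir): they are what lets a restarted node recover its
    // uuid:seqno position and the safe_to_bootstrap flag.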
state_file_ (config_.get(BASE_DIR)+'/'+GALERA_STATE_FILE), st_ (state_file_), safe_to_bootstrap_ (true), trx_params_ (config_.get(BASE_DIR), -1, KeySet::version(config_.get(Param::key_format)), gu::from_string(config_.get( Param::max_write_set_size))), uuid_ (WSREP_UUID_UNDEFINED), state_uuid_ (WSREP_UUID_UNDEFINED), state_uuid_str_ (), cc_seqno_ (WSREP_SEQNO_UNDEFINED), pause_seqno_ (WSREP_SEQNO_UNDEFINED), app_ctx_ (args->app_ctx), view_cb_ (args->view_handler_cb), apply_cb_ (args->apply_cb), commit_cb_ (args->commit_cb), unordered_cb_ (args->unordered_cb), sst_donate_cb_ (args->sst_donate_cb), synced_cb_ (args->synced_cb), sst_donor_ (), sst_uuid_ (WSREP_UUID_UNDEFINED), sst_seqno_ (WSREP_SEQNO_UNDEFINED), sst_mutex_ (), sst_cond_ (), sst_retry_sec_ (1), gcache_ (config_, config_.get(BASE_DIR)), gcs_ (config_, gcache_, proto_max_, args->proto_ver, args->node_name, args->node_incoming), service_thd_ (gcs_, gcache_), slave_pool_ (sizeof(TrxHandle), 1024, "SlaveTrxHandle"), as_ (0), gcs_as_ (slave_pool_, gcs_, *this, gcache_), ist_receiver_ (config_, slave_pool_, args->node_address), ist_senders_ (gcs_, gcache_), wsdb_ (), cert_ (config_, service_thd_), local_monitor_ (), apply_monitor_ (), commit_monitor_ (), causal_read_timeout_(config_.get(Param::causal_read_timeout)), receivers_ (), replicated_ (), replicated_bytes_ (), keys_count_ (), keys_bytes_ (), data_bytes_ (), unrd_bytes_ (), local_commits_ (), local_rollbacks_ (), local_cert_failures_(), local_replays_ (), causal_reads_ (), preordered_id_ (), incoming_list_ (""), incoming_mutex_ (), wsrep_stats_ () { // @todo add guards (and perhaps actions) state_.add_transition(Transition(S_CLOSED, S_DESTROYED)); state_.add_transition(Transition(S_CLOSED, S_CONNECTED)); state_.add_transition(Transition(S_CLOSING, S_CLOSED)); state_.add_transition(Transition(S_CONNECTED, S_CLOSING)); state_.add_transition(Transition(S_CONNECTED, S_CONNECTED)); state_.add_transition(Transition(S_CONNECTED, S_JOINING)); // the following is possible only when bootstrapping new cluster // (trivial wsrep_cluster_address) state_.add_transition(Transition(S_CONNECTED, S_JOINED)); // the following are possible on PC remerge state_.add_transition(Transition(S_CONNECTED, S_DONOR)); state_.add_transition(Transition(S_CONNECTED, S_SYNCED)); state_.add_transition(Transition(S_JOINING, S_CLOSING)); // the following is possible if one non-prim conf follows another state_.add_transition(Transition(S_JOINING, S_CONNECTED)); state_.add_transition(Transition(S_JOINING, S_JOINED)); state_.add_transition(Transition(S_JOINED, S_CLOSING)); state_.add_transition(Transition(S_JOINED, S_CONNECTED)); state_.add_transition(Transition(S_JOINED, S_SYNCED)); // the following is possible if one desync() immediately follows another state_.add_transition(Transition(S_JOINED, S_DONOR)); state_.add_transition(Transition(S_SYNCED, S_CLOSING)); state_.add_transition(Transition(S_SYNCED, S_CONNECTED)); state_.add_transition(Transition(S_SYNCED, S_DONOR)); state_.add_transition(Transition(S_DONOR, S_CLOSING)); state_.add_transition(Transition(S_DONOR, S_CONNECTED)); state_.add_transition(Transition(S_DONOR, S_JOINED)); local_monitor_.set_initial_position(0); wsrep_uuid_t uuid; wsrep_seqno_t seqno; st_.get (uuid, seqno, safe_to_bootstrap_); if (0 != args->state_id && args->state_id->uuid != WSREP_UUID_UNDEFINED && args->state_id->uuid == uuid && seqno == WSREP_SEQNO_UNDEFINED) { /* non-trivial recovery information provided on startup, and db is safe * so use recovered seqno value */ seqno = 
args->state_id->seqno; } log_debug << "End state: " << uuid << ':' << seqno << " #################"; update_state_uuid (uuid); gcache_.seqno_reset(to_gu_uuid(uuid), seqno); // update gcache position to one supplied by app. cc_seqno_ = seqno; // is it needed here? // the following initialization is needed only to pass seqno to // connect() call. Ideally this should be done only on receving conf change. apply_monitor_.set_initial_position(seqno); if (co_mode_ != CommitOrder::BYPASS) commit_monitor_.set_initial_position(seqno); cert_.assign_initial_position(seqno, trx_proto_ver()); build_stats_vars(wsrep_stats_); } galera::ReplicatorSMM::~ReplicatorSMM() { log_info << "dtor state: " << state_(); switch (state_()) { case S_CONNECTED: case S_JOINING: case S_JOINED: case S_SYNCED: case S_DONOR: close(); case S_CLOSING: // @todo wait that all users have left the building case S_CLOSED: ist_senders_.cancel(); break; case S_DESTROYED: break; } } wsrep_status_t galera::ReplicatorSMM::connect(const std::string& cluster_name, const std::string& cluster_url, const std::string& state_donor, bool const bootstrap) { sst_donor_ = state_donor; service_thd_.reset(); ssize_t err; wsrep_status_t ret(WSREP_OK); wsrep_seqno_t const seqno(STATE_SEQNO()); wsrep_uuid_t const gcs_uuid(seqno < 0 ? WSREP_UUID_UNDEFINED :state_uuid_); log_info << "Setting initial position to " << gcs_uuid << ':' << seqno; if ((bootstrap == true || cluster_url == "gcomm://") && safe_to_bootstrap_ == false) { log_error << "It may not be safe to bootstrap the cluster from this node. " << "It was not the last one to leave the cluster and may " << "not contain all the updates. To force cluster bootstrap " << "with this node, edit the grastate.dat file manually and " << "set safe_to_bootstrap to 1 ."; ret = WSREP_NODE_FAIL; } if (ret == WSREP_OK && (err = gcs_.set_initial_position(gcs_uuid, seqno)) != 0) { log_error << "gcs init failed:" << strerror(-err); ret = WSREP_NODE_FAIL; } if (ret == WSREP_OK && (err = gcs_.connect(cluster_name, cluster_url, bootstrap)) != 0) { log_error << "gcs connect failed: " << strerror(-err); ret = WSREP_NODE_FAIL; } if (ret == WSREP_OK) { state_.shift_to(S_CONNECTED); } return ret; } wsrep_status_t galera::ReplicatorSMM::close() { if (state_() != S_CLOSED) { gcs_.close(); } return WSREP_OK; } wsrep_status_t galera::ReplicatorSMM::async_recv(void* recv_ctx) { assert(recv_ctx != 0); if (state_() == S_CLOSED || state_() == S_CLOSING) { log_error <<"async recv cannot start, provider in closed/closing state"; return WSREP_FATAL; } ++receivers_; as_ = &gcs_as_; bool exit_loop(false); wsrep_status_t retval(WSREP_OK); while (WSREP_OK == retval && state_() != S_CLOSING) { ssize_t rc; while (gu_unlikely((rc = as_->process(recv_ctx, exit_loop)) == -ECANCELED)) { recv_IST(recv_ctx); // hack: prevent fast looping until ist controlling thread // resumes gcs prosessing usleep(10000); } if (gu_unlikely(rc <= 0)) { retval = WSREP_CONN_FAIL; } else if (gu_unlikely(exit_loop == true)) { assert(WSREP_OK == retval); if (receivers_.sub_and_fetch(1) > 0) { log_info << "Slave thread exiting on request."; break; } ++receivers_; log_warn << "Refusing exit for the last slave thread."; } } /* exiting loop already did proper checks */ if (!exit_loop && receivers_.sub_and_fetch(1) == 0) { if (state_() != S_CLOSING) { if (retval == WSREP_OK) { log_warn << "Broken shutdown sequence, provider state: " << state_() << ", retval: " << retval; assert (0); } else { // Generate zero view before exit to notify application wsrep_view_info_t* 
err_view(galera_view_info_create(0, false)); void* fake_sst_req(0); size_t fake_sst_req_len(0); view_cb_(app_ctx_, recv_ctx, err_view, 0, 0, &fake_sst_req, &fake_sst_req_len); free(err_view); } /* avoid abort in production */ state_.shift_to(S_CLOSING); } state_.shift_to(S_CLOSED); } log_debug << "Slave thread exit. Return code: " << retval; return retval; } void galera::ReplicatorSMM::apply_trx(void* recv_ctx, TrxHandle* trx) { assert(trx != 0); assert(trx->global_seqno() > 0); assert(trx->is_certified() == true); assert(trx->global_seqno() > STATE_SEQNO()); assert(trx->is_local() == false); ApplyOrder ao(*trx); CommitOrder co(*trx, co_mode_); gu_trace(apply_monitor_.enter(ao)); trx->set_state(TrxHandle::S_APPLYING); wsrep_trx_meta_t meta = {{state_uuid_, trx->global_seqno() }, trx->depends_seqno()}; gu_trace(apply_trx_ws(recv_ctx, apply_cb_, commit_cb_, *trx, meta)); /* at this point any exception in apply_trx_ws() is fatal, not * catching anything. */ if (gu_likely(co_mode_ != CommitOrder::BYPASS)) { gu_trace(commit_monitor_.enter(co)); } trx->set_state(TrxHandle::S_COMMITTING); wsrep_bool_t exit_loop(false); wsrep_cb_status_t const rcode( commit_cb_( recv_ctx, TrxHandle::trx_flags_to_wsrep_flags(trx->flags()), &meta, &exit_loop, true)); if (gu_unlikely (rcode != WSREP_CB_SUCCESS)) gu_throw_fatal << "Commit failed. Trx: " << trx; if (gu_likely(co_mode_ != CommitOrder::BYPASS)) { commit_monitor_.leave(co); } trx->set_state(TrxHandle::S_COMMITTED); if (trx->local_seqno() != -1) { // trx with local seqno -1 originates from IST (or other source not gcs) report_last_committed(cert_.set_trx_committed(trx)); } /* For now need to keep it inside apply monitor to ensure all processing * ends by the time monitors are drained because of potential gcache * cleanup (and loss of the writeset buffer). Perhaps unordered monitor * is needed here. 
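     * Note: unordered() below just iterates the unordered dataset and
     * ignores callback return codes, so what is protected here appears
     * to be primarily the lifetime of the writeset buffer in GCache.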
*/ trx->unordered(recv_ctx, unordered_cb_); apply_monitor_.leave(ao); trx->set_exit_loop(exit_loop); } wsrep_status_t galera::ReplicatorSMM::replicate(TrxHandle* trx, wsrep_trx_meta_t* meta) { if (state_() < S_JOINED) return WSREP_TRX_FAIL; assert(trx->state() == TrxHandle::S_EXECUTING || trx->state() == TrxHandle::S_MUST_ABORT); assert(trx->local_seqno() == WSREP_SEQNO_UNDEFINED && trx->global_seqno() == WSREP_SEQNO_UNDEFINED); wsrep_status_t retval(WSREP_TRX_FAIL); if (trx->state() == TrxHandle::S_MUST_ABORT) { must_abort: trx->set_state(TrxHandle::S_ABORTING); return retval; } WriteSetNG::GatherVector actv; gcs_action act; act.type = GCS_ACT_TORDERED; #ifndef NDEBUG act.seqno_g = GCS_SEQNO_ILL; #endif if (trx->new_version()) { act.buf = NULL; act.size = trx->write_set_out().gather(trx->source_id(), trx->conn_id(), trx->trx_id(), actv); } else { trx->set_last_seen_seqno(last_committed()); assert (trx->last_seen_seqno() >= 0); trx->flush(0); const MappedBuffer& wscoll(trx->write_set_collection()); act.buf = &wscoll[0]; act.size = wscoll.size(); assert (act.buf != NULL); assert (act.size > 0); } trx->set_state(TrxHandle::S_REPLICATING); ssize_t rcode(-1); do { assert(act.seqno_g == GCS_SEQNO_ILL); const ssize_t gcs_handle(gcs_.schedule()); if (gu_unlikely(gcs_handle < 0)) { log_debug << "gcs schedule " << strerror(-gcs_handle); trx->set_state(TrxHandle::S_MUST_ABORT); goto must_abort; } trx->set_gcs_handle(gcs_handle); if (trx->new_version()) { trx->set_last_seen_seqno(last_committed()); assert(trx->last_seen_seqno() >= 0); trx->unlock(); assert (act.buf == NULL); // just a sanity check rcode = gcs_.replv(actv, act, true); } else { assert(trx->last_seen_seqno() >= 0); trx->unlock(); assert (act.buf != NULL); rcode = gcs_.repl(act, true); } GU_DBUG_SYNC_WAIT("after_replicate_sync") trx->lock(); } while (rcode == -EAGAIN && trx->state() != TrxHandle::S_MUST_ABORT && (usleep(1000), true)); assert(trx->last_seen_seqno() >= 0); if (rcode < 0) { if (rcode != -EINTR) { log_debug << "gcs_repl() failed with " << strerror(-rcode) << " for trx " << *trx; } assert(rcode != -EINTR || trx->state() == TrxHandle::S_MUST_ABORT); assert(act.seqno_l == GCS_SEQNO_ILL && act.seqno_g == GCS_SEQNO_ILL); assert(NULL == act.buf || !trx->new_version()); if (trx->state() != TrxHandle::S_MUST_ABORT) { trx->set_state(TrxHandle::S_MUST_ABORT); } trx->set_gcs_handle(-1); goto must_abort; } assert(act.buf != NULL); assert(act.size == rcode); assert(act.seqno_l != GCS_SEQNO_ILL); assert(act.seqno_g != GCS_SEQNO_ILL); ++replicated_; replicated_bytes_ += rcode; trx->set_gcs_handle(-1); if (trx->new_version()) { gu_trace(trx->unserialize(static_cast(act.buf), act.size, 0)); trx->update_stats(keys_count_, keys_bytes_, data_bytes_, unrd_bytes_); } trx->set_received(act.buf, act.seqno_l, act.seqno_g); if (trx->state() == TrxHandle::S_MUST_ABORT) { retval = cert_for_aborted(trx); if (retval != WSREP_BF_ABORT) { LocalOrder lo(*trx); ApplyOrder ao(*trx); CommitOrder co(*trx, co_mode_); local_monitor_.self_cancel(lo); apply_monitor_.self_cancel(ao); if (co_mode_ !=CommitOrder::BYPASS) commit_monitor_.self_cancel(co); } else if (meta != 0) { meta->gtid.uuid = state_uuid_; meta->gtid.seqno = trx->global_seqno(); meta->depends_on = trx->depends_seqno(); } if (trx->state() == TrxHandle::S_MUST_ABORT) goto must_abort; } else { retval = WSREP_OK; } assert(trx->last_seen_seqno() >= 0); return retval; } void galera::ReplicatorSMM::abort_trx(TrxHandle* trx) { assert(trx != 0); assert(trx->is_local() == true); log_debug << "aborting trx " << 
*trx << " " << trx; switch (trx->state()) { case TrxHandle::S_MUST_ABORT: case TrxHandle::S_ABORTING: // guess this is here because we can have a race return; case TrxHandle::S_EXECUTING: trx->set_state(TrxHandle::S_MUST_ABORT); break; case TrxHandle::S_REPLICATING: { trx->set_state(TrxHandle::S_MUST_ABORT); // trx is in gcs repl int rc; if (trx->gcs_handle() > 0 && ((rc = gcs_.interrupt(trx->gcs_handle()))) != 0) { log_debug << "gcs_interrupt(): handle " << trx->gcs_handle() << " trx id " << trx->trx_id() << ": " << strerror(-rc); } break; } case TrxHandle::S_CERTIFYING: { trx->set_state(TrxHandle::S_MUST_ABORT); // trx is waiting in local monitor LocalOrder lo(*trx); trx->unlock(); local_monitor_.interrupt(lo); trx->lock(); break; } case TrxHandle::S_APPLYING: { trx->set_state(TrxHandle::S_MUST_ABORT); // trx is waiting in apply monitor ApplyOrder ao(*trx); trx->unlock(); apply_monitor_.interrupt(ao); trx->lock(); break; } case TrxHandle::S_COMMITTING: trx->set_state(TrxHandle::S_MUST_ABORT); if (co_mode_ != CommitOrder::BYPASS) { // trx waiting in commit monitor CommitOrder co(*trx, co_mode_); trx->unlock(); commit_monitor_.interrupt(co); trx->lock(); } break; default: gu_throw_fatal << "invalid state " << trx->state(); } } wsrep_status_t galera::ReplicatorSMM::pre_commit(TrxHandle* trx, wsrep_trx_meta_t* meta) { assert(trx->state() == TrxHandle::S_REPLICATING); assert(trx->local_seqno() > -1); assert(trx->global_seqno() > -1); assert(trx->last_seen_seqno() >= 0); if (meta != 0) { meta->gtid.uuid = state_uuid_; meta->gtid.seqno = trx->global_seqno(); meta->depends_on = trx->depends_seqno(); } // State should not be checked here: If trx has been replicated, // it has to be certified and potentially applied. #528 // if (state_() < S_JOINED) return WSREP_TRX_FAIL; wsrep_status_t retval(cert_and_catch(trx)); if (gu_unlikely(retval != WSREP_OK)) { assert(trx->state() == TrxHandle::S_MUST_ABORT || trx->state() == TrxHandle::S_MUST_REPLAY_AM || trx->state() == TrxHandle::S_MUST_CERT_AND_REPLAY); if (trx->state() == TrxHandle::S_MUST_ABORT) { trx->set_state(TrxHandle::S_ABORTING); } return retval; } assert(trx->state() == TrxHandle::S_CERTIFYING); assert(trx->global_seqno() > STATE_SEQNO()); trx->set_state(TrxHandle::S_APPLYING); ApplyOrder ao(*trx); CommitOrder co(*trx, co_mode_); bool interrupted(false); try { gu_trace(apply_monitor_.enter(ao)); } catch (gu::Exception& e) { if (e.get_errno() == EINTR) { interrupted = true; } else throw; } if (gu_unlikely(interrupted) || trx->state() == TrxHandle::S_MUST_ABORT) { assert(trx->state() == TrxHandle::S_MUST_ABORT); if (interrupted) trx->set_state(TrxHandle::S_MUST_REPLAY_AM); else trx->set_state(TrxHandle::S_MUST_REPLAY_CM); retval = WSREP_BF_ABORT; } else if ((trx->flags() & TrxHandle::F_COMMIT) != 0) { trx->set_state(TrxHandle::S_COMMITTING); if (co_mode_ != CommitOrder::BYPASS) { try { gu_trace(commit_monitor_.enter(co)); } catch (gu::Exception& e) { if (e.get_errno() == EINTR) { interrupted = true; } else throw; } if (gu_unlikely(interrupted) || trx->state() == TrxHandle::S_MUST_ABORT) { assert(trx->state() == TrxHandle::S_MUST_ABORT); if (interrupted) trx->set_state(TrxHandle::S_MUST_REPLAY_CM); else trx->set_state(TrxHandle::S_MUST_REPLAY); retval = WSREP_BF_ABORT; } } } else { trx->set_state(TrxHandle::S_EXECUTING); } assert((retval == WSREP_OK && (trx->state() == TrxHandle::S_COMMITTING || trx->state() == TrxHandle::S_EXECUTING)) || (retval == WSREP_TRX_FAIL && trx->state() == TrxHandle::S_ABORTING) || (retval == WSREP_BF_ABORT && ( 
trx->state() == TrxHandle::S_MUST_REPLAY_AM || trx->state() == TrxHandle::S_MUST_REPLAY_CM || trx->state() == TrxHandle::S_MUST_REPLAY))); return retval; } wsrep_status_t galera::ReplicatorSMM::replay_trx(TrxHandle* trx, void* trx_ctx) { assert(trx->state() == TrxHandle::S_MUST_CERT_AND_REPLAY || trx->state() == TrxHandle::S_MUST_REPLAY_AM || trx->state() == TrxHandle::S_MUST_REPLAY_CM || trx->state() == TrxHandle::S_MUST_REPLAY); assert(trx->trx_id() != static_cast(-1)); assert(trx->global_seqno() > STATE_SEQNO()); wsrep_status_t retval(WSREP_OK); switch (trx->state()) { case TrxHandle::S_MUST_CERT_AND_REPLAY: retval = cert_and_catch(trx); if (retval != WSREP_OK) { // apply monitor is self canceled in cert break; } trx->set_state(TrxHandle::S_MUST_REPLAY_AM); // fall through case TrxHandle::S_MUST_REPLAY_AM: { // safety measure to make sure that all preceding trxs finish before // replaying trx->set_depends_seqno(trx->global_seqno() - 1); ApplyOrder ao(*trx); gu_trace(apply_monitor_.enter(ao)); trx->set_state(TrxHandle::S_MUST_REPLAY_CM); // fall through } case TrxHandle::S_MUST_REPLAY_CM: if (co_mode_ != CommitOrder::BYPASS) { CommitOrder co(*trx, co_mode_); gu_trace(commit_monitor_.enter(co)); } trx->set_state(TrxHandle::S_MUST_REPLAY); // fall through case TrxHandle::S_MUST_REPLAY: ++local_replays_; trx->set_state(TrxHandle::S_REPLAYING); try { wsrep_trx_meta_t meta = {{state_uuid_, trx->global_seqno() }, trx->depends_seqno()}; gu_trace(apply_trx_ws(trx_ctx, apply_cb_, commit_cb_, *trx, meta)); wsrep_bool_t unused(false); wsrep_cb_status_t rcode( commit_cb_( trx_ctx, TrxHandle::trx_flags_to_wsrep_flags(trx->flags()), &meta, &unused, true)); if (gu_unlikely(rcode != WSREP_CB_SUCCESS)) gu_throw_fatal << "Commit failed. Trx: " << trx; } catch (gu::Exception& e) { st_.mark_corrupt(); throw; } // apply, commit monitors are released in post commit return WSREP_OK; default: gu_throw_fatal << "Invalid state in replay for trx " << *trx; } log_debug << "replaying failed for trx " << *trx; trx->set_state(TrxHandle::S_ABORTING); return retval; } wsrep_status_t galera::ReplicatorSMM::post_commit(TrxHandle* trx) { if (trx->state() == TrxHandle::S_MUST_ABORT) { // This is possible in case of ALG: BF applier BF aborts // trx that has already grabbed commit monitor and is committing. // However, this should be acceptable assuming that commit // operation does not reserve any more resources and is able // to release already reserved resources. log_debug << "trx was BF aborted during commit: " << *trx; // manipulate state to avoid crash trx->set_state(TrxHandle::S_MUST_REPLAY); trx->set_state(TrxHandle::S_REPLAYING); } assert(trx->state() == TrxHandle::S_COMMITTING || trx->state() == TrxHandle::S_REPLAYING); assert(trx->local_seqno() > -1 && trx->global_seqno() > -1); CommitOrder co(*trx, co_mode_); if (co_mode_ != CommitOrder::BYPASS) commit_monitor_.leave(co); ApplyOrder ao(*trx); report_last_committed(cert_.set_trx_committed(trx)); apply_monitor_.leave(ao); trx->set_state(TrxHandle::S_COMMITTED); ++local_commits_; return WSREP_OK; } wsrep_status_t galera::ReplicatorSMM::post_rollback(TrxHandle* trx) { if (trx->state() == TrxHandle::S_MUST_ABORT) { trx->set_state(TrxHandle::S_ABORTING); } assert(trx->state() == TrxHandle::S_ABORTING || trx->state() == TrxHandle::S_EXECUTING); trx->set_state(TrxHandle::S_ROLLED_BACK); // Trx was either rolled back by user or via certification failure, // last committed report not needed since cert index state didn't change. 
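    // Both voluntary rollbacks (arriving in S_EXECUTING) and forced ones
    // (arriving in S_MUST_ABORT / S_ABORTING) funnel through this point;
    // the only remaining bookkeeping is the local_rollbacks_ counter.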
// report_last_committed(); ++local_rollbacks_; return WSREP_OK; } wsrep_status_t galera::ReplicatorSMM::causal_read(wsrep_gtid_t* gtid) { wsrep_seqno_t cseq(static_cast(gcs_.caused())); if (cseq < 0) { log_warn << "gcs_caused() returned " << cseq << " (" << strerror(-cseq) << ')'; return WSREP_TRX_FAIL; } try { // @note: Using timed wait for monitor is currently a hack // to avoid deadlock resulting from race between monitor wait // and drain during configuration change. Instead of this, // monitor should have proper mechanism to interrupt waiters // at monitor drain and disallowing further waits until // configuration change related operations (SST etc) have been // finished. gu::datetime::Date wait_until(gu::datetime::Date::calendar() + causal_read_timeout_); if (gu_likely(co_mode_ != CommitOrder::BYPASS)) { commit_monitor_.wait(cseq, wait_until); } else { apply_monitor_.wait(cseq, wait_until); } if (gtid != 0) { gtid->uuid = state_uuid_; gtid->seqno = cseq; } ++causal_reads_; return WSREP_OK; } catch (gu::Exception& e) { log_debug << "monitor wait failed for causal read: " << e.what(); return WSREP_TRX_FAIL; } } wsrep_status_t galera::ReplicatorSMM::to_isolation_begin(TrxHandle* trx, wsrep_trx_meta_t* meta) { if (meta != 0) { meta->gtid.uuid = state_uuid_; meta->gtid.seqno = trx->global_seqno(); meta->depends_on = trx->depends_seqno(); } assert(trx->state() == TrxHandle::S_REPLICATING); assert(trx->trx_id() == static_cast(-1)); assert(trx->local_seqno() > -1 && trx->global_seqno() > -1); assert(trx->global_seqno() > STATE_SEQNO()); wsrep_status_t retval; switch ((retval = cert_and_catch(trx))) { case WSREP_OK: { ApplyOrder ao(*trx); CommitOrder co(*trx, co_mode_); gu_trace(apply_monitor_.enter(ao)); if (co_mode_ != CommitOrder::BYPASS) try { commit_monitor_.enter(co); } catch (...) { gu_throw_fatal << "unable to enter commit monitor: " << *trx; } trx->set_state(TrxHandle::S_APPLYING); log_debug << "Executing TO isolated action: " << *trx; st_.mark_unsafe(); break; } case WSREP_TRX_FAIL: // Apply monitor is released in cert() in case of failure. 
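        // A certification failure for a TO isolated action leaves nothing
        // to clean up here: the monitors were self-cancelled in cert(),
        // so the trx is simply marked as aborting.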
trx->set_state(TrxHandle::S_ABORTING); break; default: log_error << "unrecognized retval " << retval << " for to isolation certification for " << *trx; retval = WSREP_FATAL; break; } return retval; } wsrep_status_t galera::ReplicatorSMM::to_isolation_end(TrxHandle* trx) { assert(trx->state() == TrxHandle::S_APPLYING); log_debug << "Done executing TO isolated action: " << *trx; CommitOrder co(*trx, co_mode_); if (co_mode_ != CommitOrder::BYPASS) commit_monitor_.leave(co); ApplyOrder ao(*trx); report_last_committed(cert_.set_trx_committed(trx)); apply_monitor_.leave(ao); st_.mark_safe(); return WSREP_OK; } namespace galera { static WriteSetOut* writeset_from_handle (wsrep_po_handle_t& handle, const TrxHandle::Params& trx_params) { WriteSetOut* ret = reinterpret_cast(handle.opaque); if (NULL == ret) { try { ret = new WriteSetOut( // gu::String<256>(trx_params.working_dir_) << '/' << &handle, trx_params.working_dir_, wsrep_trx_id_t(&handle), /* key format is not essential since we're not adding keys */ KeySet::version(trx_params.key_format_), NULL, 0, 0, WriteSetNG::MAX_VERSION, DataSet::MAX_VERSION, DataSet::MAX_VERSION, trx_params.max_write_set_size_); handle.opaque = ret; } catch (std::bad_alloc& ba) { gu_throw_error(ENOMEM) << "Could not create WriteSetOut"; } } return ret; } } /* namespace galera */ wsrep_status_t galera::ReplicatorSMM::preordered_collect(wsrep_po_handle_t& handle, const struct wsrep_buf* const data, size_t const count, bool const copy) { if (gu_unlikely(trx_params_.version_ < WS_NG_VERSION)) return WSREP_NOT_IMPLEMENTED; WriteSetOut* const ws(writeset_from_handle(handle, trx_params_)); for (size_t i(0); i < count; ++i) { ws->append_data(data[i].ptr, data[i].len, copy); } return WSREP_OK; } wsrep_status_t galera::ReplicatorSMM::preordered_commit(wsrep_po_handle_t& handle, const wsrep_uuid_t& source, uint64_t const flags, int const pa_range, bool const commit) { if (gu_unlikely(trx_params_.version_ < WS_NG_VERSION)) return WSREP_NOT_IMPLEMENTED; WriteSetOut* const ws(writeset_from_handle(handle, trx_params_)); if (gu_likely(true == commit)) { ws->set_flags (WriteSetNG::wsrep_flags_to_ws_flags(flags)); /* by loooking at trx_id we should be able to detect gaps / lost events * (however resending is not implemented yet). Something like * * wsrep_trx_id_t const trx_id(cert_.append_preordered(source, ws)); * * begs to be here. */ wsrep_trx_id_t const trx_id(preordered_id_.add_and_fetch(1)); WriteSetNG::GatherVector actv; size_t const actv_size(ws->gather(source, 0, trx_id, actv)); ws->set_preordered (pa_range); // also adds CRC int rcode; do { rcode = gcs_.sendv(actv, actv_size, GCS_ACT_TORDERED, false); } while (rcode == -EAGAIN && (usleep(1000), true)); if (rcode < 0) gu_throw_error(-rcode) << "Replication of preordered writeset failed."; } delete ws; handle.opaque = NULL; return WSREP_OK; } wsrep_status_t galera::ReplicatorSMM::sst_sent(const wsrep_gtid_t& state_id, int const rcode) { assert (rcode <= 0); assert (rcode == 0 || state_id.seqno == WSREP_SEQNO_UNDEFINED); assert (rcode != 0 || state_id.seqno >= 0); if (state_() != S_DONOR) { log_error << "sst sent called when not SST donor, state " << state_(); return WSREP_CONN_FAIL; } gcs_seqno_t seqno(rcode ? 
rcode : state_id.seqno); if (state_id.uuid != state_uuid_ && seqno >= 0) { // state we have sent no longer corresponds to the current group state // mark an error seqno = -EREMCHG; } try { gcs_.join(seqno); return WSREP_OK; } catch (gu::Exception& e) { log_error << "failed to recover from DONOR state: " << e.what(); return WSREP_CONN_FAIL; } } void galera::ReplicatorSMM::process_trx(void* recv_ctx, TrxHandle* trx) { assert(recv_ctx != 0); assert(trx != 0); assert(trx->local_seqno() > 0); assert(trx->global_seqno() > 0); assert(trx->last_seen_seqno() >= 0); assert(trx->depends_seqno() == -1); assert(trx->state() == TrxHandle::S_REPLICATING); wsrep_status_t const retval(cert_and_catch(trx)); switch (retval) { case WSREP_OK: try { gu_trace(apply_trx(recv_ctx, trx)); } catch (std::exception& e) { st_.mark_corrupt(); log_fatal << "Failed to apply trx: " << *trx; log_fatal << e.what(); log_fatal << "Node consistency compromized, aborting..."; abort(); } break; case WSREP_TRX_FAIL: // certification failed, apply monitor has been canceled trx->set_state(TrxHandle::S_ABORTING); trx->set_state(TrxHandle::S_ROLLED_BACK); break; default: // this should not happen for remote actions gu_throw_error(EINVAL) << "unrecognized retval for remote trx certification: " << retval << " trx: " << *trx; } } void galera::ReplicatorSMM::process_commit_cut(wsrep_seqno_t seq, wsrep_seqno_t seqno_l) { assert(seq > 0); assert(seqno_l > 0); LocalOrder lo(seqno_l); gu_trace(local_monitor_.enter(lo)); if (seq >= cc_seqno_) /* Refs #782. workaround for * assert(seqno >= seqno_released_) in gcache. */ cert_.purge_trxs_upto(seq, true); local_monitor_.leave(lo); log_debug << "Got commit cut from GCS: " << seq; } void galera::ReplicatorSMM::establish_protocol_versions (int proto_ver) { switch (proto_ver) { case 1: trx_params_.version_ = 1; str_proto_ver_ = 0; break; case 2: trx_params_.version_ = 1; str_proto_ver_ = 1; break; case 3: case 4: trx_params_.version_ = 2; str_proto_ver_ = 1; break; case 5: trx_params_.version_ = 3; str_proto_ver_ = 1; break; case 6: trx_params_.version_ = 3; str_proto_ver_ = 2; // gcs intelligent donor selection. // include handling dangling comma in donor string. break; case 7: // Protocol upgrade to handle IST SSL backwards compatibility, // no effect to TRX or STR protocols. trx_params_.version_ = 3; str_proto_ver_ = 2; break; default: log_fatal << "Configuration change resulted in an unsupported protocol " "version: " << proto_ver << ". 
Can't continue."; abort(); }; protocol_version_ = proto_ver; log_info << "REPL Protocols: " << protocol_version_ << " (" << trx_params_.version_ << ", " << str_proto_ver_ << ")"; } static bool app_wants_state_transfer (const void* const req, ssize_t const req_len) { return (req_len != (strlen(WSREP_STATE_TRANSFER_NONE) + 1) || memcmp(req, WSREP_STATE_TRANSFER_NONE, req_len)); } void galera::ReplicatorSMM::update_incoming_list(const wsrep_view_info_t& view) { static char const separator(','); ssize_t new_size(0); if (view.memb_num > 0) { new_size += view.memb_num - 1; // separators for (int i = 0; i < view.memb_num; ++i) { new_size += strlen(view.members[i].incoming); } } gu::Lock lock(incoming_mutex_); incoming_list_.clear(); incoming_list_.resize(new_size); if (new_size <= 0) return; incoming_list_ = view.members[0].incoming; for (int i = 1; i < view.memb_num; ++i) { incoming_list_ += separator; incoming_list_ += view.members[i].incoming; } } void galera::ReplicatorSMM::process_conf_change(void* recv_ctx, const wsrep_view_info_t& view_info, int repl_proto, State next_state, wsrep_seqno_t seqno_l) { assert(seqno_l > -1); update_incoming_list(view_info); LocalOrder lo(seqno_l); gu_trace(local_monitor_.enter(lo)); wsrep_seqno_t const upto(cert_.position()); if (view_info.status == WSREP_VIEW_PRIMARY) { safe_to_bootstrap_ = (view_info.memb_num == 1); } apply_monitor_.drain(upto); if (co_mode_ != CommitOrder::BYPASS) commit_monitor_.drain(upto); if (view_info.my_idx >= 0) { uuid_ = view_info.members[view_info.my_idx].id; } bool const st_required(state_transfer_required(view_info)); wsrep_seqno_t const group_seqno(view_info.state_id.seqno); const wsrep_uuid_t& group_uuid (view_info.state_id.uuid); if (st_required) { log_info << "State transfer required: " << "\n\tGroup state: " << group_uuid << ":" << group_seqno << "\n\tLocal state: " << state_uuid_<< ":" << STATE_SEQNO(); if (S_CONNECTED != state_()) state_.shift_to(S_CONNECTED); } void* app_req(0); size_t app_req_len(0); const_cast(view_info).state_gap = st_required; wsrep_cb_status_t const rcode( view_cb_(app_ctx_, recv_ctx, &view_info, 0, 0, &app_req, &app_req_len)); if (WSREP_CB_SUCCESS != rcode) { assert(app_req_len <= 0); log_fatal << "View callback failed. This is unrecoverable, " << "restart required."; close(); abort(); } else if (st_required && 0 == app_req_len && state_uuid_ != group_uuid) { log_fatal << "Local state UUID " << state_uuid_ << " is different from group state UUID " << group_uuid << ", and SST request is null: restart required."; close(); abort(); } if (view_info.view >= 0) // Primary configuration { establish_protocol_versions (repl_proto); // we have to reset cert initial position here, SST does not contain // cert index yet (see #197). // Also this must be done before releasing GCache buffers. 
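            // The sequence below is: reset the certification index, have the
            // service thread release GCache buffers up to the current state
            // seqno and flush it, then record cc_seqno_, which is needed
            // later if this node acts as an IST donor.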
cert_.assign_initial_position(group_seqno, trx_params_.version_); if (STATE_SEQNO() > 0) service_thd_.release_seqno(STATE_SEQNO()); // make sure all gcache buffers are released // at this point there is no ongoing master or slave transactions // and no new requests to service thread should be possible service_thd_.flush(); // make sure service thd is idle // record state seqno, needed for IST on DONOR cc_seqno_ = group_seqno; bool const app_wants_st(app_wants_state_transfer(app_req, app_req_len)); if (st_required && app_wants_st) { // GCache::Seqno_reset() happens here request_state_transfer (recv_ctx, group_uuid, group_seqno, app_req, app_req_len); } else { if (view_info.view == 1 || !app_wants_st) { update_state_uuid (group_uuid); gcache_.seqno_reset(to_gu_uuid(group_uuid), group_seqno); apply_monitor_.set_initial_position(group_seqno); if (co_mode_ != CommitOrder::BYPASS) commit_monitor_.set_initial_position(group_seqno); } if (state_() == S_CONNECTED || state_() == S_DONOR) { switch (next_state) { case S_JOINING: state_.shift_to(S_JOINING); break; case S_DONOR: if (state_() == S_CONNECTED) { state_.shift_to(S_DONOR); } break; case S_JOINED: state_.shift_to(S_JOINED); break; case S_SYNCED: state_.shift_to(S_SYNCED); synced_cb_(app_ctx_); break; default: log_debug << "next_state " << next_state; break; } } st_.set(state_uuid_, WSREP_SEQNO_UNDEFINED, safe_to_bootstrap_); } if (state_() == S_JOINING && sst_state_ != SST_NONE) { /* There are two reasons we can be here: * 1) we just got state transfer in request_state_transfer() above; * 2) we failed here previously (probably due to partition). */ try { gcs_.join(sst_seqno_); sst_state_ = SST_NONE; } catch (gu::Exception& e) { log_error << "Failed to JOIN the cluster after SST"; } } } else { // Non-primary configuration if (state_uuid_ != WSREP_UUID_UNDEFINED) { st_.set (state_uuid_, STATE_SEQNO(), safe_to_bootstrap_); } if (next_state != S_CONNECTED && next_state != S_CLOSING) { log_fatal << "Internal error: unexpected next state for " << "non-prim: " << next_state << ". 
Restart required."; close(); abort(); } state_.shift_to(next_state); } local_monitor_.leave(lo); gcs_.resume_recv(); free(app_req); } void galera::ReplicatorSMM::process_join(wsrep_seqno_t seqno_j, wsrep_seqno_t seqno_l) { LocalOrder lo(seqno_l); gu_trace(local_monitor_.enter(lo)); wsrep_seqno_t const upto(cert_.position()); apply_monitor_.drain(upto); if (co_mode_ != CommitOrder::BYPASS) commit_monitor_.drain(upto); if (seqno_j < 0 && S_JOINING == state_()) { // #595, @todo: find a way to re-request state transfer log_fatal << "Failed to receive state transfer: " << seqno_j << " (" << strerror (-seqno_j) << "), need to restart."; abort(); } else { state_.shift_to(S_JOINED); } local_monitor_.leave(lo); } void galera::ReplicatorSMM::process_sync(wsrep_seqno_t seqno_l) { LocalOrder lo(seqno_l); gu_trace(local_monitor_.enter(lo)); wsrep_seqno_t const upto(cert_.position()); apply_monitor_.drain(upto); if (co_mode_ != CommitOrder::BYPASS) commit_monitor_.drain(upto); state_.shift_to(S_SYNCED); synced_cb_(app_ctx_); local_monitor_.leave(lo); } wsrep_seqno_t galera::ReplicatorSMM::pause() { // Grab local seqno for local_monitor_ wsrep_seqno_t const local_seqno( static_cast(gcs_.local_sequence())); LocalOrder lo(local_seqno); local_monitor_.enter(lo); // Local monitor should take care that concurrent // pause requests are enqueued assert(pause_seqno_ == WSREP_SEQNO_UNDEFINED); pause_seqno_ = local_seqno; // Get drain seqno from cert index wsrep_seqno_t const upto(cert_.position()); apply_monitor_.drain(upto); assert (apply_monitor_.last_left() >= upto); if (co_mode_ != CommitOrder::BYPASS) { commit_monitor_.drain(upto); assert (commit_monitor_.last_left() >= upto); assert (commit_monitor_.last_left() == apply_monitor_.last_left()); } wsrep_seqno_t const ret(STATE_SEQNO()); st_.set(state_uuid_, ret, safe_to_bootstrap_); log_info << "Provider paused at " << state_uuid_ << ':' << ret << " (" << pause_seqno_ << ")"; return ret; } void galera::ReplicatorSMM::resume() { assert(pause_seqno_ != WSREP_SEQNO_UNDEFINED); if (pause_seqno_ == WSREP_SEQNO_UNDEFINED) { gu_throw_error(EALREADY) << "tried to resume unpaused provider"; } st_.set(state_uuid_, WSREP_SEQNO_UNDEFINED, safe_to_bootstrap_); log_info << "resuming provider at " << pause_seqno_; LocalOrder lo(pause_seqno_); pause_seqno_ = WSREP_SEQNO_UNDEFINED; local_monitor_.leave(lo); log_info << "Provider resumed."; } void galera::ReplicatorSMM::desync() { wsrep_seqno_t seqno_l; ssize_t const ret(gcs_.desync(&seqno_l)); if (seqno_l > 0) { LocalOrder lo(seqno_l); // need to process it regardless of ret value if (ret == 0) { /* #706 - the check below must be state request-specific. We are not holding any locks here and must be able to wait like any other action. However practice may prove different, leaving it here as a reminder. if (local_monitor_.would_block(seqno_l)) { gu_throw_error (-EDEADLK) << "Ran out of resources waiting to " << "desync the node. 
" << "The node must be restarted."; } */ local_monitor_.enter(lo); if (state_() != S_DONOR) state_.shift_to(S_DONOR); local_monitor_.leave(lo); } else { local_monitor_.self_cancel(lo); } } if (ret) { gu_throw_error (-ret) << "Node desync failed."; } } void galera::ReplicatorSMM::resync() { gcs_.join(commit_monitor_.last_left()); } ////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////// //// Private ////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////// /* don't use this directly, use cert_and_catch() instead */ inline wsrep_status_t galera::ReplicatorSMM::cert(TrxHandle* trx) { assert(trx->state() == TrxHandle::S_REPLICATING || trx->state() == TrxHandle::S_MUST_CERT_AND_REPLAY); assert(trx->local_seqno() != WSREP_SEQNO_UNDEFINED); assert(trx->global_seqno() != WSREP_SEQNO_UNDEFINED); assert(trx->last_seen_seqno() >= 0); assert(trx->last_seen_seqno() < trx->global_seqno()); trx->set_state(TrxHandle::S_CERTIFYING); LocalOrder lo(*trx); ApplyOrder ao(*trx); CommitOrder co(*trx, co_mode_); bool interrupted(false); try { gu_trace(local_monitor_.enter(lo)); } catch (gu::Exception& e) { if (e.get_errno() == EINTR) { interrupted = true; } else throw; } wsrep_status_t retval(WSREP_OK); bool const applicable(trx->global_seqno() > STATE_SEQNO()); if (gu_likely (!interrupted)) { switch (cert_.append_trx(trx)) { case Certification::TEST_OK: if (gu_likely(applicable)) { if (trx->state() == TrxHandle::S_CERTIFYING) { retval = WSREP_OK; } else { assert(trx->state() == TrxHandle::S_MUST_ABORT); trx->set_state(TrxHandle::S_MUST_REPLAY_AM); retval = WSREP_BF_ABORT; } } else { // this can happen after SST position has been submitted // but not all actions preceding SST initial position // have been processed trx->set_state(TrxHandle::S_MUST_ABORT); retval = WSREP_TRX_FAIL; } break; case Certification::TEST_FAILED: if (gu_unlikely(trx->is_toi() && applicable)) // small sanity check { // may happen on configuration change log_warn << "Certification failed for TO isolated action: " << *trx; assert(0); } local_cert_failures_ += trx->is_local(); trx->set_state(TrxHandle::S_MUST_ABORT); retval = WSREP_TRX_FAIL; break; } if (gu_unlikely(WSREP_TRX_FAIL == retval)) { report_last_committed(cert_.set_trx_committed(trx)); } // at this point we are about to leave local_monitor_. Make sure // trx checksum was alright before that. trx->verify_checksum(); // we must do it 'in order' for std::map reasons, so keeping // it inside the monitor gcache_.seqno_assign (trx->action(), trx->global_seqno(), trx->depends_seqno()); local_monitor_.leave(lo); } else { retval = cert_for_aborted(trx); if (WSREP_TRX_FAIL == retval) { local_monitor_.self_cancel(lo); } else { assert(WSREP_BF_ABORT == retval); } } if (gu_unlikely(WSREP_TRX_FAIL == retval && applicable)) { // applicable but failed certification: self-cancel monitors apply_monitor_.self_cancel(ao); if (co_mode_ != CommitOrder::BYPASS) commit_monitor_.self_cancel(co); } return retval; } /* pretty much any exception in cert() is fatal as it blocks local_monitor_ */ wsrep_status_t galera::ReplicatorSMM::cert_and_catch(TrxHandle* trx) { try { return cert(trx); } catch (std::exception& e) { log_fatal << "Certification exception: " << e.what(); } catch (...) 
{ log_fatal << "Unknown certification exception"; } abort(); } /* This must be called BEFORE local_monitor_.self_cancel() due to * gcache_.seqno_assign() */ wsrep_status_t galera::ReplicatorSMM::cert_for_aborted(TrxHandle* trx) { Certification::TestResult const res(cert_.test(trx, false)); switch (res) { case Certification::TEST_OK: trx->set_state(TrxHandle::S_MUST_CERT_AND_REPLAY); return WSREP_BF_ABORT; case Certification::TEST_FAILED: if (trx->state() != TrxHandle::S_MUST_ABORT) { trx->set_state(TrxHandle::S_MUST_ABORT); } // Mext step will be monitors release. Make sure that ws was not // corrupted and cert failure is real before procedeing with that. trx->verify_checksum(); gcache_.seqno_assign (trx->action(), trx->global_seqno(), -1); return WSREP_TRX_FAIL; default: log_fatal << "Unexpected return value from Certification::test(): " << res; abort(); } } void galera::ReplicatorSMM::update_state_uuid (const wsrep_uuid_t& uuid) { if (state_uuid_ != uuid) { *(const_cast(&state_uuid_)) = uuid; std::ostringstream os; os << state_uuid_; strncpy(const_cast(state_uuid_str_), os.str().c_str(), sizeof(state_uuid_str_)); } st_.set(uuid, WSREP_SEQNO_UNDEFINED, safe_to_bootstrap_); } void galera::ReplicatorSMM::abort() { gcs_.close(); gu_abort(); } galera-3-25.3.20/galera/src/SConscript0000644000015300001660000000341513042054732017175 0ustar jenkinsjenkins Import('env') libgaleraxx_env = env.Clone() # Include paths libgaleraxx_env.Append(CPPPATH = Split(''' # #/common #/galerautils/src #/gcache/src #/gcs/src ''')) libgaleraxx_srcs = [ 'mapped_buffer.cpp', 'write_set.cpp', 'data_set.cpp', 'key_set.cpp', 'write_set_ng.cpp', 'trx_handle.cpp', 'key_entry_os.cpp', 'wsdb.cpp', 'certification.cpp', 'galera_service_thd.cpp', 'wsrep_params.cpp', 'replicator_smm_params.cpp', 'gcs_action_source.cpp', 'galera_info.cpp', 'replicator.cpp', 'ist.cpp', 'gcs_dummy.cpp', 'saved_state.cpp' ] libgaleraxx_env.StaticLibrary('galera++', libgaleraxx_srcs) # Environment for multimaster library build mmlib_env = libgaleraxx_env.Clone() mmlib_env.Append(CPPFLAGS = ' -DGALERA_MULTIMASTER') mmlib_env.Replace(SHOBJPREFIX = 'libmmgalera++-') # Environment to compile provider unit (part of multimaster library) # This is needed to hardcode version and revision mmprovider_env = mmlib_env.Clone() Import ('GALERA_VER', 'GALERA_REV') mmprovider_env.Append(CPPFLAGS = ' -DGALERA_VER=\\"' + GALERA_VER + '\\"') mmprovider_env.Append(CPPFLAGS = ' -DGALERA_REV=\\"' + GALERA_REV + '\\"') # env.Append(LIBGALERA_OBJS = libgaleraxx_env.SharedObject(libgaleraxx_srcs)) env.Append(LIBMMGALERA_OBJS = mmlib_env.SharedObject([ 'replicator_smm.cpp', 'replicator_str.cpp', 'replicator_smm_stats.cpp' ])) env.Append(LIBMMGALERA_OBJS = mmprovider_env.SharedObject([ 'wsrep_provider.cpp' ])) galera-3-25.3.20/galera/src/wsrep_params.cpp0000644000015300001660000000403713042054732020373 0ustar jenkinsjenkins// // Copyright (C) 2010-2014 Codership Oy // #include "wsrep_params.hpp" #include "gu_dbug.h" #include "gu_debug_sync.hpp" void wsrep_set_params (galera::Replicator& repl, const char* params) { if (!params) return; std::vector > pv; gu::Config::parse (pv, params); for (size_t i(0); i < pv.size(); ++i) { const std::string& key(pv[i].first); const std::string& value(pv[i].second); try { if (key == galera::Replicator::Param::debug_log) { bool val(gu::from_string(value)); if (val == true) { gu_conf_debug_on(); } else { gu_conf_debug_off(); } } #ifdef GU_DBUG_ON else if (key == galera::Replicator::Param::dbug) { if (value.empty()) { GU_DBUG_POP(); } else { 
GU_DBUG_PUSH(value.c_str()); } } else if (key == galera::Replicator::Param::signal) { gu_debug_sync_signal(value); } #endif // GU_DBUG_ON else { log_debug << "Setting param '" << key << "' = '" << value << '\''; repl.param_set(key, value); } } catch (gu::NotFound&) { log_warn << "Unknown parameter '" << key << "'"; gu_throw_error(EINVAL) << "Unknown parameter' " << key << "'"; } catch (gu::Exception& e) { log_warn << "Setting parameter '" << key << "' to '" << value << "' failed: " << e.what(); throw; } } } char* wsrep_get_params(const galera::Replicator& repl) { std::ostringstream os; os << repl.params(); return strdup(os.str().c_str()); } galera-3-25.3.20/galera/src/wsrep_params.hpp0000644000015300001660000000050113042054732020370 0ustar jenkinsjenkins// // Copyright (C) 2010 Codership Oy // #ifndef WSREP_PARAMS_HPP #define WSREP_PARAMS_HPP #include "wsrep_api.h" #include "replicator.hpp" void wsrep_set_params (galera::Replicator& repl, const char* params); char* wsrep_get_params(const galera::Replicator& repl); #endif /* WSREP_PARAMS_HPP */ galera-3-25.3.20/galera/src/fsm.hpp0000644000015300001660000001337713042054732016471 0ustar jenkinsjenkins// // Copyright (C) 2010 Codership Oy // #ifndef GALERA_FSM_HPP #define GALERA_FSM_HPP #include "gu_unordered.hpp" #include "gu_throw.hpp" #include #include namespace galera { class EmptyGuard { public: bool operator()() const { return true; } }; class EmptyAction { public: void operator()() { } }; template class FSM { public: class TransAttr { public: TransAttr() : pre_guard_(0), post_guard_(0), pre_action_(0), post_action_(0) { } std::list pre_guard_; std::list post_guard_; std::list pre_action_; std::list post_action_; }; typedef gu::UnorderedMap TransMap; FSM(State const initial_state) : delete_(true), trans_map_(new TransMap), state_(initial_state), state_hist_() { } FSM(TransMap* const trans_map, State const initial_state) : delete_(false), trans_map_(trans_map), state_(initial_state), state_hist_() { } ~FSM() { if (delete_ == true) delete trans_map_; } void shift_to(State const state) { typename TransMap::iterator i(trans_map_->find(Transition(state_, state))); if (i == trans_map_->end()) { log_fatal << "FSM: no such a transition " << state_ << " -> " << state; // gu_throw_fatal << "FSM: no such a transition " // << state_ << " -> " << state; abort(); // we want to catch it in the stack } typename std::list::const_iterator gi; for (gi = i->second.pre_guard_.begin(); gi != i->second.pre_guard_.end(); ++gi) { if ((*gi)() == false) { log_fatal << "FSM: pre guard failed for " << state_ << " -> " << state; gu_throw_fatal << "FSM: pre guard failed for " << state_ << " -> " << state; } } typename std::list::iterator ai; for (ai = i->second.pre_action_.begin(); ai != i->second.pre_action_.end(); ++ai) { (*ai)(); } state_hist_.push_back(state_); state_ = state; for (ai = i->second.post_action_.begin(); ai != i->second.post_action_.end(); ++ai) { (*ai)(); } for (gi = i->second.post_guard_.begin(); gi != i->second.post_guard_.end(); ++gi) { if ((*gi)() == false) { log_fatal << "FSM: post guard failed for " << state_ << " -> " << state; gu_throw_fatal << "FSM: post guard failed for " << state_ << " -> " << state; } } } const State& operator()() const { return state_; } void add_transition(Transition const& trans) { if (trans_map_->insert( std::make_pair(trans, TransAttr())).second == false) { gu_throw_fatal << "transition " << trans.from() << " -> " << trans.to() << " already exists"; } } void add_pre_guard(Transition const& trans, Guard const& guard) 
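        // Guards registered here are evaluated by shift_to() before the
        // state is changed; if any pre guard returns false, shift_to()
        // logs a fatal error and throws.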
{ typename TransMap::iterator i(trans_map_->find(trans)); if (i == trans_map_->end()) { gu_throw_fatal << "no such a transition " << trans.from() << " -> " << trans.to(); } i->second.pre_guard_.push_back(guard); } void add_post_guard(Transition const& trans, Guard const& guard) { typename TransMap::iterator i(trans_map_->find(trans)); if (i == trans_map_->end()) { gu_throw_fatal << "no such a transition " << trans.from() << " -> " << trans.to(); } i->second.post_guard_.push_back(guard); } void add_pre_action(Transition const& trans, Action const& action) { typename TransMap::iterator i(trans_map_->find(trans)); if (i == trans_map_->end()) { gu_throw_fatal << "no such a transition " << trans.from() << " -> " << trans.to(); } i->second.pre_action_.push_back(action); } void add_post_action(Transition const& trans, Action const& action) { typename TransMap::iterator i(trans_map_->find(trans)); if (i == trans_map_->end()) { gu_throw_fatal << "no such a transition " << trans.from() << " -> " << trans.to(); } i->second.post_action_.push_back(action); } private: FSM(const FSM&); void operator=(const FSM&); bool delete_; TransMap* const trans_map_; State state_; std::vector state_hist_; }; } #endif // GALERA_FSM_HPP galera-3-25.3.20/galera/src/monitor.hpp0000644000015300001660000003216513042054732017367 0ustar jenkinsjenkins// // Copyright (C) 2010-2013 Codership Oy // #ifndef GALERA_MONITOR_HPP #define GALERA_MONITOR_HPP #include "trx_handle.hpp" #include // for gu::Mutex and gu::Cond #include #include namespace galera { template class Monitor { private: struct Process { Process() : obj_(0), cond_(), wait_cond_(), state_(S_IDLE) { } const C* obj_; gu::Cond cond_; gu::Cond wait_cond_; enum State { S_IDLE, // Slot is free S_WAITING, // Waiting to enter applying critical section S_CANCELED, S_APPLYING, // Applying S_FINISHED // Finished } state_; private: // non-copyable Process(const Process& other); void operator=(const Process&); }; static const ssize_t process_size_ = (1ULL << 16); static const size_t process_mask_ = process_size_ - 1; public: Monitor() : mutex_(), cond_(), last_entered_(-1), last_left_(-1), drain_seqno_(GU_LLONG_MAX), process_(new Process[process_size_]), entered_(0), oooe_(0), oool_(0), win_size_(0) { } ~Monitor() { delete[] process_; if (entered_ > 0) { log_info << "mon: entered " << entered_ << " oooe fraction " << double(oooe_)/entered_ << " oool fraction " << double(oool_)/entered_; } else { log_info << "apply mon: entered 0"; } } void set_initial_position(wsrep_seqno_t seqno) { gu::Lock lock(mutex_); if (last_entered_ == -1 || seqno == -1) { // first call or reset last_entered_ = last_left_ = seqno; } else { // drain monitor up to seqno but don't reset last_entered_ // or last_left_ drain_common(seqno, lock); drain_seqno_ = GU_LLONG_MAX; } if (seqno != -1) { const size_t idx(indexof(seqno)); process_[idx].wait_cond_.broadcast(); } } void enter(C& obj) { const wsrep_seqno_t obj_seqno(obj.seqno()); const size_t idx(indexof(obj_seqno)); gu::Lock lock(mutex_); assert(obj_seqno > last_left_); pre_enter(obj, lock); if (gu_likely(process_[idx].state_ != Process::S_CANCELED)) { assert(process_[idx].state_ == Process::S_IDLE); process_[idx].state_ = Process::S_WAITING; process_[idx].obj_ = &obj; #ifdef GU_DBUG_ON obj.debug_sync(mutex_); #endif // GU_DBUG_ON while (may_enter(obj) == false && process_[idx].state_ == Process::S_WAITING) { obj.unlock(); lock.wait(process_[idx].cond_); obj.lock(); } if (process_[idx].state_ != Process::S_CANCELED) { assert(process_[idx].state_ == 
Process::S_WAITING || process_[idx].state_ == Process::S_APPLYING); process_[idx].state_ = Process::S_APPLYING; ++entered_; oooe_ += ((last_left_ + 1) < obj_seqno); win_size_ += (last_entered_ - last_left_); return; } } assert(process_[idx].state_ == Process::S_CANCELED); process_[idx].state_ = Process::S_IDLE; gu_throw_error(EINTR); } void leave(const C& obj) { #ifndef NDEBUG size_t idx(indexof(obj.seqno())); #endif /* NDEBUG */ gu::Lock lock(mutex_); assert(process_[idx].state_ == Process::S_APPLYING || process_[idx].state_ == Process::S_CANCELED); assert(process_[indexof(last_left_)].state_ == Process::S_IDLE); post_leave(obj, lock); } void self_cancel(C& obj) { wsrep_seqno_t const obj_seqno(obj.seqno()); size_t idx(indexof(obj_seqno)); gu::Lock lock(mutex_); assert(obj_seqno > last_left_); while (obj_seqno - last_left_ >= process_size_) // TODO: exit on error { log_warn << "Trying to self-cancel seqno out of process " << "space: obj_seqno - last_left_ = " << obj_seqno << " - " << last_left_ << " = " << (obj_seqno - last_left_) << ", process_size_: " << process_size_ << ". Deadlock is very likely."; obj.unlock(); lock.wait(cond_); obj.lock(); } assert(process_[idx].state_ == Process::S_IDLE || process_[idx].state_ == Process::S_CANCELED); if (obj_seqno > last_entered_) last_entered_ = obj_seqno; if (obj_seqno <= drain_seqno_) { post_leave(obj, lock); } else { process_[idx].state_ = Process::S_FINISHED; } } void interrupt(const C& obj) { size_t idx (indexof(obj.seqno())); gu::Lock lock(mutex_); while (obj.seqno() - last_left_ >= process_size_) // TODO: exit on error { lock.wait(cond_); } if ((process_[idx].state_ == Process::S_IDLE && obj.seqno() > last_left_ ) || process_[idx].state_ == Process::S_WAITING ) { process_[idx].state_ = Process::S_CANCELED; process_[idx].cond_.signal(); // since last_left + 1 cannot be <= S_WAITING we're not // modifying a window here. No broadcasting. } else { log_debug << "interrupting " << obj.seqno() << " state " << process_[idx].state_ << " le " << last_entered_ << " ll " << last_left_; } } wsrep_seqno_t last_left() const { gu::Lock lock(mutex_); return last_left_; } ssize_t size() const { return process_size_; } bool would_block (wsrep_seqno_t seqno) const { return (seqno - last_left_ >= process_size_ || seqno > drain_seqno_); } void drain(wsrep_seqno_t seqno) { gu::Lock lock(mutex_); while (drain_seqno_ != GU_LLONG_MAX) { lock.wait(cond_); } drain_common(seqno, lock); // there can be some stale canceled entries update_last_left(); drain_seqno_ = GU_LLONG_MAX; cond_.broadcast(); } void wait(wsrep_seqno_t seqno) { gu::Lock lock(mutex_); if (last_left_ < seqno) { size_t idx(indexof(seqno)); lock.wait(process_[idx].wait_cond_); } } void wait(wsrep_seqno_t seqno, const gu::datetime::Date& wait_until) { gu::Lock lock(mutex_); if (last_left_ < seqno) { size_t idx(indexof(seqno)); lock.wait(process_[idx].wait_cond_, wait_until); } } void get_stats(double* oooe, double* oool, double* win_size) { gu::Lock lock(mutex_); if (entered_ > 0) { *oooe = (oooe_ > 0 ? double(oooe_)/entered_ : .0); *oool = (oool_ > 0 ? double(oool_)/entered_ : .0); *win_size = (win_size_ > 0 ? 
double(win_size_)/entered_ : .0); } else { *oooe = .0; *oool = .0; *win_size = .0; } } void flush_stats() { gu::Lock lock(mutex_); oooe_ = 0; oool_ = 0; win_size_ = 0; entered_ = 0; } private: size_t indexof(wsrep_seqno_t seqno) { return (seqno & process_mask_); } bool may_enter(const C& obj) const { return obj.condition(last_entered_, last_left_); } // wait until it is possible to grab slot in monitor, // update last entered void pre_enter(C& obj, gu::Lock& lock) { assert(last_left_ <= last_entered_); const wsrep_seqno_t obj_seqno(obj.seqno()); while (would_block (obj_seqno)) // TODO: exit on error { obj.unlock(); lock.wait(cond_); obj.lock(); } if (last_entered_ < obj_seqno) last_entered_ = obj_seqno; } void update_last_left() { for (wsrep_seqno_t i = last_left_ + 1; i <= last_entered_; ++i) { Process& a(process_[indexof(i)]); if (Process::S_FINISHED == a.state_) { a.state_ = Process::S_IDLE; last_left_ = i; a.wait_cond_.broadcast(); } else { break; } } assert(last_left_ <= last_entered_); } void wake_up_next() { for (wsrep_seqno_t i = last_left_ + 1; i <= last_entered_; ++i) { Process& a(process_[indexof(i)]); if (a.state_ == Process::S_WAITING && may_enter(*a.obj_) == true) { // We need to set state to APPLYING here because if // it is the last_left_ + 1 and it gets canceled in // the race that follows exit from this function, // there will be nobody to clean up and advance // last_left_. a.state_ = Process::S_APPLYING; a.cond_.signal(); } } } void post_leave(const C& obj, gu::Lock& lock) { const wsrep_seqno_t obj_seqno(obj.seqno()); const size_t idx(indexof(obj_seqno)); if (last_left_ + 1 == obj_seqno) // we're shrinking window { process_[idx].state_ = Process::S_IDLE; last_left_ = obj_seqno; process_[idx].wait_cond_.broadcast(); update_last_left(); oool_ += (last_left_ > obj_seqno); // wake up waiters that may remain above us (last_left_ // now is max) wake_up_next(); } else { process_[idx].state_ = Process::S_FINISHED; } process_[idx].obj_ = 0; assert((last_left_ >= obj_seqno && process_[idx].state_ == Process::S_IDLE) || process_[idx].state_ == Process::S_FINISHED); assert(last_left_ != last_entered_ || process_[indexof(last_left_)].state_ == Process::S_IDLE); if ((last_left_ >= obj_seqno) || // - occupied window shrinked (last_left_ >= drain_seqno_)) // - this is to notify drain that // we reached drain_seqno_ { cond_.broadcast(); } } void drain_common(wsrep_seqno_t seqno, gu::Lock& lock) { log_debug << "draining up to " << seqno; drain_seqno_ = seqno; if (last_left_ > drain_seqno_) { log_debug << "last left greater than drain seqno"; for (wsrep_seqno_t i = drain_seqno_; i <= last_left_; ++i) { const Process& a(process_[indexof(i)]); log_debug << "applier " << i << " in state " << a.state_; } } while (last_left_ < drain_seqno_) lock.wait(cond_); } Monitor(const Monitor&); void operator=(const Monitor&); gu::Mutex mutex_; gu::Cond cond_; wsrep_seqno_t last_entered_; wsrep_seqno_t last_left_; wsrep_seqno_t drain_seqno_; Process* process_; long entered_; // entered long oooe_; // out of order entered long oool_; // out of order left long win_size_; // window between last_left_ and last_entered_ }; } #endif // GALERA_APPLY_MONITOR_HPP galera-3-25.3.20/galera/src/replicator.hpp0000644000015300001660000001304113042054732020034 0ustar jenkinsjenkins// // Copyright (C) 2010-2014 Codership Oy // #ifndef GALERA_REPLICATOR_HPP #define GALERA_REPLICATOR_HPP #include "wsrep_api.h" #include "galera_exception.hpp" #include #include namespace galera { class Statement; class RowId; class 
TrxHandle; //! @class Galera // // @brief Abstract Galera replicator interface class Replicator { public: struct Param { static std::string const debug_log; #ifdef GU_DBUG_ON static std::string const dbug; static std::string const signal; #endif // GU_DBUG_ON }; static const char* const TRIVIAL_SST; typedef enum { S_DESTROYED, S_CLOSED, S_CLOSING, S_CONNECTED, S_JOINING, S_JOINED, S_SYNCED, S_DONOR } State; Replicator() { } virtual ~Replicator() { } virtual wsrep_status_t connect(const std::string& cluster_name, const std::string& cluster_url, const std::string& state_donor, bool bootstrap) = 0; virtual wsrep_status_t close() = 0; virtual wsrep_status_t async_recv(void* recv_ctx) = 0; virtual int trx_proto_ver() const = 0; virtual int repl_proto_ver() const = 0; virtual TrxHandle* get_local_trx(wsrep_trx_id_t, bool) = 0; virtual void unref_local_trx(TrxHandle* trx) = 0; virtual void discard_local_trx(TrxHandle* trx_id) = 0; virtual TrxHandle* local_conn_trx(wsrep_conn_id_t conn_id, bool create) = 0; virtual void discard_local_conn_trx(wsrep_conn_id_t conn_id) = 0; virtual void discard_local_conn(wsrep_conn_id_t conn_id) = 0; virtual wsrep_status_t replicate(TrxHandle* trx, wsrep_trx_meta_t*) = 0; virtual wsrep_status_t pre_commit(TrxHandle* trx, wsrep_trx_meta_t*) =0; virtual wsrep_status_t post_commit(TrxHandle* trx) = 0; virtual wsrep_status_t post_rollback(TrxHandle* trx) = 0; virtual wsrep_status_t replay_trx(TrxHandle* trx, void* replay_ctx) = 0; virtual void abort_trx(TrxHandle* trx) = 0; virtual wsrep_status_t causal_read(wsrep_gtid_t*) = 0; virtual wsrep_status_t to_isolation_begin(TrxHandle* trx, wsrep_trx_meta_t*) = 0; virtual wsrep_status_t to_isolation_end(TrxHandle* trx) = 0; virtual wsrep_status_t preordered_collect(wsrep_po_handle_t& handle, const struct wsrep_buf* data, size_t count, bool copy) = 0; virtual wsrep_status_t preordered_commit(wsrep_po_handle_t& handle, const wsrep_uuid_t& source, uint64_t flags, int pa_range, bool commit) =0; virtual wsrep_status_t sst_sent(const wsrep_gtid_t& state_id, int rcode) = 0; virtual wsrep_status_t sst_received(const wsrep_gtid_t& state_id, const void* state, size_t state_len, int rcode) = 0; // action source interface virtual void process_trx(void* recv_ctx, TrxHandle* trx) = 0; virtual void process_commit_cut(wsrep_seqno_t seq, wsrep_seqno_t seqno_l) = 0; virtual void process_conf_change(void* recv_ctx, const wsrep_view_info_t& view_info, int repl_proto, State next_state, wsrep_seqno_t seqno_l) = 0; virtual void process_state_req(void* recv_ctx, const void* req, size_t req_size, wsrep_seqno_t seqno_l, wsrep_seqno_t donor_seq) = 0; virtual void process_join(wsrep_seqno_t seqno, wsrep_seqno_t seqno_l) = 0; virtual void process_sync(wsrep_seqno_t seqno_l) = 0; virtual const struct wsrep_stats_var* stats_get() const = 0; virtual void stats_reset() = 0; // static void stats_free(struct wsrep_stats_var*) must be declared in // the child class /*! @throws NotFound */ virtual void param_set (const std::string& key, const std::string& value) = 0; /*! 
@throws NotFound */ virtual std::string param_get (const std::string& key) const = 0; virtual const gu::Config& params() const = 0; virtual wsrep_seqno_t pause() = 0; virtual void resume() = 0; virtual void desync() = 0; virtual void resync() = 0; protected: static void register_params(gu::Config&); }; } #endif // GALERA_REPLICATOR_HPP galera-3-25.3.20/galera/src/replicator_smm_params.cpp0000644000015300001660000001567313042054732022263 0ustar jenkinsjenkins/* Copyright (C) 2012-2015 Codership Oy */ #include "replicator_smm.hpp" #include "gcs.hpp" #include "galera_common.hpp" #include "gu_uri.hpp" #include "write_set_ng.hpp" #include "gu_throw.hpp" const std::string galera::ReplicatorSMM::Param::base_host = "base_host"; const std::string galera::ReplicatorSMM::Param::base_port = "base_port"; const std::string galera::ReplicatorSMM::Param::base_dir = "base_dir"; static const std::string common_prefix = "repl."; const std::string galera::ReplicatorSMM::Param::commit_order = common_prefix + "commit_order"; const std::string galera::ReplicatorSMM::Param::causal_read_timeout = common_prefix + "causal_read_timeout"; const std::string galera::ReplicatorSMM::Param::proto_max = common_prefix + "proto_max"; const std::string galera::ReplicatorSMM::Param::key_format = common_prefix + "key_format"; const std::string galera::ReplicatorSMM::Param::max_write_set_size = common_prefix + "max_ws_size"; int const galera::ReplicatorSMM::MAX_PROTO_VER(7); galera::ReplicatorSMM::Defaults::Defaults() : map_() { map_.insert(Default(Param::base_port, BASE_PORT_DEFAULT)); map_.insert(Default(Param::base_dir, BASE_DIR_DEFAULT)); map_.insert(Default(Param::proto_max, gu::to_string(MAX_PROTO_VER))); map_.insert(Default(Param::key_format, "FLAT8")); map_.insert(Default(Param::commit_order, "3")); map_.insert(Default(Param::causal_read_timeout, "PT30S")); const int max_write_set_size(galera::WriteSetNG::MAX_SIZE); map_.insert(Default(Param::max_write_set_size, gu::to_string(max_write_set_size))); } const galera::ReplicatorSMM::Defaults galera::ReplicatorSMM::defaults; galera::ReplicatorSMM::InitConfig::InitConfig(gu::Config& conf, const char* const node_address, const char* const base_dir) { gu::ssl_register_params(conf); Replicator::register_params(conf); std::map::const_iterator i; for (i = defaults.map_.begin(); i != defaults.map_.end(); ++i) { if (i->second.empty()) conf.add(i->first); else conf.add(i->first, i->second); } // what is would be a better protection? int const pv(gu::from_string(conf.get(Param::proto_max))); if (pv > MAX_PROTO_VER) { log_warn << "Can't set '" << Param::proto_max << "' to " << pv << ": maximum supported value is " << MAX_PROTO_VER; conf.add(Param::proto_max, gu::to_string(MAX_PROTO_VER)); } conf.add(COMMON_BASE_HOST_KEY); conf.add(COMMON_BASE_PORT_KEY); if (node_address && strlen(node_address) > 0) { gu::URI na(node_address, false); try { std::string const host = na.get_host(); if (host == "0.0.0.0" || host == "0:0:0:0:0:0:0:0" || host == "::") { gu_throw_error(EINVAL) << "Bad value for 'node_address': '" << host << '\''; } conf.set(BASE_HOST_KEY, host); } catch (gu::NotSet& e) {} try { conf.set(BASE_PORT_KEY, na.get_port()); } catch (gu::NotSet& e) {} } // Now we store directory name to conf. This directory name // could be used by other components, for example by gcomm // to find appropriate location for view state file. 
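    // For example (the path shown here is only illustrative): a caller
    // passing base_dir == "/var/lib/galera" would have that path stored
    // under BASE_DIR below, while a NULL base_dir falls back to
    // BASE_DIR_DEFAULT.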
if (base_dir) { conf.set(BASE_DIR, base_dir); } else { conf.set(BASE_DIR, BASE_DIR_DEFAULT); } /* register variables and defaults from other modules */ gcache::GCache::register_params(conf); if (gcs_register_params(reinterpret_cast(&conf))) { gu_throw_fatal << "Error initializing GCS parameters"; } Certification::register_params(conf); ist::register_params(conf); } galera::ReplicatorSMM::ParseOptions::ParseOptions(Replicator& repl, gu::Config& conf, const char* const opts) { conf.parse(opts); if (conf.get(Replicator::Param::debug_log)) { gu_conf_debug_on(); } else { gu_conf_debug_off(); } #ifdef GU_DBUG_ON if (conf.is_set(galera::Replicator::Param::dbug)) { GU_DBUG_PUSH(conf.get(galera::Replicator::Param::dbug).c_str()); } else { GU_DBUG_POP(); } if (conf.is_set(galera::Replicator::Param::signal)) { gu_debug_sync_signal(conf.get(galera::Replicator::Param::signal)); } #endif /* GU_DBUG_ON */ } /* helper for param_set() below */ void galera::ReplicatorSMM::set_param (const std::string& key, const std::string& value) { if (key == Param::commit_order) { log_error << "setting '" << key << "' during runtime not allowed"; gu_throw_error(EPERM) << "setting '" << key << "' during runtime not allowed"; } else if (key == Param::causal_read_timeout) { causal_read_timeout_ = gu::datetime::Period(value); } else if (key == Param::base_host || key == Param::base_port || key == Param::base_dir || key == Param::proto_max) { // nothing to do here, these params take effect only at // provider (re)start } else if (key == Param::key_format) { trx_params_.key_format_ = KeySet::version(value); } else if (key == Param::max_write_set_size) { trx_params_.max_write_set_size_ = gu::from_string(value); } else { log_warn << "parameter '" << key << "' not found"; assert(0); throw gu::NotFound(); } } void galera::ReplicatorSMM::param_set (const std::string& key, const std::string& value) { try { if (config_.get(key) == value) return; } catch (gu::NotSet&) {} bool found(false); // Note: base_host is treated separately here as it cannot have // default value known at compile time. if (defaults.map_.find(key) != defaults.map_.end() || key == Param::base_host) // is my key? 
{ found = true; set_param (key, value); config_.set (key, value); } if (key == Certification::PARAM_LOG_CONFLICTS) { cert_.set_log_conflicts(value); return; } // this key might be for another module else if (0 != key.find(common_prefix)) { try { gcs_.param_set (key, value); found = true; } catch (gu::NotFound&) {} try { gcache_.param_set (key, value); found = true; } catch (gu::NotFound&) {} } if (!found) throw gu::NotFound(); } std::string galera::ReplicatorSMM::param_get (const std::string& key) const { return config_.get(key); } galera-3-25.3.20/galera/src/wsrep_provider.cpp0000644000015300001660000006333113042054732020744 0ustar jenkinsjenkins// // Copyright (C) 2010-2014 Codership Oy // #include "key_data.hpp" #if defined(GALERA_MULTIMASTER) #include "replicator_smm.hpp" #define REPL_CLASS galera::ReplicatorSMM #else #error "Not implemented" #endif #include "wsrep_params.hpp" #include using galera::KeyOS; using galera::WriteSet; using galera::TrxHandle; using galera::TrxHandleLock; extern "C" { const char* wsrep_interface_version = (char*)WSREP_INTERFACE_VERSION; } extern "C" wsrep_status_t galera_init(wsrep_t* gh, const struct wsrep_init_args* args) { assert(gh != 0); try { gh->ctx = new REPL_CLASS (args); // Moved into galera::ReplicatorSMM::ParseOptions::ParseOptions() // wsrep_set_params(*reinterpret_cast(gh->ctx), // args->options); return WSREP_OK; } catch (gu::Exception& e) { log_error << e.what(); } #ifdef NDEBUG catch (std::exception& e) { log_error << e.what(); } catch (gu::NotFound& e) { /* Unrecognized parameter (logged by gu::Config::set()) */ } catch (...) { log_fatal << "non-standard exception"; } #endif return WSREP_NODE_FAIL; } extern "C" uint64_t galera_capabilities(wsrep_t* gh) { assert(gh != 0); assert(gh->ctx != 0); static uint64_t const v4_caps(WSREP_CAP_MULTI_MASTER | WSREP_CAP_CERTIFICATION | WSREP_CAP_PARALLEL_APPLYING | WSREP_CAP_TRX_REPLAY | WSREP_CAP_ISOLATION | WSREP_CAP_PAUSE | WSREP_CAP_CAUSAL_READS); static uint64_t const v5_caps(WSREP_CAP_INCREMENTAL_WRITESET | WSREP_CAP_UNORDERED | WSREP_CAP_PREORDERED); uint64_t caps(v4_caps); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); if (repl->repl_proto_ver() >= 5) caps |= v5_caps; return caps; } extern "C" void galera_tear_down(wsrep_t* gh) { assert(gh != 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); if (repl != 0) { delete repl; gh->ctx = 0; } } extern "C" wsrep_status_t galera_parameters_set (wsrep_t* gh, const char* params) { assert(gh != 0); // cppcheck-suppress nullPointer assert(gh->ctx != 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); // cppcheck-suppress nullPointer if (gh) { try { wsrep_set_params (*repl, params); return WSREP_OK; } catch (gu::NotFound&) { log_warn << "Unrecognized parameter in '" << params << "'"; return WSREP_WARNING; } catch (std::exception& e) { log_debug << e.what(); // better logged in wsrep_set_params } } else { log_error << "Attempt to set parameter(s) on uninitialized replicator."; } return WSREP_NODE_FAIL; } extern "C" char* galera_parameters_get (wsrep_t* gh) { assert(gh != 0); assert(gh->ctx != 0); try { REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); return wsrep_get_params(*repl); } catch (std::exception& e) { log_error << e.what(); return 0; } catch (...) 
{ log_fatal << "non-standard exception"; return 0; } } extern "C" wsrep_status_t galera_connect (wsrep_t* gh, const char* cluster_name, const char* cluster_url, const char* state_donor, wsrep_bool_t bootstrap) { assert(gh != 0); assert(gh->ctx != 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); try { return repl->connect(cluster_name, cluster_url, state_donor ? state_donor : "", bootstrap); } catch (gu::Exception& e) { log_error << "Failed to connect to cluster: " << e.what(); return WSREP_NODE_FAIL; } #ifdef NDEBUG catch (std::exception& e) { log_error << e.what(); return WSREP_NODE_FAIL; } catch (...) { log_fatal << "non-standard exception"; return WSREP_FATAL; } #endif // ! NDEBUG } extern "C" wsrep_status_t galera_disconnect(wsrep_t *gh) { assert(gh != 0); assert(gh->ctx != 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); try { return repl->close(); } catch (std::exception& e) { log_error << e.what(); return WSREP_NODE_FAIL; } catch (...) { log_fatal << "non-standard exception"; return WSREP_FATAL; } } extern "C" wsrep_status_t galera_recv(wsrep_t *gh, void *recv_ctx) { assert(gh != 0); assert(gh->ctx != 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); #ifdef NDEBUG try { #endif /* NDEBUG */ return repl->async_recv(recv_ctx); #ifdef NDEBUG } catch (gu::Exception& e) { log_error << e.what(); switch (e.get_errno()) { case ENOTRECOVERABLE: return WSREP_FATAL; default: return WSREP_NODE_FAIL; } } catch (std::exception& e) { log_error << e.what(); } catch (...) { log_fatal << "non-standard exception"; } #endif // NDEBUG return WSREP_FATAL; } static TrxHandle* get_local_trx(REPL_CLASS* const repl, wsrep_ws_handle_t* const handle, bool const create) { TrxHandle* trx; assert(handle != 0); if (handle->opaque != 0) { trx = static_cast(handle->opaque); assert(trx->trx_id() == handle->trx_id || wsrep_trx_id_t(-1) == handle->trx_id); trx->ref(); } else { trx = repl->get_local_trx(handle->trx_id, create); handle->opaque = trx; } return trx; } extern "C" wsrep_status_t galera_replay_trx(wsrep_t* gh, wsrep_ws_handle_t* trx_handle, void* recv_ctx) { assert(gh != 0); assert(gh->ctx != 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); TrxHandle* trx(get_local_trx(repl, trx_handle, false)); assert(trx != 0); wsrep_status_t retval; try { TrxHandleLock lock(*trx); retval = repl->replay_trx(trx, recv_ctx); } catch (std::exception& e) { log_warn << "failed to replay trx: " << *trx; log_warn << e.what(); retval = WSREP_CONN_FAIL; } catch (...) { log_fatal << "non-standard exception"; retval = WSREP_FATAL; } repl->unref_local_trx(trx); return retval; } extern "C" wsrep_status_t galera_abort_pre_commit(wsrep_t* gh, wsrep_seqno_t bf_seqno, wsrep_trx_id_t victim_trx) { assert(gh != 0); assert(gh->ctx != 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); wsrep_status_t retval; TrxHandle* trx(repl->get_local_trx(victim_trx)); if (!trx) return WSREP_OK; try { TrxHandleLock lock(*trx); repl->abort_trx(trx); retval = WSREP_OK; } catch (std::exception& e) { log_error << e.what(); retval = WSREP_NODE_FAIL; } catch (...) 
{ log_fatal << "non-standard exception"; retval = WSREP_FATAL; } repl->unref_local_trx(trx); return retval; } static inline void discard_local_trx(REPL_CLASS* repl, wsrep_ws_handle_t* ws_handle, TrxHandle* trx) { repl->unref_local_trx(trx); repl->discard_local_trx(trx); ws_handle->opaque = 0; } extern "C" wsrep_status_t galera_post_commit (wsrep_t* gh, wsrep_ws_handle_t* ws_handle) { assert(gh != 0); assert(gh->ctx != 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); TrxHandle* trx(get_local_trx(repl, ws_handle, false)); if (trx == 0) { log_debug << "trx " << ws_handle->trx_id << " not found"; return WSREP_OK; } wsrep_status_t retval; try { TrxHandleLock lock(*trx); retval = repl->post_commit(trx); } catch (std::exception& e) { log_error << e.what(); retval = WSREP_NODE_FAIL; } catch (...) { log_fatal << "non-standard exception"; retval = WSREP_FATAL; } discard_local_trx(repl, ws_handle, trx); return retval; } extern "C" wsrep_status_t galera_post_rollback(wsrep_t* gh, wsrep_ws_handle_t* ws_handle) { assert(gh != 0); assert(gh->ctx != 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); TrxHandle* trx(get_local_trx(repl, ws_handle, false)); if (trx == 0) { log_debug << "trx " << ws_handle->trx_id << " not found"; return WSREP_OK; } wsrep_status_t retval; try { TrxHandleLock lock(*trx); retval = repl->post_rollback(trx); } catch (std::exception& e) { log_error << e.what(); retval = WSREP_NODE_FAIL; } catch (...) { log_fatal << "non-standard exception"; retval = WSREP_FATAL; } discard_local_trx(repl, ws_handle, trx); return retval; } static inline void append_data_array (TrxHandle* const trx, const struct wsrep_buf* const data, size_t const count, wsrep_data_type_t const type, bool const copy) { for (size_t i(0); i < count; ++i) { trx->append_data(data[i].ptr, data[i].len, type, copy); } } extern "C" wsrep_status_t galera_pre_commit(wsrep_t* const gh, wsrep_conn_id_t const conn_id, wsrep_ws_handle_t* const trx_handle, uint32_t const flags, wsrep_trx_meta_t* const meta) { assert(gh != 0); assert(gh->ctx != 0); if (meta != 0) { meta->gtid = WSREP_GTID_UNDEFINED; meta->depends_on = WSREP_SEQNO_UNDEFINED; } REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); TrxHandle* trx(get_local_trx(repl, trx_handle, /*rbr_data != 0*/ false)); if (trx == 0) { // no data to replicate return WSREP_OK; } wsrep_status_t retval; try { TrxHandleLock lock(*trx); trx->set_conn_id(conn_id); // /* rbr_data should clearly persist over pre_commit() call */ // append_data_array (trx, rbr_data, rbr_data_len, false, false); trx->set_flags(TrxHandle::wsrep_flags_to_trx_flags(flags)); retval = repl->replicate(trx, meta); assert((!(retval == WSREP_OK || retval == WSREP_BF_ABORT) || trx->global_seqno() > 0)); if (retval == WSREP_OK) { assert(trx->last_seen_seqno() >= 0); retval = repl->pre_commit(trx, meta); } assert(retval == WSREP_OK || retval == WSREP_TRX_FAIL || retval == WSREP_BF_ABORT); } catch (gu::Exception& e) { log_error << e.what(); if (e.get_errno() == EMSGSIZE) retval = WSREP_SIZE_EXCEEDED; else retval = WSREP_NODE_FAIL; } catch (std::exception& e) { log_error << e.what(); retval = WSREP_NODE_FAIL; } catch (...) 
{ log_fatal << "non-standard exception"; retval = WSREP_FATAL; } repl->unref_local_trx(trx); return retval; } extern "C" wsrep_status_t galera_append_key(wsrep_t* const gh, wsrep_ws_handle_t* const trx_handle, const wsrep_key_t* const keys, size_t const keys_num, wsrep_key_type_t const key_type, wsrep_bool_t const copy) { assert(gh != 0); assert(gh->ctx != 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); TrxHandle* trx(get_local_trx(repl, trx_handle, true)); assert(trx != 0); wsrep_status_t retval; try { TrxHandleLock lock(*trx); for (size_t i(0); i < keys_num; ++i) { galera::KeyData k (repl->trx_proto_ver(), keys[i].key_parts, keys[i].key_parts_num, key_type, copy); trx->append_key(k); } retval = WSREP_OK; } catch (std::exception& e) { log_warn << e.what(); retval = WSREP_CONN_FAIL; } catch (...) { log_fatal << "non-standard exception"; retval = WSREP_FATAL; } repl->unref_local_trx(trx); return retval; } extern "C" wsrep_status_t galera_append_data(wsrep_t* const wsrep, wsrep_ws_handle_t* const trx_handle, const struct wsrep_buf* const data, size_t const count, wsrep_data_type_t const type, wsrep_bool_t const copy) { assert(wsrep != 0); assert(wsrep->ctx != 0); assert(data != NULL); assert(count > 0); if (data == NULL) { // no data to replicate return WSREP_OK; } REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(wsrep->ctx)); TrxHandle* trx(get_local_trx(repl, trx_handle, true)); assert(trx != 0); wsrep_status_t retval; try { TrxHandleLock lock(*trx); if (WSREP_DATA_ORDERED == type) append_data_array(trx, data, count, type, copy); retval = WSREP_OK; } catch (std::exception& e) { log_warn << e.what(); retval = WSREP_CONN_FAIL; } catch (...) { log_fatal << "non-standard exception"; retval = WSREP_FATAL; } repl->unref_local_trx(trx); return retval; } extern "C" wsrep_status_t galera_causal_read(wsrep_t* const wsrep, wsrep_gtid_t* const gtid) { assert(wsrep != 0); assert(wsrep->ctx != 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(wsrep->ctx)); wsrep_status_t retval; try { retval = repl->causal_read(gtid); } catch (std::exception& e) { log_warn << e.what(); retval = WSREP_CONN_FAIL; } catch (...) { log_fatal << "non-standard exception"; retval = WSREP_FATAL; } return retval; } extern "C" wsrep_status_t galera_free_connection(wsrep_t* const gh, wsrep_conn_id_t const conn_id) { assert(gh != 0); assert(gh->ctx != 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); try { repl->discard_local_conn(conn_id); return WSREP_OK; } catch (std::exception& e) { log_warn << e.what(); return WSREP_CONN_FAIL; } catch (...) 
{ log_fatal << "non-standard exception"; return WSREP_FATAL; } } extern "C" wsrep_status_t galera_to_execute_start(wsrep_t* const gh, wsrep_conn_id_t const conn_id, const wsrep_key_t* const keys, size_t const keys_num, const struct wsrep_buf* const data, size_t const count, wsrep_trx_meta_t* const meta) { assert(gh != 0); assert(gh->ctx != 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); TrxHandle* trx(repl->local_conn_trx(conn_id, true)); assert(trx != 0); wsrep_status_t retval; try { TrxHandleLock lock(*trx); for (size_t i(0); i < keys_num; ++i) { galera::KeyData k(repl->trx_proto_ver(), keys[i].key_parts, keys[i].key_parts_num, WSREP_KEY_EXCLUSIVE,false); trx->append_key(k); } append_data_array(trx, data, count, WSREP_DATA_ORDERED, false); trx->set_flags(TrxHandle::wsrep_flags_to_trx_flags( WSREP_FLAG_COMMIT | WSREP_FLAG_ISOLATION)); retval = repl->replicate(trx, meta); assert((retval == WSREP_OK && trx->global_seqno() > 0) || (retval != WSREP_OK && trx->global_seqno() < 0)); if (retval == WSREP_OK) { retval = repl->to_isolation_begin(trx, meta); } } catch (std::exception& e) { log_warn << e.what(); retval = WSREP_CONN_FAIL; } catch (...) { log_fatal << "non-standard exception"; retval = WSREP_FATAL; } if (retval != WSREP_OK) // galera_to_execute_end() won't be called { repl->discard_local_conn_trx(conn_id); // trx is not needed anymore if (trx->global_seqno() < 0) // no seqno -> no index -> no automatic purging { trx->unref(); // implicit destructor } } return retval; } extern "C" wsrep_status_t galera_to_execute_end(wsrep_t* const gh, wsrep_conn_id_t const conn_id) { assert(gh != 0); assert(gh->ctx != 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); wsrep_status_t retval; TrxHandle* trx(repl->local_conn_trx(conn_id, false)); try { TrxHandleLock lock(*trx); repl->to_isolation_end(trx); repl->discard_local_conn_trx(conn_id); return WSREP_OK; // trx will be unreferenced (destructed) during purge } catch (std::exception& e) { log_warn << e.what(); return WSREP_CONN_FAIL; } catch (...) { log_fatal << "non-standard exception"; return WSREP_FATAL; } return retval; } extern "C" wsrep_status_t galera_preordered_collect (wsrep_t* const gh, wsrep_po_handle_t* const handle, const struct wsrep_buf* const data, size_t const count, wsrep_bool_t const copy) { assert(gh != 0); assert(gh->ctx != 0); assert(handle != 0); assert(data != 0); assert(count > 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); try { return repl->preordered_collect(*handle, data, count, copy); } catch (std::exception& e) { log_warn << e.what(); return WSREP_TRX_FAIL; } catch (...) { log_fatal << "non-standard exception"; return WSREP_FATAL; } } extern "C" wsrep_status_t galera_preordered_commit (wsrep_t* const gh, wsrep_po_handle_t* const handle, const wsrep_uuid_t* const source_id, uint32_t const flags, int const pa_range, wsrep_bool_t const commit) { assert(gh != 0); assert(gh->ctx != 0); assert(handle != 0); assert(source_id != 0 || false == commit); assert(pa_range >= 0 || false == commit); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); try { return repl->preordered_commit(*handle, *source_id, flags, pa_range, commit); } catch (std::exception& e) { log_warn << e.what(); return WSREP_TRX_FAIL; } catch (...) 
{ log_fatal << "non-standard exception"; return WSREP_FATAL; } } extern "C" wsrep_status_t galera_sst_sent (wsrep_t* const gh, const wsrep_gtid_t* const state_id, int const rcode) { assert(gh != 0); assert(gh->ctx != 0); assert(state_id != 0); assert(rcode <= 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); return repl->sst_sent(*state_id, rcode); } extern "C" wsrep_status_t galera_sst_received (wsrep_t* const gh, const wsrep_gtid_t* const state_id, const void* const state, size_t const state_len, int const rcode) { assert(gh != 0); assert(gh->ctx != 0); assert(state_id != 0); assert(rcode <= 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); if (rcode < 0) { assert(state_id->seqno == WSREP_SEQNO_UNDEFINED); } return repl->sst_received(*state_id, state, state_len, rcode); } extern "C" wsrep_status_t galera_snapshot(wsrep_t* wsrep, const void* msg, size_t msg_len, const char* donor_spec) { return WSREP_NOT_IMPLEMENTED; } extern "C" struct wsrep_stats_var* galera_stats_get (wsrep_t* gh) { assert(gh != 0); assert(gh->ctx != 0); REPL_CLASS* repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); return const_cast(repl->stats_get()); } extern "C" void galera_stats_free (wsrep_t* gh, struct wsrep_stats_var* s) { assert(gh != 0); assert(gh->ctx != 0); REPL_CLASS* repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); return repl->stats_free(s); //REPL_CLASS::stats_free(s); } extern "C" void galera_stats_reset (wsrep_t* gh) { assert(gh != 0); assert(gh->ctx != 0); REPL_CLASS* repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); repl->stats_reset(); } extern "C" wsrep_seqno_t galera_pause (wsrep_t* gh) { assert(gh != 0); assert(gh->ctx != 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); try { return repl->pause(); } catch (gu::Exception& e) { log_error << e.what(); return -e.get_errno(); } } extern "C" wsrep_status_t galera_resume (wsrep_t* gh) { assert(gh != 0); assert(gh->ctx != 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); try { repl->resume(); return WSREP_OK; } catch (gu::Exception& e) { log_error << e.what(); return WSREP_NODE_FAIL; } } extern "C" wsrep_status_t galera_desync (wsrep_t* gh) { assert(gh != 0); assert(gh->ctx != 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); try { repl->desync(); return WSREP_OK; } catch (gu::Exception& e) { log_error << e.what(); return WSREP_TRX_FAIL; } } extern "C" wsrep_status_t galera_resync (wsrep_t* gh) { assert(gh != 0); assert(gh->ctx != 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); try { repl->resync(); return WSREP_OK; } catch (gu::Exception& e) { log_error << e.what(); return WSREP_NODE_FAIL; } } extern "C" wsrep_status_t galera_lock (wsrep_t* gh, const char* name, wsrep_bool_t shared, uint64_t owner, int64_t timeout) { assert(gh != 0); assert(gh->ctx != 0); return WSREP_NOT_IMPLEMENTED; } extern "C" wsrep_status_t galera_unlock (wsrep_t* gh, const char* name, uint64_t owner) { assert(gh != 0); assert(gh->ctx != 0); return WSREP_OK; } extern "C" bool galera_is_locked (wsrep_t* gh, const char* name, uint64_t* owner, wsrep_uuid_t* node) { assert(gh != 0); assert(gh->ctx != 0); return false; } static wsrep_t galera_str = { WSREP_INTERFACE_VERSION, &galera_init, &galera_capabilities, &galera_parameters_set, &galera_parameters_get, &galera_connect, &galera_disconnect, &galera_recv, &galera_pre_commit, &galera_post_commit, &galera_post_rollback, &galera_replay_trx, &galera_abort_pre_commit, &galera_append_key, &galera_append_data, &galera_causal_read, 
&galera_free_connection, &galera_to_execute_start, &galera_to_execute_end, &galera_preordered_collect, &galera_preordered_commit, &galera_sst_sent, &galera_sst_received, &galera_snapshot, &galera_stats_get, &galera_stats_free, &galera_stats_reset, &galera_pause, &galera_resume, &galera_desync, &galera_resync, &galera_lock, &galera_unlock, &galera_is_locked, "Galera", GALERA_VER "(r" GALERA_REV ")", "Codership Oy ", &galera_tear_down, NULL, NULL }; /* Prototype to make compiler happy */ extern "C" int wsrep_loader(wsrep_t *hptr); extern "C" int wsrep_loader(wsrep_t *hptr) { if (!hptr) return EINVAL; try { *hptr = galera_str; } catch (...) { return ENOTRECOVERABLE; } return WSREP_OK; } galera-3-25.3.20/galera/src/gcs_dummy.cpp0000644000015300001660000001450413042054732017657 0ustar jenkinsjenkins// // Copyright (C) 2011-2012 Codership Oy // #include "galera_gcs.hpp" namespace galera { DummyGcs::DummyGcs(gu::Config& config, gcache::GCache& cache, int repl_proto_ver, int appl_proto_ver, const char* node_name, const char* node_incoming) : gconf_ (&config), gcache_ (&cache), mtx_ (), cond_ (), global_seqno_ (0), local_seqno_ (0), uuid_ (), last_applied_ (GCS_SEQNO_ILL), state_ (S_OPEN), schedule_ (0), cc_ (0), cc_size_ (0), my_name_ (node_name ? node_name : "not specified"), incoming_ (node_incoming ? node_incoming : "not given"), repl_proto_ver_(repl_proto_ver), appl_proto_ver_(appl_proto_ver), report_last_applied_(false) { gu_uuid_generate (&uuid_, 0, 0); } DummyGcs::DummyGcs() : gconf_ (0), gcache_ (0), mtx_ (), cond_ (), global_seqno_ (0), local_seqno_ (0), uuid_ (), last_applied_ (GCS_SEQNO_ILL), state_ (S_OPEN), schedule_ (0), cc_ (0), cc_size_ (0), my_name_ ("not specified"), incoming_ ("not given"), repl_proto_ver_(1), appl_proto_ver_(1), report_last_applied_(false) { gu_uuid_generate (&uuid_, 0, 0); } DummyGcs::~DummyGcs() { gu::Lock lock(mtx_); assert(0 == schedule_); if (cc_) { assert (cc_size_ > 0); ::free(cc_); } } ssize_t DummyGcs::generate_cc (bool primary) { cc_size_ = sizeof(gcs_act_conf_t) + primary * (my_name_.length() + incoming_.length() + GU_UUID_STR_LEN + 3); cc_ = ::malloc(cc_size_); if (!cc_) { cc_size_ = 0; return -ENOMEM; } gcs_act_conf_t* const cc(reinterpret_cast(cc_)); if (primary) { cc->seqno = global_seqno_; cc->conf_id = 1; memcpy (cc->uuid, &uuid_, sizeof(uuid_)); cc->memb_num = 1; cc->my_idx = 0; cc->my_state = GCS_NODE_STATE_JOINED; cc->repl_proto_ver = repl_proto_ver_; cc->appl_proto_ver = appl_proto_ver_; char* const str(cc->data); ssize_t offt(0); offt += gu_uuid_print (&uuid_, str, GU_UUID_STR_LEN+1) + 1; offt += sprintf (str + offt, "%s", my_name_.c_str()) + 1; sprintf (str + offt, "%s", incoming_.c_str()); } else { cc->seqno = GCS_SEQNO_ILL; cc->conf_id = -1; cc->memb_num = 0; cc->my_idx = -1; cc->my_state = GCS_NODE_STATE_NON_PRIM; } return cc_size_; } ssize_t DummyGcs::connect(const std::string& cluster_name, const std::string& cluster_url, bool bootstrap) { gu::Lock lock(mtx_); ssize_t ret = generate_cc (true); if (ret > 0) { // state_ = S_CONNECTED; cond_.signal(); ret = 0; } return ret; } ssize_t DummyGcs::set_initial_position(const wsrep_uuid_t& uuid, gcs_seqno_t seqno) { gu::Lock lock(mtx_); if (memcmp(&uuid, &GU_UUID_NIL, sizeof(wsrep_uuid_t)) && seqno >= 0) { uuid_ = *(reinterpret_cast(&uuid)); global_seqno_ = seqno; } return 0; } void DummyGcs::close() { log_info << "Closing DummyGcs"; gu::Lock lock(mtx_); generate_cc (false); // state_ = S_CLOSED; cond_.broadcast(); // usleep(100000); // 0.1s } ssize_t DummyGcs::generate_seqno_action 
(gcs_action& act, gcs_act_type_t type) { gcs_seqno_t* const seqno( reinterpret_cast( ::malloc(sizeof(gcs_seqno_t)))); if (!seqno) return -ENOMEM; *seqno = global_seqno_; ++local_seqno_; act.buf = seqno; act.size = sizeof(gcs_seqno_t); act.seqno_l = local_seqno_; act.type = type; return act.size; } ssize_t DummyGcs::recv(gcs_action& act) { act.seqno_g = GCS_SEQNO_ILL; act.seqno_l = GCS_SEQNO_ILL; gu::Lock lock(mtx_); do { if (cc_) { ++local_seqno_; act.buf = cc_; act.size = cc_size_; act.seqno_l = local_seqno_; act.type = GCS_ACT_CONF; cc_ = 0; cc_size_ = 0; const gcs_act_conf_t* const cc( reinterpret_cast(act.buf)); if (cc->my_idx < 0) { assert (0 == cc->memb_num); state_ = S_CLOSED; } else { assert (1 == cc->memb_num); state_ = S_CONNECTED; } return act.size; } else if (S_CONNECTED == state_) { ssize_t ret = generate_seqno_action(act, GCS_ACT_SYNC); if (ret > 0) state_ = S_SYNCED; return ret; } else if (report_last_applied_) { report_last_applied_ = false; return generate_seqno_action(act, GCS_ACT_COMMIT_CUT); } } while (state_ > S_OPEN && (lock.wait(cond_), true)); switch (state_) { case S_OPEN: return -ENOTCONN; case S_CLOSED: return 0; default: abort(); } } ssize_t DummyGcs::interrupt(ssize_t handle) { log_fatal << "Attempt to interrupt handle: " << handle; abort(); return -ENOSYS; } } galera-3-25.3.20/galera/src/trx_handle.hpp0000644000015300001660000006402713042054732020032 0ustar jenkinsjenkins// // Copyright (C) 2010-2016 Codership Oy // #ifndef GALERA_TRX_HANDLE_HPP #define GALERA_TRX_HANDLE_HPP #include "write_set.hpp" #include "mapped_buffer.hpp" #include "fsm.hpp" #include "key_data.hpp" // for append_key() #include "key_entry_os.hpp" #include "write_set_ng.hpp" #include "wsrep_api.h" #include "gu_mutex.hpp" #include "gu_atomic.hpp" #include "gu_datetime.hpp" #include "gu_unordered.hpp" #include "gu_utils.hpp" #include "gu_macros.hpp" #include "gu_mem_pool.hpp" #include "gu_limits.h" // page size stuff #include namespace galera { static std::string const working_dir = "/tmp"; static int const WS_NG_VERSION = WriteSetNG::VER3; /* new WS version to be used */ class TrxHandle { public: /* signed int here is to detect SIZE < sizeof(TrxHandle) */ static size_t LOCAL_STORAGE_SIZE() { static size_t const ret(gu_page_size_multiple(1 << 13 /* 8Kb */)); return ret; } struct Params { std::string working_dir_; int version_; KeySet::Version key_format_; int max_write_set_size_; Params (const std::string& wdir, int ver, KeySet::Version kformat, int max_write_set_size = WriteSetNG::MAX_SIZE) : working_dir_(wdir), version_(ver), key_format_(kformat), max_write_set_size_(max_write_set_size) {} }; static const Params Defaults; enum Flags { F_COMMIT = 1 << 0, F_ROLLBACK = 1 << 1, F_OOC = 1 << 2, F_MAC_HEADER = 1 << 3, F_MAC_PAYLOAD = 1 << 4, F_ANNOTATION = 1 << 5, F_ISOLATION = 1 << 6, F_PA_UNSAFE = 1 << 7, F_PREORDERED = 1 << 8 }; static inline uint32_t wsrep_flags_to_trx_flags (uint32_t flags) { GU_COMPILE_ASSERT( WSREP_FLAG_COMMIT == int(F_COMMIT) && F_COMMIT == 1 && WSREP_FLAG_ROLLBACK == int(F_ROLLBACK) && F_ROLLBACK == 2, flags_dont_match1); uint32_t ret(flags & COMMON_FLAGS_MASK); if (flags & WSREP_FLAG_ISOLATION) ret |= F_ISOLATION; if (flags & WSREP_FLAG_PA_UNSAFE) ret |= F_PA_UNSAFE; return ret; } static inline uint32_t trx_flags_to_wsrep_flags (uint32_t flags) { GU_COMPILE_ASSERT( WSREP_FLAG_COMMIT == int(F_COMMIT) && F_COMMIT == 1 && WSREP_FLAG_ROLLBACK == int(F_ROLLBACK) && F_ROLLBACK == 2, flags_dont_match2); uint32_t ret(flags & 0x03); // setting F_COMMIT|F_ROLLBACK in one go if 
(flags & F_ISOLATION) ret |= WSREP_FLAG_ISOLATION; if (flags & F_PA_UNSAFE) ret |= WSREP_FLAG_PA_UNSAFE; return ret; } static inline uint32_t wsng_flags_to_trx_flags (uint32_t flags) { GU_COMPILE_ASSERT( WriteSetNG::F_COMMIT == int(F_COMMIT) && F_COMMIT == 1 && WriteSetNG::F_ROLLBACK == int(F_ROLLBACK) && F_ROLLBACK == 2, flags_dont_match3); uint32_t ret(flags & 0x03); // setting F_COMMIT|F_ROLLBACK in one go if (flags & WriteSetNG::F_TOI) ret |= F_ISOLATION; if (flags & WriteSetNG::F_PA_UNSAFE) ret |= F_PA_UNSAFE; return ret; } bool has_mac() const { return ((write_set_flags_ & (F_MAC_HEADER | F_MAC_PAYLOAD)) != 0); } bool has_annotation() const /* shall return 0 for new writeset ver */ { return ((write_set_flags_ & F_ANNOTATION) != 0); } bool is_toi() const { return ((write_set_flags_ & F_ISOLATION) != 0); } bool pa_unsafe() const { return ((write_set_flags_ & F_PA_UNSAFE) != 0); } bool preordered() const { return ((write_set_flags_ & F_PREORDERED) != 0); } typedef enum { S_EXECUTING, S_MUST_ABORT, S_ABORTING, S_REPLICATING, S_CERTIFYING, S_MUST_CERT_AND_REPLAY, S_MUST_REPLAY_AM, // grab apply_monitor, commit_monitor, replay S_MUST_REPLAY_CM, // commit_monitor, replay S_MUST_REPLAY, // replay S_REPLAYING, S_APPLYING, // grabbing apply monitor, applying S_COMMITTING, // grabbing commit monitor, committing changes S_COMMITTED, S_ROLLED_BACK } State; class Transition { public: Transition(State const from, State const to) : from_(from), to_(to) { } State from() const { return from_; } State to() const { return to_; } bool operator==(Transition const& other) const { return (from_ == other.from_ && to_ == other.to_); } class Hash { public: size_t operator()(Transition const& tr) const { return (gu::HashValue(static_cast(tr.from_)) ^ gu::HashValue(static_cast(tr.to_))); } }; private: State from_; State to_; }; typedef FSM Fsm; static Fsm::TransMap trans_map_; // Placeholder for message authentication code class Mac { public: Mac() { } ~Mac() { } size_t serialize(gu::byte_t* buf, size_t buflen, size_t offset) const; size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset); size_t serial_size() const; }; /* slave trx factory */ typedef gu::MemPool SlavePool; static TrxHandle* New(SlavePool& pool) { assert(pool.buf_size() == sizeof(TrxHandle)); void* const buf(pool.acquire()); return new(buf) TrxHandle(pool); } /* local trx factory */ typedef gu::MemPool LocalPool; static TrxHandle* New(LocalPool& pool, const Params& params, const wsrep_uuid_t& source_id, wsrep_conn_id_t conn_id, wsrep_trx_id_t trx_id) { size_t const buf_size(pool.buf_size()); assert(buf_size >= (sizeof(TrxHandle) + sizeof(WriteSetOut))); void* const buf(pool.acquire()); return new(buf) TrxHandle(pool, params, source_id, conn_id, trx_id, static_cast(buf) + sizeof(TrxHandle), buf_size - sizeof(TrxHandle)); } void lock() const { mutex_.lock(); } void unlock() const { mutex_.unlock(); } int version() const { return version_; } bool new_version() const { return version() >= WS_NG_VERSION; } const wsrep_uuid_t& source_id() const { return source_id_; } wsrep_trx_id_t trx_id() const { return trx_id_; } wsrep_conn_id_t conn_id() const { return conn_id_; } void set_conn_id(wsrep_conn_id_t conn_id) { conn_id_ = conn_id; } bool is_local() const { return local_; } bool is_certified() const { return certified_; } void mark_certified() { if (new_version()) { int dw(0); if (gu_likely(depends_seqno_ >= 0)) { dw = global_seqno_ - depends_seqno_; } write_set_in_.set_seqno(global_seqno_, dw); } certified_ = true; } bool is_committed() 
const { return committed_; } void mark_committed() { committed_ = true; } void set_received (const void* action, wsrep_seqno_t seqno_l, wsrep_seqno_t seqno_g) { #ifndef NDEBUG if (last_seen_seqno_ >= seqno_g) { log_fatal << "S: seqno_g: " << seqno_g << ", last_seen: " << last_seen_seqno_ << ", checksum: " << reinterpret_cast(write_set_in_.get_checksum()); } assert(last_seen_seqno_ < seqno_g); #endif action_ = action; local_seqno_ = seqno_l; global_seqno_ = seqno_g; if (write_set_flags_ & F_PREORDERED) { assert(WSREP_SEQNO_UNDEFINED == last_seen_seqno_); last_seen_seqno_ = global_seqno_ - 1; } } /* obtain global and depends seqno from the writeset (IST) */ void set_received_from_ws() { wsrep_seqno_t const seqno_g(write_set_in_.seqno()); set_received(0, -1, seqno_g); wsrep_seqno_t const seqno_d (std::max (global_seqno_ - write_set_in_.pa_range(), WSREP_SEQNO_UNDEFINED)); set_depends_seqno(seqno_d); } void set_last_seen_seqno(wsrep_seqno_t last_seen_seqno) { assert (last_seen_seqno >= 0); assert (last_seen_seqno >= last_seen_seqno_); if (new_version()) write_set_out().set_last_seen(last_seen_seqno); last_seen_seqno_ = last_seen_seqno; } void set_depends_seqno(wsrep_seqno_t seqno_lt) { depends_seqno_ = seqno_lt; } State state() const { return state_(); } void set_state(State state) { state_.shift_to(state); } long gcs_handle() const { return gcs_handle_; } void set_gcs_handle(long gcs_handle) { gcs_handle_ = gcs_handle; } const void* action() const { return action_; } wsrep_seqno_t local_seqno() const { return local_seqno_; } wsrep_seqno_t global_seqno() const { return global_seqno_; } wsrep_seqno_t last_seen_seqno() const { return last_seen_seqno_; } wsrep_seqno_t depends_seqno() const { return depends_seqno_; } uint32_t flags() const { return write_set_flags_; } void set_flags(uint32_t flags) { write_set_flags_ = flags; if (new_version()) { uint16_t ws_flags(flags & COMMON_FLAGS_MASK); if (flags & F_ISOLATION) ws_flags |= WriteSetNG::F_TOI; if (flags & F_PA_UNSAFE) ws_flags |= WriteSetNG::F_PA_UNSAFE; write_set_out().set_flags(ws_flags); } } void append_key(const KeyData& key) { /*! 
protection against protocol change during trx lifetime */ if (key.proto_ver != version_) { gu_throw_error(EINVAL) << "key version '" << key.proto_ver << "' does not match to trx version' " << version_ << "'"; } if (new_version()) { write_set_out().append_key(key); } else { write_set_.append_key(key); } } void append_data(const void* data, const size_t data_len, wsrep_data_type_t type, bool store) { if (new_version()) { switch (type) { case WSREP_DATA_ORDERED: write_set_out().append_data(data, data_len, store); break; case WSREP_DATA_UNORDERED: write_set_out().append_unordered(data, data_len, store); break; case WSREP_DATA_ANNOTATION: write_set_out().append_annotation(data, data_len, store); break; } } else { switch (type) { case WSREP_DATA_ORDERED: write_set_.append_data(data, data_len); break; case WSREP_DATA_UNORDERED: // just ignore unordered for compatibility with // previous versions break; case WSREP_DATA_ANNOTATION: append_annotation(reinterpret_cast(data), data_len); break; } } } static const size_t max_annotation_size_ = (1 << 16); void append_annotation(const gu::byte_t* buf, size_t buf_len) { buf_len = std::min(buf_len, max_annotation_size_ - annotation_.size()); annotation_.insert(annotation_.end(), buf, buf + buf_len); } const gu::Buffer& annotation() const { return annotation_; } const WriteSet& write_set() const { return write_set_; } size_t prepare_write_set_collection() { if (new_version()) assert(0); size_t offset; if (write_set_collection_.empty() == true) { offset = serial_size(); write_set_collection_.resize(offset); } else { offset = write_set_collection_.size(); } (void)serialize(&write_set_collection_[0], offset, 0); return offset; } void append_write_set(const void* data, size_t data_len) { if (new_version()) assert(0); const size_t offset(prepare_write_set_collection()); write_set_collection_.resize(offset + data_len); std::copy(reinterpret_cast(data), reinterpret_cast(data) + data_len, &write_set_collection_[0] + offset); } void append_write_set(const gu::Buffer& ws) { if (new_version()) { /* trx->unserialize() must have done all the job */ } else { const size_t offset(prepare_write_set_collection()); write_set_collection_.resize(offset + ws.size()); std::copy(ws.begin(), ws.end(), &write_set_collection_[0] + offset); } } MappedBuffer& write_set_collection() { return write_set_collection_; } void set_write_set_buffer(const gu::byte_t* buf, size_t buf_len) { write_set_buffer_.first = buf; write_set_buffer_.second = buf_len; } std::pair write_set_buffer() const { // If external write set buffer location not specified, // return location from write_set_collection_. This is still // needed for unit tests and IST which don't use GCache // storage. 
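        // Note: the returned pair is (pointer, length). In the fallback
        // case below the first serial_size() bytes of write_set_collection_
        // hold the serialized TrxHandle itself, so they are skipped.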
if (write_set_buffer_.first == 0) { size_t off(serial_size()); if (write_set_collection_.size() < off) { gu_throw_fatal << "Write set buffer not populated"; } return std::make_pair(&write_set_collection_[0] + off, write_set_collection_.size() - off); } return write_set_buffer_; } bool empty() const { if (new_version()) { return write_set_out().is_empty(); } else { return (write_set_.empty() == true && write_set_collection_.size() <= serial_size()); } } void flush(size_t mem_limit) { if (new_version()) { assert(0); return; } if (write_set_.get_key_buf().size() + write_set_.get_data().size() > mem_limit || mem_limit == 0) { gu::Buffer buf(write_set_.serial_size()); (void)write_set_.serialize(&buf[0], buf.size(), 0); append_write_set(buf); write_set_.clear(); } } void clear() { if (new_version()) { return; } write_set_.clear(); write_set_collection_.clear(); } void ref() { ++refcnt_; } void unref() { if (refcnt_.sub_and_fetch(1) == 0) // delete and return to pool { void* const ptr(this); gu::MemPool& mp(mem_pool_); this->~TrxHandle(); mp.recycle(ptr); } } size_t refcnt() const { return refcnt_(); } WriteSetOut& write_set_out() { /* WriteSetOut is a temporary object needed only at the writeset * collection stage. Since it may allocate considerable resources * we dont't want it to linger as long as TrxHandle is needed and * want to destroy it ASAP. So it is located immediately after * TrxHandle in the buffer allocated by TrxHandleWithStore. * I'll be damned if this+1 is not sufficiently well aligned. */ assert(new_version()); assert(wso_); return *reinterpret_cast(this + 1); } const WriteSetOut& write_set_out() const { return const_cast(this)->write_set_out(); } const WriteSetIn& write_set_in () const { return write_set_in_; } void apply(void* recv_ctx, wsrep_apply_cb_t apply_cb, const wsrep_trx_meta_t& meta) const /* throws */; void unordered(void* recv_ctx, wsrep_unordered_cb_t apply_cb) const; void verify_checksum() const /* throws */ { write_set_in_.verify_checksum(); } uint64_t get_checksum() const { if (new_version()) return write_set_in_.get_checksum(); else return 0; } size_t size() const { if (new_version()) return write_set_in_.size(); else return serial_size(); } void update_stats(gu::Atomic& kc, gu::Atomic& kb, gu::Atomic& db, gu::Atomic& ub) { assert(new_version()); kc += write_set_in_.keyset().count(); kb += write_set_in_.keyset().size(); db += write_set_in_.dataset().size(); ub += write_set_in_.unrdset().size(); } bool exit_loop() const { return exit_loop_; } void set_exit_loop(bool x) { exit_loop_ |= x; } typedef gu::UnorderedMap, KeyEntryPtrHash, KeyEntryPtrEqualAll> CertKeySet; CertKeySet& cert_keys() { return cert_keys_; } size_t serial_size() const; size_t serialize (gu::byte_t* buf, size_t buflen, size_t offset) const; size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset); void release_write_set_out() { if (gu_likely(new_version())) { assert(wso_); write_set_out().~WriteSetOut(); wso_ = false; } } private: static uint32_t const COMMON_FLAGS_MASK = 0x03; /* slave trx ctor */ explicit TrxHandle(gu::MemPool& mp) : source_id_ (WSREP_UUID_UNDEFINED), conn_id_ (-1), trx_id_ (-1), mutex_ (), write_set_collection_(Defaults.working_dir_), state_ (&trans_map_, S_EXECUTING), local_seqno_ (WSREP_SEQNO_UNDEFINED), global_seqno_ (WSREP_SEQNO_UNDEFINED), last_seen_seqno_ (WSREP_SEQNO_UNDEFINED), depends_seqno_ (WSREP_SEQNO_UNDEFINED), timestamp_ (), write_set_ (Defaults.version_), write_set_in_ (), annotation_ (), cert_keys_ (), write_set_buffer_ (0, 0), mem_pool_ (mp), 
action_ (0), gcs_handle_ (-1), version_ (Defaults.version_), refcnt_ (1), write_set_flags_ (0), local_ (false), certified_ (false), committed_ (false), exit_loop_ (false), wso_ (false), mac_ () {} /* local trx ctor */ TrxHandle(gu::MemPool& mp, const Params& params, const wsrep_uuid_t& source_id, wsrep_conn_id_t conn_id, wsrep_trx_id_t trx_id, gu::byte_t* reserved, size_t reserved_size) : source_id_ (source_id), conn_id_ (conn_id), trx_id_ (trx_id), mutex_ (), write_set_collection_(params.working_dir_), state_ (&trans_map_, S_EXECUTING), local_seqno_ (WSREP_SEQNO_UNDEFINED), global_seqno_ (WSREP_SEQNO_UNDEFINED), last_seen_seqno_ (WSREP_SEQNO_UNDEFINED), depends_seqno_ (WSREP_SEQNO_UNDEFINED), timestamp_ (gu_time_calendar()), write_set_ (params.version_), write_set_in_ (), annotation_ (), cert_keys_ (), write_set_buffer_ (0, 0), mem_pool_ (mp), action_ (0), gcs_handle_ (-1), version_ (params.version_), refcnt_ (1), write_set_flags_ (0), local_ (true), certified_ (false), committed_ (false), exit_loop_ (false), wso_ (new_version()), mac_ () { init_write_set_out(params, reserved, reserved_size); } ~TrxHandle() { if (wso_) release_write_set_out(); } void init_write_set_out(const Params& params, gu::byte_t* store, size_t store_size) { if (wso_) { assert(store); assert(store_size > sizeof(WriteSetOut)); WriteSetOut* wso = &write_set_out(); assert(static_cast(wso) == static_cast(store)); new (wso) WriteSetOut (params.working_dir_, trx_id_, params.key_format_, store + sizeof(WriteSetOut), store_size - sizeof(WriteSetOut), 0, WriteSetNG::MAX_VERSION, DataSet::MAX_VERSION, DataSet::MAX_VERSION, params.max_write_set_size_); } } TrxHandle(const TrxHandle&); void operator=(const TrxHandle& other); wsrep_uuid_t source_id_; wsrep_conn_id_t conn_id_; wsrep_trx_id_t trx_id_; mutable gu::Mutex mutex_; MappedBuffer write_set_collection_; FSM state_; wsrep_seqno_t local_seqno_; wsrep_seqno_t global_seqno_; wsrep_seqno_t last_seen_seqno_; wsrep_seqno_t depends_seqno_; int64_t timestamp_; WriteSet write_set_; WriteSetIn write_set_in_; gu::Buffer annotation_; CertKeySet cert_keys_; // Write set buffer location if stored outside TrxHandle. 
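        // Kept as a (pointer, length) pair; the constructors initialize it
        // to (0, 0), which write_set_buffer() treats as "not set" and then
        // falls back to write_set_collection_ above.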
std::pair write_set_buffer_; gu::MemPool& mem_pool_; const void* action_; long gcs_handle_; int version_; gu::Atomic refcnt_; uint32_t write_set_flags_; bool local_; bool certified_; bool committed_; bool exit_loop_; bool wso_; Mac mac_; friend class Wsdb; friend class Certification; friend std::ostream& operator<<(std::ostream& os, const TrxHandle& trx); friend class TrxHandleWithStore; }; /* class TrxHandle */ std::ostream& operator<<(std::ostream& os, TrxHandle::State s); std::ostream& operator<<(std::ostream& os, const TrxHandle& th); class TrxHandleLock { public: TrxHandleLock(TrxHandle& trx) : trx_(trx) { trx_.lock(); } ~TrxHandleLock() { trx_.unlock(); } private: TrxHandle& trx_; }; /* class TrxHnadleLock */ template class Unref2nd { public: void operator()(T& t) const { t.second->unref(); } }; } /* namespace galera*/ #endif // GALERA_TRX_HANDLE_HPP galera-3-25.3.20/galera/src/write_set_ng.cpp0000644000015300001660000002240713042054732020362 0ustar jenkinsjenkins// // Copyright (C) 2013 Codership Oy // #include "write_set_ng.hpp" #include "gu_time.h" #include #include namespace galera { WriteSetNG::Header::Offsets::Offsets ( int a01, int a02, int a03, int a04, int a05, int a06, int a07, int a08, int a09, int a10, int a11, int a12 ) : header_ver_ (a01), header_size_ (a02), sets_ (a03), flags_ (a04), pa_range_ (a05), last_seen_ (a06), seqno_ (a07), timestamp_ (a08), source_id_ (a09), conn_id_ (a10), trx_id_ (a11), crc_ (a12) {} WriteSetNG::Header::Offsets const WriteSetNG::Header::V3 ( V3_HEADER_VERS_OFF, V3_HEADER_SIZE_OFF, V3_SETS_OFF, V3_FLAGS_OFF, V3_PA_RANGE_OFF, V3_LAST_SEEN_OFF, V3_SEQNO_OFF, V3_TIMESTAMP_OFF, V3_SOURCE_ID_OFF, V3_CONN_ID_OFF, V3_TRX_ID_OFF, V3_CRC_OFF ); size_t WriteSetNG::Header::gather (KeySet::Version const kver, DataSet::Version const dver, bool unord, bool annot, uint16_t const flags, const wsrep_uuid_t& source, const wsrep_conn_id_t& conn, const wsrep_trx_id_t& trx, GatherVector& out) { GU_COMPILE_ASSERT(MAX_VERSION <= 15, header_version_too_big); GU_COMPILE_ASSERT(KeySet::MAX_VERSION <= 15, keyset_version_too_big); GU_COMPILE_ASSERT(DataSet::MAX_VERSION <= 3, dataset_version_too_big); assert (uint(ver_) <= MAX_VERSION); assert (uint(kver) <= KeySet::MAX_VERSION); assert (uint(dver) <= DataSet::MAX_VERSION); local_[V3_MAGIC_OFF] = MAGIC_BYTE; local_[V3_HEADER_VERS_OFF] = (version() << 4) | VER3; local_[V3_HEADER_SIZE_OFF] = size(); local_[V3_SETS_OFF] = (kver << 4) | (dver << 2) | (unord * V3_UNORD_FLAG) | (annot * V3_ANNOT_FLAG); uint16_t* const fl(reinterpret_cast(local_ + V3_FLAGS_OFF)); uint16_t* const pa(reinterpret_cast(local_ + V3_PA_RANGE_OFF)); *fl = gu::htog(flags); *pa = 0; // certified ws will have dep. 
window of at least 1 wsrep_uuid_t* const sc(reinterpret_cast(local_ + V3_SOURCE_ID_OFF)); *sc = source; uint64_t* const cn(reinterpret_cast(local_ + V3_CONN_ID_OFF)); uint64_t* const tx(reinterpret_cast(local_ + V3_TRX_ID_OFF)); *cn = gu::htog(conn); *tx = gu::htog(trx); gu::Buf const buf = { ptr_, size() }; out->push_back(buf); return buf.size; } void WriteSetNG::Header::set_last_seen(const wsrep_seqno_t& last_seen) { assert (ptr_); assert (size_ > 0); uint64_t* const ls (reinterpret_cast(ptr_ +V3_LAST_SEEN_OFF)); uint64_t* const ts (reinterpret_cast(ptr_ +V3_TIMESTAMP_OFF)); *ls = gu::htog(last_seen); *ts = gu::htog(gu_time_monotonic()); update_checksum (ptr_, size() - V3_CHECKSUM_SIZE); } void WriteSetNG::Header::set_seqno(const wsrep_seqno_t& seqno, uint16_t const pa_range) { assert (ptr_); assert (size_ > 0); assert (seqno > 0); uint16_t* const pa(reinterpret_cast(ptr_ + V3_PA_RANGE_OFF)); uint64_t* const sq(reinterpret_cast(ptr_ + V3_SEQNO_OFF)); *pa = gu::htog(pa_range); *sq = gu::htog(seqno); update_checksum (ptr_, size() - V3_CHECKSUM_SIZE); } gu::Buf WriteSetNG::Header::copy(bool const include_keys, bool const include_unrd) const { assert (ptr_ != &local_[0]); assert (size_t(size()) <= sizeof(local_)); gu::byte_t* const lptr(&local_[0]); ::memcpy (lptr, ptr_, size_); gu::byte_t const mask(0x0c | (0xf0 * include_keys) | (0x02 * include_unrd)); lptr[V3_SETS_OFF] &= mask; // zero up versions of non-included sets update_checksum (lptr, size() - V3_CHECKSUM_SIZE); gu::Buf ret = { lptr, size_ }; return ret; } void WriteSetNG::Header::Checksum::verify (Version ver, const void* const ptr, ssize_t const hsize) { assert (hsize > 0); type_t check(0), hcheck(0); size_t const csize(hsize - V3_CHECKSUM_SIZE); compute (ptr, csize, check); hcheck = *(reinterpret_cast( reinterpret_cast(ptr) + csize )); if (gu_likely(check == hcheck)) return; gu_throw_error (EINVAL) << "Header checksum mismatch: computed " << std::hex << std::setfill('0') << std::setw(sizeof(check) << 1) << check << ", found " << std::setw(sizeof(hcheck) << 1) << hcheck; } const char WriteSetOut::keys_suffix[] = "_keys"; const char WriteSetOut::data_suffix[] = "_data"; const char WriteSetOut::unrd_suffix[] = "_unrd"; const char WriteSetOut::annt_suffix[] = "_annt"; void WriteSetIn::init (ssize_t const st) { assert(false == check_thr_); const gu::byte_t* const pptr (header_.payload()); ssize_t const psize(size_ - header_.size()); assert (psize >= 0); KeySet::Version const kver(header_.keyset_ver()); if (kver != KeySet::EMPTY) gu_trace(keys_.init (kver, pptr, psize)); assert (false == check_); assert (false == check_thr_); if (gu_likely(st > 0)) /* checksum enforced */ { if (size_ >= st) { /* buffer too big, start checksumming in background */ int const err(pthread_create (&check_thr_id_, NULL, checksum_thread, this)); if (gu_likely(0 == err)) { check_thr_ = true; return; } log_warn << "Starting checksum thread failed: " << err << '(' << ::strerror(err) << ')'; /* fall through to checksum in foreground */ } checksum(); checksum_fin(); } else /* checksum skipped, pretend it's alright */ { check_ = true; } } void WriteSetIn::checksum() { const gu::byte_t* pptr (header_.payload()); ssize_t psize(size_ - header_.size()); assert (psize >= 0); try { if (keys_.size() > 0) { gu_trace(keys_.checksum()); psize -= keys_.size(); assert (psize >= 0); pptr += keys_.size(); } DataSet::Version const dver(header_.dataset_ver()); if (gu_likely(dver != DataSet::EMPTY)) { assert (psize > 0); gu_trace(data_.init(dver, pptr, psize)); 
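            // The payload remaining after the ordered dataset may contain
            // an unordered set and/or an annotation set, depending on the
            // header flags checked below; psize is expected to reach
            // exactly 0 once all present sets have been consumed.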
gu_trace(data_.checksum()); size_t tmpsize(data_.size()); psize -= tmpsize; pptr += tmpsize; assert (psize >= 0); if (header_.has_unrd()) { gu_trace(unrd_.init(dver, pptr, psize)); gu_trace(unrd_.checksum()); size_t tmpsize(unrd_.size()); psize -= tmpsize; pptr += tmpsize; } if (header_.has_annt()) { annt_ = new DataSetIn(); gu_trace(annt_->init(dver, pptr, psize)); // we don't care for annotation checksum - it is not a reason // to throw an exception and abort execution // gu_trace(annt_->checksum()); #ifndef NDEBUG psize -= annt_->size(); #endif } } #ifndef NDEBUG assert (psize == 0); #endif check_ = true; } catch (std::exception& e) { log_error << e.what(); } catch (...) { log_error << "Non-standard exception in WriteSet::checksum()"; } } void WriteSetIn::write_annotation(std::ostream& os) const { annt_->rewind(); ssize_t const count(annt_->count()); for (ssize_t i = 0; os.good() && i < count; ++i) { gu::Buf abuf = annt_->next(); os.write(static_cast(abuf.ptr), abuf.size); } } size_t WriteSetIn::gather(GatherVector& out, bool include_keys, bool include_unrd) const { if (include_keys && include_unrd) { gu::Buf buf = { header_.ptr(), size_ }; out->push_back(buf); return size_; } else { out->reserve(out->size() + 4); gu::Buf buf(header_.copy(include_keys, include_unrd)); out->push_back(buf); size_t ret(buf.size); if (include_keys) { buf = keys_.buf(); out->push_back(buf); ret += buf.size; } buf = data_.buf(); out->push_back (buf); ret += buf.size; if (include_unrd) { buf = unrd_.buf(); out->push_back(buf); ret += buf.size; } if (annotated()) { buf = annt_->buf(); out->push_back (buf); ret += buf.size; } return ret; } } } /* namespace galera */ galera-3-25.3.20/galera/src/replicator_smm_stats.cpp0000644000015300001660000003135613042054732022132 0ustar jenkinsjenkins/* Copyright (C) 2010 Codership Oy */ #include "replicator_smm.hpp" #include "uuid.hpp" #include #include // @todo: should be protected static member of the parent class static const size_t GALERA_STAGE_MAX(11); // @todo: should be protected static member of the parent class static const char* state_str[GALERA_STAGE_MAX] = { "Initialized", "Joining", "Joining: preparing for State Transfer", "Joining: requested State Transfer", "Joining: receiving State Transfer", "Joined", "Synced", "Donor/Desynced", "Joining: State Transfer request failed", "Joining: State Transfer failed", "Destroyed" }; // @todo: should be protected static member of the parent class static wsrep_member_status_t state2stats(galera::ReplicatorSMM::State state) { switch (state) { case galera::ReplicatorSMM::S_DESTROYED : case galera::ReplicatorSMM::S_CLOSED : case galera::ReplicatorSMM::S_CLOSING : case galera::ReplicatorSMM::S_CONNECTED : return WSREP_MEMBER_UNDEFINED; case galera::ReplicatorSMM::S_JOINING : return WSREP_MEMBER_JOINER; case galera::ReplicatorSMM::S_JOINED : return WSREP_MEMBER_JOINED; case galera::ReplicatorSMM::S_SYNCED : return WSREP_MEMBER_SYNCED; case galera::ReplicatorSMM::S_DONOR : return WSREP_MEMBER_DONOR; } gu_throw_fatal << "invalid state " << state; } // @todo: should be protected static member of the parent class static const char* state2stats_str(galera::ReplicatorSMM::State state, galera::ReplicatorSMM::SstState sst_state) { using galera::ReplicatorSMM; switch (state) { case galera::ReplicatorSMM::S_DESTROYED : return state_str[10]; case galera::ReplicatorSMM::S_CLOSED : case galera::ReplicatorSMM::S_CLOSING: case galera::ReplicatorSMM::S_CONNECTED: { if (sst_state == ReplicatorSMM::SST_REQ_FAILED) return state_str[8]; else if 
(sst_state == ReplicatorSMM::SST_FAILED) return state_str[9]; else return state_str[0]; } case galera::ReplicatorSMM::S_JOINING: { if (sst_state == ReplicatorSMM::SST_WAIT) return state_str[4]; else return state_str[1]; } case galera::ReplicatorSMM::S_JOINED : return state_str[5]; case galera::ReplicatorSMM::S_SYNCED : return state_str[6]; case galera::ReplicatorSMM::S_DONOR : return state_str[7]; } gu_throw_fatal << "invalid state " << state; } typedef enum status_vars { STATS_STATE_UUID = 0, STATS_PROTOCOL_VERSION, STATS_LAST_APPLIED, STATS_REPLICATED, STATS_REPLICATED_BYTES, STATS_KEYS_COUNT, STATS_KEYS_BYTES, STATS_DATA_BYTES, STATS_UNRD_BYTES, STATS_RECEIVED, STATS_RECEIVED_BYTES, STATS_LOCAL_COMMITS, STATS_LOCAL_CERT_FAILURES, STATS_LOCAL_REPLAYS, STATS_LOCAL_SEND_QUEUE, STATS_LOCAL_SEND_QUEUE_MAX, STATS_LOCAL_SEND_QUEUE_MIN, STATS_LOCAL_SEND_QUEUE_AVG, STATS_LOCAL_RECV_QUEUE, STATS_LOCAL_RECV_QUEUE_MAX, STATS_LOCAL_RECV_QUEUE_MIN, STATS_LOCAL_RECV_QUEUE_AVG, STATS_LOCAL_CACHED_DOWNTO, STATS_FC_PAUSED_NS, STATS_FC_PAUSED_AVG, STATS_FC_SENT, STATS_FC_RECEIVED, STATS_CERT_DEPS_DISTANCE, STATS_APPLY_OOOE, STATS_APPLY_OOOL, STATS_APPLY_WINDOW, STATS_COMMIT_OOOE, STATS_COMMIT_OOOL, STATS_COMMIT_WINDOW, STATS_LOCAL_STATE, STATS_LOCAL_STATE_COMMENT, STATS_CERT_INDEX_SIZE, STATS_CAUSAL_READS, STATS_CERT_INTERVAL, STATS_INCOMING_LIST, STATS_MAX } StatusVars; static const struct wsrep_stats_var wsrep_stats[STATS_MAX + 1] = { { "local_state_uuid", WSREP_VAR_STRING, { 0 } }, { "protocol_version", WSREP_VAR_INT64, { 0 } }, { "last_committed", WSREP_VAR_INT64, { -1 } }, { "replicated", WSREP_VAR_INT64, { 0 } }, { "replicated_bytes", WSREP_VAR_INT64, { 0 } }, { "repl_keys", WSREP_VAR_INT64, { 0 } }, { "repl_keys_bytes", WSREP_VAR_INT64, { 0 } }, { "repl_data_bytes", WSREP_VAR_INT64, { 0 } }, { "repl_other_bytes", WSREP_VAR_INT64, { 0 } }, { "received", WSREP_VAR_INT64, { 0 } }, { "received_bytes", WSREP_VAR_INT64, { 0 } }, { "local_commits", WSREP_VAR_INT64, { 0 } }, { "local_cert_failures", WSREP_VAR_INT64, { 0 } }, { "local_replays", WSREP_VAR_INT64, { 0 } }, { "local_send_queue", WSREP_VAR_INT64, { 0 } }, { "local_send_queue_max", WSREP_VAR_INT64, { 0 } }, { "local_send_queue_min", WSREP_VAR_INT64, { 0 } }, { "local_send_queue_avg", WSREP_VAR_DOUBLE, { 0 } }, { "local_recv_queue", WSREP_VAR_INT64, { 0 } }, { "local_recv_queue_max", WSREP_VAR_INT64, { 0 } }, { "local_recv_queue_min", WSREP_VAR_INT64, { 0 } }, { "local_recv_queue_avg", WSREP_VAR_DOUBLE, { 0 } }, { "local_cached_downto", WSREP_VAR_INT64, { 0 } }, { "flow_control_paused_ns", WSREP_VAR_INT64, { 0 } }, { "flow_control_paused", WSREP_VAR_DOUBLE, { 0 } }, { "flow_control_sent", WSREP_VAR_INT64, { 0 } }, { "flow_control_recv", WSREP_VAR_INT64, { 0 } }, { "cert_deps_distance", WSREP_VAR_DOUBLE, { 0 } }, { "apply_oooe", WSREP_VAR_DOUBLE, { 0 } }, { "apply_oool", WSREP_VAR_DOUBLE, { 0 } }, { "apply_window", WSREP_VAR_DOUBLE, { 0 } }, { "commit_oooe", WSREP_VAR_DOUBLE, { 0 } }, { "commit_oool", WSREP_VAR_DOUBLE, { 0 } }, { "commit_window", WSREP_VAR_DOUBLE, { 0 } }, { "local_state", WSREP_VAR_INT64, { 0 } }, { "local_state_comment", WSREP_VAR_STRING, { 0 } }, { "cert_index_size", WSREP_VAR_INT64, { 0 } }, { "causal_reads", WSREP_VAR_INT64, { 0 } }, { "cert_interval", WSREP_VAR_DOUBLE, { 0 } }, { "incoming_addresses", WSREP_VAR_STRING, { 0 } }, { 0, WSREP_VAR_STRING, { 0 } } }; void galera::ReplicatorSMM::build_stats_vars ( std::vector& stats) { const struct wsrep_stats_var* ptr(wsrep_stats); do { stats.push_back(*ptr); } while (ptr++->name 
!= 0); stats[STATS_STATE_UUID].value._string = state_uuid_str_; } const struct wsrep_stats_var* galera::ReplicatorSMM::stats_get() const { if (S_DESTROYED == state_()) return 0; std::vector sv(wsrep_stats_); sv[STATS_PROTOCOL_VERSION ].value._int64 = protocol_version_; sv[STATS_LAST_APPLIED ].value._int64 = apply_monitor_.last_left(); sv[STATS_REPLICATED ].value._int64 = replicated_(); sv[STATS_REPLICATED_BYTES ].value._int64 = replicated_bytes_(); sv[STATS_KEYS_COUNT ].value._int64 = keys_count_(); sv[STATS_KEYS_BYTES ].value._int64 = keys_bytes_(); sv[STATS_DATA_BYTES ].value._int64 = data_bytes_(); sv[STATS_UNRD_BYTES ].value._int64 = unrd_bytes_(); sv[STATS_RECEIVED ].value._int64 = gcs_as_.received(); sv[STATS_RECEIVED_BYTES ].value._int64 = gcs_as_.received_bytes(); sv[STATS_LOCAL_COMMITS ].value._int64 = local_commits_(); sv[STATS_LOCAL_CERT_FAILURES].value._int64 = local_cert_failures_(); sv[STATS_LOCAL_REPLAYS ].value._int64 = local_replays_(); struct gcs_stats stats; gcs_.get_stats (&stats); sv[STATS_LOCAL_SEND_QUEUE ].value._int64 = stats.send_q_len; sv[STATS_LOCAL_SEND_QUEUE_MAX].value._int64 = stats.send_q_len_max; sv[STATS_LOCAL_SEND_QUEUE_MIN].value._int64 = stats.send_q_len_min; sv[STATS_LOCAL_SEND_QUEUE_AVG].value._double = stats.send_q_len_avg; sv[STATS_LOCAL_RECV_QUEUE ].value._int64 = stats.recv_q_len; sv[STATS_LOCAL_RECV_QUEUE_MAX].value._int64 = stats.recv_q_len_max; sv[STATS_LOCAL_RECV_QUEUE_MIN].value._int64 = stats.recv_q_len_min; sv[STATS_LOCAL_RECV_QUEUE_AVG].value._double = stats.recv_q_len_avg; sv[STATS_LOCAL_CACHED_DOWNTO ].value._int64 = gcache_.seqno_min(); sv[STATS_FC_PAUSED_NS ].value._int64 = stats.fc_paused_ns; sv[STATS_FC_PAUSED_AVG ].value._double = stats.fc_paused_avg; sv[STATS_FC_SENT ].value._int64 = stats.fc_sent; sv[STATS_FC_RECEIVED ].value._int64 = stats.fc_received; double avg_cert_interval(0); double avg_deps_dist(0); size_t index_size(0); cert_.stats_get(avg_cert_interval, avg_deps_dist, index_size); sv[STATS_CERT_DEPS_DISTANCE ].value._double = avg_deps_dist; sv[STATS_CERT_INTERVAL ].value._double = avg_cert_interval; sv[STATS_CERT_INDEX_SIZE ].value._int64 = index_size; double oooe; double oool; double win; const_cast&>(apply_monitor_). get_stats(&oooe, &oool, &win); sv[STATS_APPLY_OOOE ].value._double = oooe; sv[STATS_APPLY_OOOL ].value._double = oool; sv[STATS_APPLY_WINDOW ].value._double = win; const_cast&>(commit_monitor_). get_stats(&oooe, &oool, &win); sv[STATS_COMMIT_OOOE ].value._double = oooe; sv[STATS_COMMIT_OOOL ].value._double = oool; sv[STATS_COMMIT_WINDOW ].value._double = win; sv[STATS_LOCAL_STATE ].value._int64 = state2stats(state_()); sv[STATS_LOCAL_STATE_COMMENT ].value._string = state2stats_str(state_(), sst_state_); sv[STATS_CAUSAL_READS].value._int64 = causal_reads_(); // Get gcs backend status gu::Status status; gcs_.get_status(status); #ifdef GU_DBUG_ON status.insert("debug_sync_waiters", gu_debug_sync_waiters()); #endif // GU_DBUG_ON // Dynamical strings are copied into buffer allocated after stats var array. // Compute space needed. size_t tail_size(0); for (gu::Status::const_iterator i(status.begin()); i != status.end(); ++i) { tail_size += i->first.size() + 1 + i->second.size() + 1; } gu::Lock lock_inc(incoming_mutex_); tail_size += incoming_list_.size() + 1; /* Create a buffer to be passed to the caller. 
*/ // The buffer size needed: // * Space for wsrep_stats_ array // * Space for additional elements from status map // * Trailing space for string store size_t const vec_size( (sv.size() + status.size())*sizeof(struct wsrep_stats_var)); struct wsrep_stats_var* const buf(static_cast( gu_malloc(vec_size + tail_size))); if (buf) { // Resize sv to have enough space for variables from status sv.resize(sv.size() + status.size()); // Initial tail_buf position char* tail_buf(reinterpret_cast(buf + sv.size())); // Assign incoming list strncpy(tail_buf, incoming_list_.c_str(), incoming_list_.size() + 1); sv[STATS_INCOMING_LIST].value._string = tail_buf; tail_buf += incoming_list_.size() + 1; // Iterate over dynamical status variables and assing strings size_t sv_pos(STATS_INCOMING_LIST + 1); for (gu::Status::const_iterator i(status.begin()); i != status.end(); ++i, ++sv_pos) { // Name strncpy(tail_buf, i->first.c_str(), i->first.size() + 1); sv[sv_pos].name = tail_buf; tail_buf += i->first.size() + 1; // Type sv[sv_pos].type = WSREP_VAR_STRING; // Value strncpy(tail_buf, i->second.c_str(), i->second.size() + 1); sv[sv_pos].value._string = tail_buf; tail_buf += i->second.size() + 1; } assert(sv_pos == sv.size() - 1); // NULL terminate sv[sv_pos].name = 0; sv[sv_pos].type = WSREP_VAR_STRING; sv[sv_pos].value._string = 0; assert(static_cast(tail_buf - reinterpret_cast(buf)) == vec_size + tail_size); assert(reinterpret_cast(buf)[vec_size + tail_size - 1] == '\0'); // Finally copy sv vector to buf memcpy(buf, &sv[0], vec_size); } else { log_warn << "Failed to allocate stats vars buffer to " << (vec_size + tail_size) << " bytes. System is running out of memory."; } return buf; } void galera::ReplicatorSMM::stats_reset() { if (S_DESTROYED == state_()) return; gcs_.flush_stats (); apply_monitor_.flush_stats(); commit_monitor_.flush_stats(); cert_.stats_reset(); } void galera::ReplicatorSMM::stats_free(struct wsrep_stats_var* arg) { gu_free(arg); } galera-3-25.3.20/galera/src/write_set.hpp0000644000015300001660000000417213042054732017702 0ustar jenkinsjenkins// // Copyright (C) 2010 Codership Oy // #ifndef GALERA_WRITE_SET_HPP #define GALERA_WRITE_SET_HPP #include "key_os.hpp" #include "key_data.hpp" #include "wsrep_api.h" #include "gu_buffer.hpp" #include "gu_logger.hpp" #include "gu_unordered.hpp" #include #include #include namespace galera { class WriteSet { public: typedef std::deque KeySequence; WriteSet(int version) : version_(version), keys_(), key_refs_(), data_() { } void set_version(int version) { version_ = version; } const gu::Buffer& get_data() const { return data_; } void append_key(const KeyData&); void append_data(const void*data, size_t data_len) { data_.reserve(data_.size() + data_len); data_.insert(data_.end(), reinterpret_cast(data), reinterpret_cast(data) + data_len); } void get_keys(KeySequence&) const; const gu::Buffer& get_key_buf() const { return keys_; } bool empty() const { return (data_.size() == 0 && keys_.size() == 0); } void clear() { keys_.clear(), key_refs_.clear(), data_.clear(); } // Return offset to beginning of key or data segment and length // of that segment static std::pair segment(const gu::byte_t*, size_t, size_t); // Scan key sequence from buffer, return offset from the beginning of // buffer after scan. 
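        // A minimal usage sketch (buf, buf_len and version stand for
        // caller-supplied values, they are not members of this class):
        //   WriteSet::KeySequence ks;
        //   size_t off(WriteSet::keys(buf, buf_len, 0, version, ks));
        //   // ks now holds the scanned keys, off points past the key segment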
static size_t keys(const gu::byte_t*, size_t, size_t, int, KeySequence&); size_t serialize(gu::byte_t*, size_t, size_t) const; size_t unserialize(const gu::byte_t*, size_t, size_t); size_t serial_size() const; private: typedef gu::UnorderedMultimap KeyRefMap; int version_; gu::Buffer keys_; KeyRefMap key_refs_; gu::Buffer data_; }; } #endif // GALERA_WRITE_SET_HPP galera-3-25.3.20/galera/src/mapped_buffer.hpp0000644000015300001660000000320713042054732020472 0ustar jenkinsjenkins// // Copyright (C) 2010 Codership Oy // #ifndef GALERA_MAPPED_BUFFER_HPP #define GALERA_MAPPED_BUFFER_HPP #include #include "gu_buffer.hpp" namespace galera { class MappedBuffer { public: typedef gu::byte_t& reference; typedef gu::byte_t const& const_reference; typedef gu::byte_t* iterator; typedef gu::byte_t const* const_iterator; MappedBuffer(const std::string& working_dir, size_t threshold = 1 << 20); ~MappedBuffer(); reference operator[](size_t i) { return buf_[i]; } const_reference operator[](size_t i) const { return buf_[i]; } void reserve(size_t sz); void resize(size_t sz); void clear(); size_t size() const { return buf_size_; } bool empty() const { return (buf_size_ == 0); } iterator begin() { return buf_; } iterator end() { return (buf_ + buf_size_); } const_iterator begin() const { return buf_; } const_iterator end() const { return (buf_ + buf_size_); } private: MappedBuffer(const MappedBuffer&); void operator=(const MappedBuffer&); const std::string& working_dir_; // working dir for data files std::string file_; int fd_; // file descriptor size_t threshold_; // in-memory threshold gu::byte_t* buf_; // data buffer size_t buf_size_; // buffer size (inserted data size) size_t real_buf_size_; // real buffer size (allocated size) }; } #endif // GALERA_MAPPED_BUFFER_HPP galera-3-25.3.20/galera/src/key_os.hpp0000644000015300001660000002204713042054732017167 0ustar jenkinsjenkins// // Copyright (C) 2011-2013 Codership Oy // #ifndef GALERA_KEY_HPP #define GALERA_KEY_HPP #include "wsrep_api.h" #include "gu_hash.h" #include "gu_serialize.hpp" #include "gu_unordered.hpp" #include "gu_throw.hpp" #include "gu_logger.hpp" #include "gu_vlq.hpp" #include #include #include #include #include namespace galera { // helper to cast from any kind of pointer to void template static inline void* void_cast(const C* c) { return const_cast(reinterpret_cast(c)); } class KeyPartOS { public: KeyPartOS(const gu::byte_t* buf, size_t buf_size) : buf_(buf), buf_size_(buf_size) { } const gu::byte_t* buf() const { return buf_; } size_t size() const { return buf_size_; } size_t key_len() const { #ifndef GALERA_KEY_VLQ return buf_[0]; #else size_t ret; (void)gu::uleb128_decode(buf_, buf_size_, 0, ret); return ret; #endif } #ifndef GALERA_KEY_VLQ const gu::byte_t* key() const { return buf_ + 1; } #else const gu::byte_t* key() const { size_t not_used; return buf_ + gu::uleb128_decode(buf_, buf_size_, 0, not_used); } #endif bool operator==(const KeyPartOS& other) const { return (other.buf_size_ == buf_size_ && memcmp(other.buf_, buf_, buf_size_) == 0); } private: const gu::byte_t* buf_; size_t buf_size_; }; inline std::ostream& operator<<(std::ostream& os, const KeyPartOS& kp) { const std::ostream::fmtflags prev_flags(os.flags(std::ostream::hex)); const char prev_fill(os.fill('0')); for (const gu::byte_t* i(kp.key()); i != kp.key() + kp.key_len(); ++i) { os << std::setw(2) << static_cast(*i); } os.flags(prev_flags); os.fill(prev_fill); return os; } class KeyOS { public: enum { F_SHARED = 0x1 }; KeyOS(int version) : version_(version), flags_(), 
keys_() { } KeyOS(int version, const wsrep_buf_t* keys, size_t keys_len, uint8_t flags) : version_(version), flags_ (flags), keys_ () { if (keys_len > 255) { gu_throw_error(EINVAL) << "maximum number of key parts exceeded: " << keys_len; } switch (version) { case 1: case 2: for (size_t i(0); i < keys_len; ++i) { size_t const offset(keys_.size()); size_t key_len(keys[i].len); const gu::byte_t* base(reinterpret_cast( keys[i].ptr)); #ifndef GALERA_KEY_VLQ if (gu_unlikely(key_len > 0xff)) key_len = 0xff; keys_.reserve(offset + 1 + key_len); keys_.insert(keys_.end(), key_len); keys_.insert(keys_.end(), base, base + key_len); #else size_t len_size(gu::uleb128_size(key_len)); keys_.resize(offset + len_size); (void)gu::uleb128_encode( key_len, &keys_[0], keys_.size(), offset); keys_.insert(keys_.end(), base, base + keys[i].key_len); #endif } break; default: gu_throw_fatal << "unsupported key version: " << version_; } } template KeyOS(int version, Ci begin, Ci end, uint8_t flags) : version_(version), flags_(flags), keys_() { for (Ci i(begin); i != end; ++i) { keys_.insert( keys_.end(), i->buf(), i->buf() + i->size()); } } int version() const { return version_; } template C key_parts() const { C ret; size_t i(0); size_t const keys_size(keys_.size()); while (i < keys_size) { #ifndef GALERA_KEY_VLQ size_t key_len(keys_[i] + 1); #else size_t key_len; size_t offset( gu::uleb128_decode(&keys_[0], keys_size, i, key_len)); key_len += offset - i; #endif if (gu_unlikely((i + key_len) > keys_size)) { gu_throw_fatal << "Keys buffer overflow by " << i + key_len - keys_size << " bytes: " << i + key_len << '/' << keys_size; } KeyPartOS kp(&keys_[i], key_len); ret.push_back(kp); i += key_len; } assert(i == keys_size); return ret; } uint8_t flags() const { return flags_; } bool operator==(const KeyOS& other) const { return (keys_ == other.keys_); } bool equal_all(const KeyOS& other) const { return (version_ == other.version_ && flags_ == other.flags_ && keys_ == other.keys_); } size_t size() const { return keys_.size() + sizeof(*this); } size_t hash() const { return gu_table_hash(&keys_[0], keys_.size()); } size_t hash_with_flags() const { return hash() ^ gu_table_hash(&flags_, sizeof(flags_)); } size_t serialize(gu::byte_t*, size_t, size_t) const; size_t unserialize(const gu::byte_t*, size_t, size_t); size_t serial_size() const; private: friend std::ostream& operator<<(std::ostream& os, const KeyOS& key); int version_; uint8_t flags_; gu::Buffer keys_; }; inline std::ostream& operator<<(std::ostream& os, const KeyOS& key) { std::ostream::fmtflags flags(os.flags()); switch (key.version_) { case 2: os << std::hex << static_cast(key.flags()) << " "; // Fall through case 1: { std::deque dq(key.key_parts >()); std::copy(dq.begin(), dq.end(), std::ostream_iterator(os, " ")); break; } default: gu_throw_fatal << "unsupported key version: " << key.version_; } os.flags(flags); return os; } inline size_t KeyOS::serialize(gu::byte_t* buf, size_t buflen, size_t offset) const { switch (version_) { #ifndef GALERA_KEY_VLQ case 1: return gu::serialize2(keys_, buf, buflen, offset); case 2: offset = gu::serialize1(flags_, buf, buflen, offset); return gu::serialize2(keys_, buf, buflen, offset); #else case 1: { size_t keys_size(keys_.size()); offset = gu::uleb128_encode(keys_size, buf, buflen, offset); assert (offset + key_size <= buflen); std::copy(&keys_[0], &keys_[0] + keys_size, buf + offset); return (offset + keys_size); } #endif default: log_fatal << "Internal error: unsupported key version: " << version_; abort(); return 0; } } 
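    // Inverse of serialize() above: reading a buffer produced for the same
    // version_ restores the packed key parts (and flags_ for version 2) and
    // returns the offset just past the consumed bytes. Illustrative round
    // trip, assuming gu::Buffer is the vector-like byte buffer used for
    // keys_ in this header:
    //   gu::Buffer buf(key.serial_size());
    //   key.serialize(&buf[0], buf.size(), 0);
    //   KeyOS copy(key.version());
    //   copy.unserialize(&buf[0], buf.size(), 0);
    //   assert(copy == key);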
inline size_t KeyOS::unserialize(const gu::byte_t* buf, size_t buflen, size_t offset) { switch (version_) { #ifndef GALERA_KEY_VLQ case 1: return gu::unserialize2(buf, buflen, offset, keys_); case 2: offset = gu::unserialize1(buf, buflen, offset, flags_); return gu::unserialize2(buf, buflen, offset, keys_); #else case 1: { size_t len; offset = gu::uleb128_decode(buf, buflen, offset, len); keys_.resize(len); std::copy(buf + offset, buf + offset + len, keys_.begin()); return (offset + len); } #endif default: gu_throw_error(EPROTONOSUPPORT) << "unsupported key version: " << version_; } } inline size_t KeyOS::serial_size() const { switch (version_) { #ifndef GALERA_KEY_VLQ case 1: return gu::serial_size2(keys_); case 2: return (gu::serial_size(flags_) + gu::serial_size2(keys_)); #else case 1: { size_t size(gu::uleb128_size(keys_.size())); return (size + keys_.size()); } #endif default: log_fatal << "Internal error: unsupported key version: " << version_; abort(); return 0; } } } #endif // GALERA_KEY_HPP galera-3-25.3.20/galera/src/gcs_action_source.cpp0000644000015300001660000001203713042054732021360 0ustar jenkinsjenkins// // Copyright (C) 2010-2014 Codership Oy // #include "replicator.hpp" #include "gcs_action_source.hpp" #include "trx_handle.hpp" #include "gu_serialize.hpp" #include "galera_info.hpp" #include // Exception-safe way to release action pointer when it goes out // of scope class Release { public: Release(struct gcs_action& act, gcache::GCache& gcache) : act_(act), gcache_(gcache) {} ~Release() { switch (act_.type) { case GCS_ACT_TORDERED: break; case GCS_ACT_STATE_REQ: gcache_.free(const_cast(act_.buf)); break; default: ::free(const_cast(act_.buf)); break; } } private: struct gcs_action& act_; gcache::GCache& gcache_; }; static galera::Replicator::State state2repl(const gcs_act_conf_t& conf) { switch (conf.my_state) { case GCS_NODE_STATE_NON_PRIM: if (conf.my_idx >= 0) return galera::Replicator::S_CONNECTED; else return galera::Replicator::S_CLOSING; case GCS_NODE_STATE_PRIM: return galera::Replicator::S_CONNECTED; case GCS_NODE_STATE_JOINER: return galera::Replicator::S_JOINING; case GCS_NODE_STATE_JOINED: return galera::Replicator::S_JOINED; case GCS_NODE_STATE_SYNCED: return galera::Replicator::S_SYNCED; case GCS_NODE_STATE_DONOR: return galera::Replicator::S_DONOR; case GCS_NODE_STATE_MAX:; } gu_throw_fatal << "unhandled gcs state: " << conf.my_state; GU_DEBUG_NORETURN; } galera::GcsActionTrx::GcsActionTrx(TrxHandle::SlavePool& pool, const struct gcs_action& act) : trx_(TrxHandle::New(pool)) // TODO: this dynamic allocation should be unnecessary { assert(act.seqno_l != GCS_SEQNO_ILL); assert(act.seqno_g != GCS_SEQNO_ILL); const gu::byte_t* const buf = static_cast(act.buf); // size_t offset(trx_->unserialize(buf, act.size, 0)); gu_trace(trx_->unserialize(buf, act.size, 0)); //trx_->append_write_set(buf + offset, act.size - offset); // moved to unserialize trx_->set_write_set_buffer(buf + offset, act.size - offset); trx_->set_received(act.buf, act.seqno_l, act.seqno_g); trx_->lock(); } galera::GcsActionTrx::~GcsActionTrx() { assert(trx_->refcnt() >= 1); trx_->unlock(); trx_->unref(); } void galera::GcsActionSource::dispatch(void* const recv_ctx, const struct gcs_action& act, bool& exit_loop) { assert(recv_ctx != 0); assert(act.buf != 0); assert(act.seqno_l > 0); switch (act.type) { case GCS_ACT_TORDERED: { assert(act.seqno_g > 0); GcsActionTrx trx(trx_pool_, act); trx.trx()->set_state(TrxHandle::S_REPLICATING); gu_trace(replicator_.process_trx(recv_ctx, trx.trx())); exit_loop 
= trx.trx()->exit_loop(); // this is the end of trx lifespan break; } case GCS_ACT_COMMIT_CUT: { wsrep_seqno_t seq; gu::unserialize8(static_cast(act.buf), act.size, 0, seq); gu_trace(replicator_.process_commit_cut(seq, act.seqno_l)); break; } case GCS_ACT_CONF: { const gcs_act_conf_t* conf(static_cast(act.buf)); wsrep_view_info_t* view_info( galera_view_info_create(conf, conf->my_state == GCS_NODE_STATE_PRIM) ); gu_trace(replicator_.process_conf_change(recv_ctx, *view_info, conf->repl_proto_ver, state2repl(*conf), act.seqno_l)); free(view_info); if (conf->conf_id < 0 && conf->memb_num == 0) { log_debug << "Received SELF-LEAVE. Closing connection."; // called after being shifted to S_CLOSING state. gcs_.close(); } break; } case GCS_ACT_STATE_REQ: gu_trace(replicator_.process_state_req(recv_ctx, act.buf, act.size, act.seqno_l, act.seqno_g)); break; case GCS_ACT_JOIN: { wsrep_seqno_t seq; gu::unserialize8(static_cast(act.buf), act.size, 0, seq); gu_trace(replicator_.process_join(seq, act.seqno_l)); break; } case GCS_ACT_SYNC: gu_trace(replicator_.process_sync(act.seqno_l)); break; default: gu_throw_fatal << "unrecognized action type: " << act.type; } } ssize_t galera::GcsActionSource::process(void* recv_ctx, bool& exit_loop) { struct gcs_action act; ssize_t rc(gcs_.recv(act)); if (rc > 0) { Release release(act, gcache_); ++received_; received_bytes_ += rc; gu_trace(dispatch(recv_ctx, act, exit_loop)); } return rc; } galera-3-25.3.20/galera/src/key_set.cpp0000644000015300001660000002631713042054732017340 0ustar jenkinsjenkins// // Copyright (C) 2013 Codership Oy // #include "key_set.hpp" #include "gu_logger.hpp" #include "gu_hexdump.hpp" #include #include // std::transform namespace galera { void KeySet::throw_version(int ver) { gu_throw_error(EINVAL) << "Unsupported KeySet version: " << ver; } static const char* ver_str[KeySet::MAX_VERSION + 1] = { "EMPTY", "FLAT8", "FLAT8A", "FLAT16", "FLAT16A" }; KeySet::Version KeySet::version (const std::string& ver) { std::string tmp(ver); std::transform(tmp.begin(), tmp.end(), tmp.begin(), ::toupper); for (int i(EMPTY); i <= MAX_VERSION; ++i) { if (tmp == ver_str[i]) return version(i); } gu_throw_error(EINVAL) << "Unsupported KeySet version: " << ver; throw; } size_t KeySet::KeyPart::store_annotation (const wsrep_buf_t* const parts, int const part_num, gu::byte_t* buf, int const size) { assert(size >= 0); static size_t const max_len(std::numeric_limits::max()); ann_size_t ann_size; int tmp_size(sizeof(ann_size)); for (int i(0); i <= part_num; ++i) { tmp_size += 1 + std::min(parts[i].len, max_len); } tmp_size = std::min(tmp_size, size); ann_size = std::min(tmp_size, std::numeric_limits::max()); assert (ann_size <= size); ann_size_t const tmp(gu::htog(ann_size)); size_t off(sizeof(tmp)); ::memcpy(buf, &tmp, off); for (int i(0); i <= part_num && off < ann_size; ++i) { size_t const left(ann_size - off - 1); gu::byte_t const part_len (std::min(std::min(parts[i].len, left), max_len)); buf[off] = part_len; ++off; const gu::byte_t* const from( reinterpret_cast(parts[i].ptr)); std::copy(from, from + part_len, buf + off); off += part_len; } assert (off == ann_size); // log_info << "stored annotation of size: " << ann_size; return ann_size; } void KeySet::KeyPart::print_annotation(std::ostream& os, const gu::byte_t* buf) { ann_size_t const ann_size(gu::gtoh( *reinterpret_cast(buf))); size_t const begin(sizeof(ann_size_t)); size_t off(begin); while (off < ann_size) { if (off != begin) os << '/'; gu::byte_t const part_len(buf[off]); ++off; bool const last(ann_size 
== off + part_len); /* this is an attempt to guess whether we should interpret key part as * a string or numerical value */ bool const alpha(!last || part_len > 8); os << gu::Hexdump (buf + off, part_len, alpha); off += part_len; } } void KeySet::KeyPart::throw_buffer_too_short (size_t expected, size_t got) { gu_throw_error (EINVAL) << "Buffer too short: expected " << expected << ", got " << got; } void KeySet::KeyPart::throw_bad_prefix (gu::byte_t p) { gu_throw_error(EPROTO) << "Unsupported key prefix: " << p; } void KeySet::KeyPart::throw_match_empty_key (Version my, Version other) { gu_throw_error(EINVAL) << "Attempt to match against an empty key (" << my << ',' << other << ')'; } void KeySet::KeyPart::print (std::ostream& os) const { Version const ver(version()); size_t const size(ver != EMPTY ? base_size(ver, data_, 1) : 0); os << '(' << int(exclusive()) << ',' << ver_str[ver] << ')' << gu::Hexdump(data_, size); if (annotated(ver)) { os << "="; print_annotation (os, data_ + size); } } KeySetOut::KeyPart::KeyPart (KeyParts& added, KeySetOut& store, const KeyPart* parent, const KeyData& kd, int const part_num) : hash_ (parent->hash_), part_ (0), value_(reinterpret_cast(kd.parts[part_num].ptr)), size_ (kd.parts[part_num].len), ver_ (parent->ver_), own_ (false) { assert (ver_); uint32_t const s(gu::htog(size_)); hash_.append (&s, sizeof(s)); hash_.append (value_, size_); KeySet::KeyPart::TmpStore ts; KeySet::KeyPart::HashData hd; hash_.gather(hd.buf); /* only leaf part of the key can be exclusive */ bool const leaf (part_num + 1 == kd.parts_num); bool const exclusive (!kd.shared() && leaf); assert (kd.parts_num > part_num); KeySet::KeyPart kp(ts, hd, ver_, exclusive, kd.parts, part_num); #if 0 /* find() way */ /* the reason to use find() first, instead of going straight to insert() * is that we need to insert the part that was persistently stored in the * key set. At the same time we can't yet store the key part in the key set * before we can be sure that it is not a duplicate. Sort of 2PC. */ KeyParts::iterator found(added.find(kp)); if (added.end() != found) { if (exclusive && found->shared()) { /* need to ditch shared and add exclusive version of the key */ added.erase(found); found = added.end(); } else if (leaf || found->exclusive()) { #ifndef NDEBUG if (leaf) log_debug << "KeyPart ctor: full duplicate of " << *found; else log_debug << "Duplicate of exclusive: " << *found; #endif throw DUPLICATE(); } } if (added.end() == found) /* no such key yet, store and add */ { kp.store (store); std::pair res(added.insert(kp)); assert (res.second); found = res.first; } part_ = &(*found); #else /* insert() way */ std::pair const inserted(added.insert(kp)); if (inserted.second) { /* The key part was successfully inserted, store it in the key set buffer */ inserted.first->store (store); } else { /* A matching key part instance is already present in the set, check constraints */ if (exclusive && inserted.first->shared()) { /* The key part instance present in the set has weaker constraint, store this instance as well and update inserted to point there. (we can't update already stored data - it was checksummed, so we have to store a duplicate with a stronger constraint) */ kp.store (store); inserted.first->update_ptr(kp.ptr()); /* It is a hack, but it should be safe to modify key part already inserted into unordered set, as long as modification does not change hash and equality test results. 
And we get it to point to a duplicate here.*/ } else if (leaf || inserted.first->exclusive()) { /* we don't throw DUPLICATE for branch parts, just ignore them. DUPLICATE is thrown only when the whole key is a duplicate. */ #ifndef NDEBUG if (leaf) log_debug << "KeyPart ctor: full duplicate of " << *inserted.first; else log_debug << "Duplicate of exclusive: " << *inserted.first; #endif throw DUPLICATE(); } } part_ = &(*inserted.first); #endif /* insert() way */ } void KeySetOut::KeyPart::print (std::ostream& os) const { if (part_) os << *part_; else os << "0x0"; os << '(' << gu::Hexdump(value_, size_, true) << ')'; } #define CHECK_PREVIOUS_KEY 1 size_t KeySetOut::append (const KeyData& kd) { int i(0); #ifdef CHECK_PREVIOUS_KEY /* find common ancestor with the previous key */ for (; i < kd.parts_num && size_t(i + 1) < prev_.size() && prev_[i + 1].match(kd.parts[i].ptr, kd.parts[i].len); ++i) { #if 0 log_info << "prev[" << (i+1) << "]\n" << prev_[i+1] << "\nmatches\n" << gu::Hexdump(kd.parts[i].ptr, kd.parts[i].len, true); #endif /* 0 */ } // log_info << "matched " << i << " parts"; /* if we have a fully matched key OR common ancestor is exclusive, return */ if (i > 0) { assert (size_t(i) < prev_.size()); if (prev_[i].exclusive()) { assert (prev_.size() == (i + 1U)); // only leaf can be exclusive. // log_info << "Returning after matching exclusive key:\n"<< prev_[i]; return 0; } if (kd.parts_num == i) /* leaf */ { assert (prev_[i].shared()); if (kd.shared()) { // log_info << "Returning after matching all " << i << " parts"; return 0; } else /* need to add exclusive copy of the key */ --i; } } int const anc(i); const KeyPart* parent(&prev_[anc]); // log_info << "Common ancestor: " << anc << ' ' << *parent; #else KeyPart tmp(prev_[0]); const KeyPart* const parent(&tmp); #endif /* CHECK_PREVIOUS_KEY */ /* create parts that didn't match previous key and add to the set * of preiously added keys. */ size_t const old_size (size()); int j(0); for (; i < kd.parts_num; ++i, ++j) { try { KeyPart kp(added_, *this, parent, kd, i); #ifdef CHECK_PREVIOUS_KEY if (size_t(j) < new_.size()) { new_[j] = kp; } else { new_().push_back (kp); } parent = &new_[j]; #else if (kd.copy) kp.acquire(); if (i + 1 != kd.parts_num) tmp = kp; // <- updating parent for next iteration #endif /* CHECK_PREVIOUS_KEY */ // log_info << "pushed " << kp; } catch (KeyPart::DUPLICATE& e) { assert (i + 1 == kd.parts_num); /* There is a very small probability that child part thows DUPLICATE * even after parent was added as a new key. It does not matter: * a duplicate will be a duplicate in certification as well. */ #ifndef NDEBUG log_debug << "Returning after catching a DUPLICATE. 
Part: " << i; #endif /* NDEBUG */ goto out; } } assert (i == kd.parts_num); assert (anc + j == kd.parts_num); #ifdef CHECK_PREVIOUS_KEY /* copy new parts to prev_ */ prev_().resize(1 + kd.parts_num); std::copy(new_().begin(), new_().begin() + j, prev_().begin() + anc + 1); /* acquire key part value if it is volatile */ if (kd.copy) for (int k(anc + 1); size_t(k) < prev_.size(); ++k) { prev_[k].acquire(); } #endif /* CHECK_PREVIOUS_KEY */ out: return size() - old_size; } #if 0 const KeyIn& galera::KeySetIn::get_key() const { size_t offset(0); while (offset < keys_.size()) { KeyOS key(version_); if ((offset = unserialize(&keys_[0], keys_.size(), offset, key)) == 0) { gu_throw_fatal << "failed to unserialize key"; } s.push_back(key); } assert(offset == keys_.size()); } #endif } /* namespace galera */ galera-3-25.3.20/galera/src/galera_common.hpp0000644000015300001660000000234213042054732020475 0ustar jenkinsjenkins/* * Copyright (C) 2012 Codership Oy */ /*! * @file common.hpp * * @brief Imports definitions from the global common.h */ #ifndef GALERA_COMMON_HPP #define GALERA_COMMON_HPP #if defined(HAVE_COMMON_H) #include #endif #include namespace galera { #if defined(HAVE_COMMON_H) static std::string const BASE_PORT_KEY(COMMON_BASE_PORT_KEY); static std::string const BASE_PORT_DEFAULT(COMMON_BASE_PORT_DEFAULT); static std::string const BASE_HOST_KEY(COMMON_BASE_HOST_KEY); static std::string const BASE_DIR(COMMON_BASE_DIR_KEY); static std::string const BASE_DIR_DEFAULT(COMMON_BASE_DIR_DEFAULT); static std::string const GALERA_STATE_FILE(COMMON_STATE_FILE); static std::string const VIEW_STATE_FILE(COMMON_VIEW_STAT_FILE); #else static std::string const BASE_PORT_KEY("base_port"); static std::string const BASE_PORT_DEFAULT("4567"); static std::string const BASE_HOST_KEY("base_host"); static std::string const BASE_DIR("base_dir"); static std::string const BASE_DIR_DEFAULT("."); static std::string const GALERA_STATE_FILE("grastate.dat"); static std::string const VIEW_STATE_FILE("gvwstate.dat"); #endif } #endif /* GALERA_COMMON_HPP */ galera-3-25.3.20/galera/src/key_data.hpp0000644000015300001660000000176013042054732017456 0ustar jenkinsjenkins// // Copyright (C) 2013 Codership Oy // #ifndef GALERA_KEY_DATA_HPP #define GALERA_KEY_DATA_HPP #include "wsrep_api.h" namespace galera { struct KeyData { const wsrep_buf_t* const parts; long const parts_num; int const proto_ver; wsrep_key_type_t const type; bool const copy; KeyData (int const pv, const wsrep_buf_t* const k, long const kn, wsrep_key_type_t const tp, bool const cp) : parts (k), parts_num (kn), proto_ver (pv), type (tp), copy (cp) {} KeyData (const KeyData& kd) : parts (kd.parts), parts_num(kd.parts_num), proto_ver(kd.proto_ver), type (kd.type), copy (kd.copy) {} bool shared() const { return type == WSREP_KEY_SHARED; } private: KeyData& operator = (const KeyData&); }; /* struct KeyData */ } /* namespace galera */ #endif /* GALERA_KEY_DATA_HPP */ galera-3-25.3.20/galera/src/saved_state.cpp0000644000015300001660000001647013042054732020176 0ustar jenkinsjenkins// // Copyright (C) 2012-2016 Codership Oy // #include "saved_state.hpp" #include "gu_dbug.h" #include "uuid.hpp" #include #define __STDC_FORMAT_MACROS #include #include #include namespace galera { #define VERSION "2.1" #define MAX_SIZE 256 SavedState::SavedState (const std::string& file) : fs_ (0), uuid_ (WSREP_UUID_UNDEFINED), seqno_ (WSREP_SEQNO_UNDEFINED), safe_to_bootstrap_(true), unsafe_ (0), corrupt_ (false), mtx_ (), written_uuid_ (uuid_), current_len_ (0), total_marks_ (0), 
total_locks_ (0), total_writes_ (0) { GU_DBUG_EXECUTE("galera_init_invalidate_state", unlink(file.c_str());); std::ifstream ifs(file.c_str()); if (ifs.fail()) { log_warn << "Could not open state file for reading: '" << file << '\''; } fs_ = fopen(file.c_str(), "a"); if (!fs_) { gu_throw_error(errno) << "Could not open state file for writing: '" << file << "'. Check permissions and/or disk space."; } // We take exclusive lock on state file in order to avoid possibility // of two Galera replicators sharing the same state file. struct flock flck; flck.l_start = 0; flck.l_len = 0; flck.l_type = F_WRLCK; flck.l_whence = SEEK_SET; if (::fcntl(fileno(fs_), F_SETLK, &flck)) { log_warn << "Could not get exclusive lock on state file: " << file << ": " << ::strerror(errno); return; } std::string version("0.8"); std::string line; while (getline(ifs, line), ifs.good()) { std::istringstream istr(line); std::string param; istr >> param; if (param[0] == '#') { log_debug << "read comment: " << line; } else if (param == "version:") { istr >> version; // nothing to do with this yet log_debug << "read version: " << version; } else if (param == "uuid:") { try { istr >> uuid_; log_debug << "read saved state uuid: " << uuid_; } catch (gu::Exception& e) { log_error << e.what(); uuid_ = WSREP_UUID_UNDEFINED; } } else if (param == "seqno:") { istr >> seqno_; log_debug << "read saved state seqno: " << seqno_; } else if (param == "safe_to_bootstrap:") { istr >> safe_to_bootstrap_; log_debug << "read safe_to_bootstrap: " << safe_to_bootstrap_; } } log_info << "Found saved state: " << uuid_ << ':' << seqno_ << ", safe_to_bootsrap: " << safe_to_bootstrap_; #if 0 // we'll probably have it legal if (seqno_ < 0 && uuid_ != WSREP_UUID_UNDEFINED) { log_warn << "Negative seqno with valid UUID: " << uuid_ << ':' << seqno_ << ". Discarding UUID."; uuid_ = WSREP_UUID_UNDEFINED; } #endif written_uuid_ = uuid_; current_len_ = ftell (fs_); log_debug << "Initialized current_len_ to " << current_len_; if (current_len_ <= MAX_SIZE) { fs_ = freopen (file.c_str(), "r+", fs_); } else // normalize file contents { fs_ = freopen (file.c_str(), "w+", fs_); // truncate current_len_ = 0; set (uuid_, seqno_, safe_to_bootstrap_); } } SavedState::~SavedState () { if (fs_) { // Closing file descriptor should release the lock, but still... struct flock flck; flck.l_start = 0; flck.l_len = 0; flck.l_type = F_UNLCK; flck.l_whence = SEEK_SET; if (::fcntl(fileno(fs_), F_SETLK, &flck)) { log_warn << "Could not unlock state file: " << ::strerror(errno); } fclose(fs_); } } void SavedState::get (wsrep_uuid_t& u, wsrep_seqno_t& s, bool& safe_to_bootstrap) { gu::Lock lock(mtx_); u = uuid_; s = seqno_; safe_to_bootstrap = safe_to_bootstrap_; } void SavedState::set (const wsrep_uuid_t& u, wsrep_seqno_t s, bool safe_to_bootstrap) { gu::Lock lock(mtx_); ++total_locks_; if (corrupt_) return; uuid_ = u; seqno_ = s; safe_to_bootstrap_ = safe_to_bootstrap; if (0 == unsafe_()) write_and_flush (u, s, safe_to_bootstrap); else log_debug << "Not writing state: unsafe counter is " << unsafe_(); } /* the goal of unsafe_, written_uuid_, current_len_ below is * 1. avoid unnecessary mutex locks * 2. if locked - avoid unnecessary file writes * 3. 
if writing - avoid metadata operations, write over existing space */ void SavedState::mark_unsafe() { ++total_marks_; if (1 == unsafe_.add_and_fetch (1)) { gu::Lock lock(mtx_); ++total_locks_; assert (unsafe_() > 0); if (written_uuid_ != WSREP_UUID_UNDEFINED) { write_and_flush (WSREP_UUID_UNDEFINED, WSREP_SEQNO_UNDEFINED, safe_to_bootstrap_); } } } void SavedState::mark_safe() { ++total_marks_; long count = unsafe_.sub_and_fetch (1); assert (count >= 0); if (0 == count) { gu::Lock lock(mtx_); ++total_locks_; if (0 == unsafe_() && (written_uuid_ != uuid_ || seqno_ >= 0)) { assert(false == corrupt_); /* this will write down proper seqno if set() was called too early * (in unsafe state) */ write_and_flush (uuid_, seqno_, safe_to_bootstrap_); } } } void SavedState::mark_corrupt() { /* Half LONG_MAX keeps us equally far from overflow and underflow by mark_unsafe()/mark_safe() calls */ unsafe_ = (std::numeric_limits::max() >> 1); gu::Lock lock(mtx_); ++total_locks_; if (corrupt_) return; uuid_ = WSREP_UUID_UNDEFINED; seqno_ = WSREP_SEQNO_UNDEFINED; corrupt_ = true; write_and_flush (WSREP_UUID_UNDEFINED, WSREP_SEQNO_UNDEFINED, safe_to_bootstrap_); } void SavedState::write_and_flush(const wsrep_uuid_t& u, const wsrep_seqno_t s, bool safe_to_bootstrap) { assert (current_len_ <= MAX_SIZE); if (fs_) { if (s >= 0) { log_debug << "Saving state: " << u << ':' << s; } char buf[MAX_SIZE]; const gu_uuid_t* const uu(reinterpret_cast(&u)); int state_len = snprintf (buf, MAX_SIZE - 1, "# GALERA saved state" "\nversion: " VERSION "\nuuid: " GU_UUID_FORMAT "\nseqno: %" PRId64 "\nsafe_to_bootstrap: %d\n", GU_UUID_ARGS(uu), s, safe_to_bootstrap); int write_size; for (write_size = state_len; write_size < current_len_; ++write_size) buf[write_size] = ' '; // overwrite whatever is there currently rewind(fs_); fwrite(buf, write_size, 1, fs_); fflush(fs_); current_len_ = state_len; written_uuid_ = u; ++total_writes_; } else { log_debug << "Can't save state: output stream is not open."; } } } /* namespace galera */ galera-3-25.3.20/galera/src/key_entry_os.cpp0000644000015300001660000000313413042054732020377 0ustar jenkinsjenkins// // Copyright (C) 2012 Codership Oy // #include "key_entry_os.hpp" #include "trx_handle.hpp" namespace galera { #ifndef NDEBUG void KeyEntryOS::assert_ref(TrxHandle* trx, bool full_key) const { assert(ref_trx_ == 0 || ref_trx_->global_seqno() <= trx->global_seqno()); if (full_key) { assert(ref_full_trx_ == 0 || (ref_full_trx_->global_seqno() <= trx->global_seqno() && ref_trx_ != 0)); } } void KeyEntryOS::assert_unref(TrxHandle* trx) const { if (ref_full_trx_ != 0 && ref_trx_ == 0) { log_fatal << "dereferencing EXCLUSIVE partial key: " << key_ << " by " << trx->global_seqno() << ", while full key referenced by " << ref_full_trx_->global_seqno(); assert(0); } } void KeyEntryOS::assert_ref_shared(TrxHandle* trx, bool full_key) const { assert(ref_shared_trx_ == 0 || ref_shared_trx_->global_seqno() <= trx->global_seqno()); if (full_key) { assert(ref_full_shared_trx_ == 0 || (ref_full_shared_trx_->global_seqno() <= trx->global_seqno() && ref_shared_trx_ != 0)); } } void KeyEntryOS::assert_unref_shared(TrxHandle* trx) const { if (ref_full_shared_trx_ != 0 && ref_shared_trx_ == 0) { log_fatal << "dereferencing SHARED partial key: " << key_ << " by " << trx->global_seqno() << ", while full key referenced by " << ref_full_shared_trx_->global_seqno(); assert(0); } } #endif /* NDEBUG */ } galera-3-25.3.20/galera/SConscript0000644000015300001660000000006413042054732016403 0ustar jenkinsjenkins 
SConscript(['src/SConscript', 'tests/SConscript']) galera-3-25.3.20/galera/tests/0000755000015300001660000000000013042054732015533 5ustar jenkinsjenkinsgalera-3-25.3.20/galera/tests/data_set_check.cpp0000644000015300001660000001606713042054732021172 0ustar jenkinsjenkins/* Copyright (C) 2013 Codership Oy * * $Id$ */ #undef NDEBUG #include "../src/data_set.hpp" #include "gu_logger.hpp" #include "gu_hexdump.hpp" #include using namespace galera; class TestBaseName : public gu::Allocator::BaseName { std::string str_; public: TestBaseName(const char* name) : str_(name) {} void print(std::ostream& os) const { os << str_; } }; class TestRecord { public: TestRecord (size_t size, const char* str) : size_(size), buf_(reinterpret_cast(::malloc(size_))), str_(reinterpret_cast(buf_) + sizeof(uint32_t)), own_(true) { if (0 == buf_) throw std::runtime_error("failed to allocate record"); void* tmp = const_cast(buf_); *reinterpret_cast(tmp) = htog32(size_); ::strncpy (const_cast(str_), str, size_ - 4); } TestRecord (const void* const buf, ssize_t const size) : size_(TestRecord::serial_size(buf, size)), buf_(buf), str_(reinterpret_cast(buf_) + sizeof(uint32_t)), own_(false) {} TestRecord (const TestRecord& t) : size_(t.size_), buf_(t.buf_), str_(t.str_), own_(false) {} virtual ~TestRecord () { if (own_) free (const_cast(buf_)); } const void* buf() const { return buf_; } const char* c_str() const { return str_; } ssize_t serial_size() const { return my_serial_size(); } static ssize_t serial_size(const void* const buf, ssize_t const size) { check_buf (buf, size, 1); return gtoh32 (*reinterpret_cast(buf)); } bool operator!= (const TestRecord& t) const { return (size_ != t.size_ || ::memcmp(buf_, t.buf_, size_)); } bool operator== (const TestRecord& t) const { return (!(*this != t)); } private: size_t const size_; const void* const buf_; const char* const str_; bool const own_; ssize_t my_serial_size () const { return size_; }; ssize_t my_serialize_to (void* buf, ssize_t size) const { check_buf (buf, size, size_); ::memcpy (buf, buf_, size_); return size_; } static void check_buf (const void* const buf, ssize_t const size, ssize_t min_size) { if (gu_unlikely (buf == 0 || size < min_size)) throw std::length_error("buffer too short"); } TestRecord& operator= (const TestRecord&); }; START_TEST (ver0) { size_t const MB = 1 << 20; TestRecord rout0(128, "abc0"); TestRecord rout1(127, "abc1"); TestRecord rout2(126, "012345"); TestRecord rout3(125, "defghij"); TestRecord rout4(3*MB, "klm"); TestRecord rout5(1*MB, "qpr"); std::vector records; records.push_back (&rout0); records.push_back (&rout1); records.push_back (&rout2); records.push_back (&rout3); records.push_back (&rout4); records.push_back (&rout5); gu::byte_t reserved[1024]; TestBaseName str("data_set_test"); DataSetOut dset_out(reserved, sizeof(reserved), str, DataSet::VER1); size_t offset(dset_out.size()); // this should be allocated inside current page offset += dset_out.append (rout0.buf(), rout0.serial_size(), true); fail_if (dset_out.size() != offset, "expected: %zu, got %zu", offset, dset_out.size()); // this should trigger new page since not stored offset += dset_out.append (rout1.buf(), rout1.serial_size(), false); fail_if (dset_out.size() != offset); // this should trigger new page since previous one was not stored offset += dset_out.append (rout2.buf(), rout2.serial_size(), true); fail_if (dset_out.size() != offset); // this should trigger a new page, since not stored offset += dset_out.append (rout3.buf(), rout3.serial_size(), false); fail_if 
(dset_out.size() != offset); // this should trigger new page, because won't fit in the current page offset += dset_out.append (rout4.buf(), rout4.serial_size(), true); fail_if (dset_out.size() != offset); // this should trigger new page, because 4MB RAM limit exceeded offset += dset_out.append (rout5.buf(), rout5.serial_size(), false); fail_if (dset_out.size() != offset); fail_if (1 != size_t(dset_out.count())); DataSetOut::GatherVector out_bufs; out_bufs().reserve (dset_out.page_count()); size_t min_out_size(0); for (size_t i = 0; i < records.size(); ++i) { min_out_size += records[i]->serial_size(); } size_t const out_size (dset_out.gather (out_bufs)); fail_if (out_size <= min_out_size || out_size > offset); fail_if (out_bufs->size() != static_cast(dset_out.page_count()), "Expected %zu buffers, got: %zd", dset_out.page_count(), out_bufs->size()); /* concatenate all buffers into one */ std::vector in_buf; in_buf.reserve(out_size); mark_point(); for (size_t i = 0; i < out_bufs->size(); ++i) { fail_if (0 == out_bufs[i].ptr); log_info << "\nadding buf " << i << ": " << gu::Hexdump(out_bufs[i].ptr, std::min(out_bufs[i].size, 24), true); size_t old_size = in_buf.size(); const gu::byte_t* ptr (reinterpret_cast(out_bufs[i].ptr)); in_buf.insert (in_buf.end(), ptr, ptr + out_bufs[i].size); fail_if (old_size + out_bufs[i].size != in_buf.size()); } fail_if (in_buf.size() != out_size, "Sent buf size: %zu, recvd buf size: %zu", out_size, in_buf.size()); log_info << "Resulting DataSet buffer:\n" << gu::Hexdump(in_buf.data(), 32, false) << '\n' << gu::Hexdump(in_buf.data(), 32, true); galera::DataSetIn const dset_in(dset_out.version(), in_buf.data(), in_buf.size()); fail_if (dset_in.size() != dset_out.size()); fail_if (dset_in.count() != dset_out.count()); for (ssize_t i = 0; i < dset_in.count(); ++i) { gu::Buf data = dset_in.next(); TestRecord const rin(data.ptr, data.size); fail_if (rin != *records[i], "Record %d failed: expected %s, found %s", i, records[i]->c_str(), rin.c_str()); } galera::DataSetIn dset_in_empty; dset_in_empty.init(dset_out.version(), in_buf.data(), in_buf.size()); fail_if (dset_in_empty.size() != dset_out.size()); fail_if (dset_in_empty.count() != dset_out.count()); for (ssize_t i = 0; i < dset_in_empty.count(); ++i) { gu::Buf data = dset_in_empty.next(); TestRecord const rin(data.ptr, data.size); fail_if (rin != *records[i], "Record %d failed: expected %s, found %s", i, records[i]->c_str(), rin.c_str()); } } END_TEST Suite* data_set_suite () { TCase* t = tcase_create ("DataSet"); tcase_add_test (t, ver0); tcase_set_timeout(t, 60); Suite* s = suite_create ("DataSet"); suite_add_tcase (s, t); return s; } galera-3-25.3.20/galera/tests/galera_check.cpp0000644000015300001660000000303013042054732020623 0ustar jenkinsjenkins/* * Copyright (C) 2012 Codership Oy */ #include #include #include #include /* * Suite descriptions: forward-declare and add to array */ typedef Suite* (*suite_creator_t) (void); extern Suite* data_set_suite(); extern Suite* key_set_suite(); extern Suite* write_set_ng_suite(); extern Suite* write_set_suite(); extern Suite* trx_handle_suite(); extern Suite* service_thd_suite(); extern Suite* ist_suite(); extern Suite* saved_state_suite(); static suite_creator_t suites[] = { data_set_suite, key_set_suite, write_set_ng_suite, write_set_suite, trx_handle_suite, service_thd_suite, ist_suite, saved_state_suite, 0 }; extern "C" { #include } #define LOG_FILE "galera_check.log" int main(int argc, char* argv[]) { bool no_fork = (argc >= 2 && std::string(argv[1]) == "nofork"); 
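    // "nofork" runs the suites in-process (CK_NOFORK below), which keeps
    // breakpoints and traces in a single process; in the default forking
    // mode log output is redirected to galera_check.log instead.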
FILE* log_file = 0; if (!no_fork) { log_file = fopen (LOG_FILE, "w"); if (!log_file) return EXIT_FAILURE; gu_conf_set_log_file (log_file); } gu_conf_debug_on(); int failed = 0; for (int i = 0; suites[i] != 0; ++i) { SRunner* sr = srunner_create(suites[i]()); if (no_fork) srunner_set_fork_status(sr, CK_NOFORK); srunner_run_all(sr, CK_NORMAL); failed += srunner_ntests_failed(sr); srunner_free(sr); } if (log_file != 0) fclose(log_file); printf ("Total tests failed: %d\n", failed); return failed == 0 ? EXIT_SUCCESS : EXIT_FAILURE; } galera-3-25.3.20/galera/tests/saved_state_check.cpp0000644000015300001660000001173613042054732021706 0ustar jenkinsjenkins/* * Copyright (C) 2012 Codership Oy */ #include "../src/saved_state.hpp" #include "../src/uuid.hpp" #include #include #include #define __STDC_FORMAT_MACROS #include static volatile bool stop(false); using namespace galera; static void* thread_routine (void* arg) { SavedState* st(static_cast(arg)); do { st->mark_unsafe(); st->mark_safe(); } while (!stop); return NULL; } static const int max_threads(16); static pthread_t threads[max_threads]; static void start_threads(void* arg) { stop = false; for (int ret = 0; ret < max_threads; ++ret) { pthread_t t; int err = pthread_create (&t, NULL, thread_routine, arg); fail_if (err, "Failed to start thread %d: %d (%s)", ret, err, strerror(err)); threads[ret] = t; } } static void stop_threads() { stop = true; for (int t = 0; t < max_threads; ++t) { pthread_join(threads[t], NULL); } } static const char* fname("grastate.dat"); START_TEST(test_basic) { unlink (fname); wsrep_uuid_t uuid; wsrep_seqno_t seqno; bool safe_to_bootstrap; { SavedState st(fname); st.get(uuid, seqno, safe_to_bootstrap); fail_if (uuid != WSREP_UUID_UNDEFINED); fail_if (seqno != WSREP_SEQNO_UNDEFINED); fail_if (safe_to_bootstrap != true); gu_uuid_from_string("b2c01654-8dfe-11e1-0800-a834d641cfb5", to_gu_uuid(uuid)); seqno = 2345234LL; st.set(uuid, seqno, false); } { SavedState st(fname); wsrep_uuid_t u; wsrep_seqno_t s; bool stb; st.get(u, s, stb); fail_if (u != uuid); fail_if (s != seqno); fail_if (stb != false); } } END_TEST #define TEST_USLEEP 2500 // 2.5ms START_TEST(test_unsafe) { SavedState st(fname); wsrep_uuid_t uuid; wsrep_seqno_t seqno; bool safe_to_bootstrap; st.get(uuid, seqno, safe_to_bootstrap); fail_if (uuid == WSREP_UUID_UNDEFINED); fail_if (seqno == WSREP_SEQNO_UNDEFINED); fail_if (safe_to_bootstrap == true); st.set(uuid, WSREP_SEQNO_UNDEFINED, false); for (int i = 0; i < 100; ++i) { start_threads(&st); mark_point(); usleep (TEST_USLEEP); st.set(uuid, i, false); // make sure that state is not lost if set concurrently mark_point(); usleep (TEST_USLEEP); stop_threads(); mark_point(); st.get(uuid, seqno, safe_to_bootstrap); fail_if (uuid == WSREP_UUID_UNDEFINED); fail_if (seqno != i); fail_if (safe_to_bootstrap != false); } long marks, locks, writes; st.stats(marks, locks, writes); log_info << "Total marks: " << marks << ", total writes: " << writes << ", total locks: " << locks << "\nlocks ratio: " << (double(locks)/marks) << "\nwrites ratio: " << (double(writes)/locks); } END_TEST START_TEST(test_corrupt) { wsrep_uuid_t uuid; wsrep_seqno_t seqno; bool safe_to_bootstrap; { SavedState st(fname); st.get(uuid, seqno, safe_to_bootstrap); fail_if (uuid == WSREP_UUID_UNDEFINED); fail_if (seqno == WSREP_SEQNO_UNDEFINED); fail_if (safe_to_bootstrap == true); st.set(uuid, WSREP_SEQNO_UNDEFINED, false); } long marks(0), locks(0), writes(0); for (int i = 0; i < 100; ++i) { SavedState st(fname); // explicitly overwrite corruption mark. 
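        // (each iteration opens a fresh SavedState, which starts uncorrupted,
        // so this set() call is honored and rewrites the state file)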
st.set (uuid, seqno, false); start_threads(&st); mark_point(); usleep (TEST_USLEEP); st.mark_corrupt(); st.set (uuid, seqno, false); // make sure that corrupt stays usleep (TEST_USLEEP); mark_point(); stop_threads(); mark_point(); wsrep_uuid_t u; wsrep_seqno_t s; bool stb; st.get(u, s, stb); // make sure that mark_corrupt() stays fail_if (u != WSREP_UUID_UNDEFINED); fail_if (s != WSREP_SEQNO_UNDEFINED); fail_if (stb != false); long m, l, w; st.stats(m, l, w); marks += m; locks += l; writes += w; } log_info << "Total marks: " << marks << ", total locks: " << locks << ", total writes: " << writes << "\nlocks ratio: " << (double(locks)/marks) << "\nwrites ratio: " << (double(writes)/locks); unlink (fname); } END_TEST #define WAIT_FOR(cond) \ { int count = 1000; while (--count && !(cond)) { usleep (TEST_USLEEP); }} Suite* saved_state_suite() { Suite* s = suite_create ("saved_state"); TCase* tc; tc = tcase_create ("saved_state"); tcase_add_test (tc, test_basic); tcase_add_test (tc, test_unsafe); tcase_add_test (tc, test_corrupt); tcase_set_timeout(tc, 120); suite_add_tcase (s, tc); return s; } galera-3-25.3.20/galera/tests/key_set_check.cpp0000644000015300001660000001656213042054732021051 0ustar jenkinsjenkins/* copyright (C) 2013 Codership Oy * * $Id$ */ #undef NDEBUG #include "test_key.hpp" #include "../src/key_set.hpp" #include "gu_logger.hpp" #include "gu_hexdump.hpp" #include using namespace galera; class TestBaseName : public gu::Allocator::BaseName { std::string str_; public: TestBaseName(const char* name) : str_(name) {} void print(std::ostream& os) const { os << str_; } }; static size_t version_to_hash_size (KeySet::Version const ver) { switch (ver) { case KeySet::FLAT16: fail("FLAT16 is not supported by test"); case KeySet::FLAT16A: return 16; case KeySet::FLAT8: fail ("FLAT8 is not supported by test"); case KeySet::FLAT8A: return 8; default: fail ("Unsupported KeySet verison: %d", ver); } abort(); } START_TEST (ver0) { KeySet::Version const tk_ver(KeySet::FLAT16A); size_t const base_size(version_to_hash_size(tk_ver)); gu::byte_t reserved[1024]; TestBaseName const str("key_set_test"); KeySetOut kso (reserved, sizeof(reserved), str, tk_ver); fail_if (kso.count() != 0); size_t total_size(kso.size()); log_info << "Start size: " << total_size; TestKey tk0(tk_ver, SHARED, false, "a0"); kso.append(tk0()); fail_if (kso.count() != 1); total_size += base_size + 2 + 1*4; fail_if (total_size != kso.size(), "Size: %zu, expected: %zu", kso.size(), total_size); kso.append(tk0()); fail_if (kso.count() != 1); fail_if (total_size != kso.size(), "Size: %zu, expected: %zu", kso.size(), total_size); TestKey tk1(tk_ver, SHARED, true, "a0", "a1", "a2"); mark_point(); kso.append(tk1()); fail_if (kso.count() != 3, "key count: expected 3, got %d", kso.count()); total_size += base_size + 2 + 2*4; total_size += base_size + 2 + 3*4; fail_if (total_size != kso.size(), "Size: %zu, expected: %zu", kso.size(), total_size); TestKey tk2(tk_ver, EXCLUSIVE, false, "a0", "a1", "b2"); kso.append(tk2()); fail_if (kso.count() != 4, "key count: expected 4, got %d", kso.count()); total_size += base_size + 2 + 3*4; fail_if (total_size != kso.size(), "Size: %zu, expected: %zu", kso.size(), total_size); /* it is a duplicate, but it should add an exclusive verision of the key */ TestKey tk3(tk_ver, EXCLUSIVE, true, "a0", "a1"); log_info << "######## Appending exclusive duplicate tk3: begin"; kso.append(tk3()); log_info << "######## Appending exclusive duplicate tk3: end"; fail_if (kso.count() != 5, "key count: expected 5, got 
%d", kso.count()); total_size += base_size + 2 + 2*4; fail_if (total_size != kso.size(), "Size: %zu, expected: %zu", kso.size(), total_size); /* tk3 should make it impossible to add anything past a0:a1 */ TestKey tk4(tk_ver, EXCLUSIVE, false, "a0", "a1", "c2"); kso.append(tk4()); fail_if (kso.count() != 5, "key count: expected 5, got %d", kso.count()); fail_if (total_size != kso.size(), "Size: %zu, expected: %zu", kso.size(), total_size); /* adding shared key should have no effect */ TestKey tk5(tk_ver, SHARED, true, "a0", "a1"); kso.append(tk5()); fail_if (kso.count() != 5, "key count: expected 5, got %d", kso.count()); fail_if (total_size != kso.size(), "Size: %zu, expected: %zu", kso.size(), total_size); /* tk5 should not make any changes */ TestKey tk6(tk_ver, EXCLUSIVE, false, "a0", "a1", "c2"); kso.append(tk6()); fail_if (kso.count() != 5, "key count: expected 5, got %d", kso.count()); fail_if (total_size != kso.size(), "Size: %zu, expected: %zu", kso.size(), total_size); /* a0:b1:... should still be possible, should add 2 keys: b1 and c2 */ TestKey tk7(tk_ver, EXCLUSIVE, true, "a0", "b1", "c2"); kso.append(tk7()); fail_if (kso.count() != 7, "key count: expected 7, got %d", kso.count()); total_size += base_size + 2 + 2*4; total_size += base_size + 2 + 3*4; fail_if (total_size != kso.size(), "Size: %zu, expected: %zu", kso.size(), total_size); /* make sure a0:b1:b2 is possible despite we have a0:a1:b2 already * (should be no collision on b2) */ TestKey tk8(tk_ver, EXCLUSIVE, false, "a0", "b1", "b2"); kso.append(tk8()); fail_if (kso.count() != 8, "key count: expected 8, got %d", kso.count()); total_size += base_size + 2 + 3*4; fail_if (total_size != kso.size(), "Size: %zu, expected: %zu", kso.size(), total_size); log_info << "size before huge key: " << total_size; char huge_key[2048]; memset (huge_key, 'x', sizeof(huge_key)); huge_key[ sizeof(huge_key) - 1 ] = 0; TestKey tk9(tk_ver, EXCLUSIVE, false, huge_key, huge_key, huge_key); kso.append(tk9()); fail_if (kso.count() != 11, "key count: expected 11, got %d", kso.count()); total_size += base_size + 2 + 1*256; total_size += base_size + 2 + 2*256; total_size += base_size + 2 + 3*256; fail_if (total_size != kso.size(), "Size: %zu, expected: %zu", kso.size(), total_size); log_info << "End size: " << kso.size(); KeySetOut::GatherVector out; out->reserve(kso.page_count()); size_t const out_size(kso.gather(out)); log_info << "Gather size: " << out_size << ", buf count: " << out->size(); std::vector in; in.reserve(out_size); for (size_t i(0); i < out->size(); ++i) { const gu::byte_t* ptr(reinterpret_cast(out[i].ptr)); in.insert (in.end(), ptr, ptr + out[i].size); } fail_if (in.size() != out_size); KeySetIn ksi (kso.version(), in.data(), in.size()); fail_if (ksi.count() != kso.count(), "Received keys: %zu, expected: %zu", ksi.count(), kso.count()); fail_if (ksi.size() != kso.size(), "Received size: %zu, expected: %zu", ksi.size(), kso.size()); try { ksi.checksum(); } catch (std::exception& e) { fail("%s", e.what()); } int shared(0); // to stiffle clang complaints about unused variables for (int i(0); i < ksi.count(); ++i) { KeySet::KeyPart kp(ksi.next()); shared += kp.shared(); } KeySetIn ksi_empty; fail_if (ksi_empty.count() != 0, "Received keys: %zu, expected: %zu", ksi_empty.count(), 0); fail_if (ksi_empty.size() != 0, "Received size: %zu, expected: %zu", ksi_empty.size(), 0); ksi_empty.init (kso.version(), in.data(), in.size()); fail_if (ksi_empty.count() != kso.count(), "Received keys: %zu, expected: %zu", ksi_empty.count(), kso.count()); 
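    // once init()'ed, the default-constructed ksi_empty must describe the
    // same key set as kso: same count (checked above), same size and a
    // clean checksum (checked below)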
fail_if (ksi_empty.size() != kso.size(), "Received size: %zu, expected: %zu", ksi_empty.size(), kso.size()); try { ksi_empty.checksum(); } catch (std::exception& e) { fail("%s", e.what()); } for (int i(0); i < ksi_empty.count(); ++i) { KeySet::KeyPart kp(ksi_empty.next()); shared += kp.shared(); } ksi_empty.rewind(); for (int i(0); i < ksi_empty.count(); ++i) { KeySet::KeyPart kp(ksi_empty.next()); shared += kp.shared(); } fail_if(0 == shared); } END_TEST Suite* key_set_suite () { TCase* t = tcase_create ("KeySet"); tcase_add_test (t, ver0); tcase_set_timeout(t, 60); Suite* s = suite_create ("KeySet"); suite_add_tcase (s, t); return s; } galera-3-25.3.20/galera/tests/trx_handle_check.cpp0000644000015300001660000001603013042054732021524 0ustar jenkinsjenkins// // Copyright (C) 2010-2014 Codership Oy // #include "trx_handle.hpp" #include "uuid.hpp" #include using namespace std; using namespace galera; START_TEST(test_states) { TrxHandle::LocalPool tp(TrxHandle::LOCAL_STORAGE_SIZE(), 16, "test_states"); wsrep_uuid_t uuid = {{1, }}; // first check basic stuff // 1) initial state is executing // 2) invalid state changes are caught // 3) valid state changes change state TrxHandle* trx(TrxHandle::New(tp, TrxHandle::Defaults, uuid, -1, 1)); log_info << *trx; fail_unless(trx->state() == TrxHandle::S_EXECUTING); #if 0 // now setting wrong state results in abort try { trx->set_state(TrxHandle::S_COMMITTED); fail(""); } catch (...) { fail_unless(trx->state() == TrxHandle::S_EXECUTING); } #endif trx->set_state(TrxHandle::S_REPLICATING); fail_unless(trx->state() == TrxHandle::S_REPLICATING); trx->unref(); // abort before replication trx = TrxHandle::New(tp, TrxHandle::Defaults, uuid, -1, 1); trx->set_state(TrxHandle::S_MUST_ABORT); trx->set_state(TrxHandle::S_ABORTING); trx->set_state(TrxHandle::S_ROLLED_BACK); trx->unref(); // aborted during replication and does not certify trx = TrxHandle::New(tp, TrxHandle::Defaults, uuid, -1, 1); trx->set_state(TrxHandle::S_REPLICATING); trx->set_state(TrxHandle::S_MUST_ABORT); trx->set_state(TrxHandle::S_ABORTING); trx->set_state(TrxHandle::S_ROLLED_BACK); trx->unref(); // aborted during replication and certifies but does not certify // during replay (is this even possible?) 
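    // (expected walk below: REPLICATING -> MUST_ABORT -> MUST_CERT_AND_REPLAY
    //  -> CERTIFYING -> MUST_ABORT -> ABORTING -> ROLLED_BACK)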
trx = TrxHandle::New(tp, TrxHandle::Defaults, uuid, -1, 1); trx->set_state(TrxHandle::S_REPLICATING); trx->set_state(TrxHandle::S_MUST_ABORT); trx->set_state(TrxHandle::S_MUST_CERT_AND_REPLAY); trx->set_state(TrxHandle::S_CERTIFYING); trx->set_state(TrxHandle::S_MUST_ABORT); trx->set_state(TrxHandle::S_ABORTING); trx->set_state(TrxHandle::S_ROLLED_BACK); trx->unref(); // aborted during replication, certifies and commits trx = TrxHandle::New(tp, TrxHandle::Defaults, uuid, -1, 1); trx->set_state(TrxHandle::S_REPLICATING); trx->set_state(TrxHandle::S_MUST_ABORT); trx->set_state(TrxHandle::S_MUST_CERT_AND_REPLAY); trx->set_state(TrxHandle::S_CERTIFYING); trx->set_state(TrxHandle::S_MUST_REPLAY_AM); trx->set_state(TrxHandle::S_MUST_REPLAY_CM); trx->set_state(TrxHandle::S_MUST_REPLAY); trx->set_state(TrxHandle::S_REPLAYING); trx->set_state(TrxHandle::S_COMMITTED); trx->unref(); // aborted during certification, replays and commits trx = TrxHandle::New(tp, TrxHandle::Defaults, uuid, -1, 1); trx->set_state(TrxHandle::S_REPLICATING); trx->set_state(TrxHandle::S_CERTIFYING); trx->set_state(TrxHandle::S_MUST_ABORT); trx->set_state(TrxHandle::S_MUST_CERT_AND_REPLAY); trx->set_state(TrxHandle::S_CERTIFYING); trx->set_state(TrxHandle::S_MUST_REPLAY_AM); trx->set_state(TrxHandle::S_MUST_REPLAY_CM); trx->set_state(TrxHandle::S_MUST_REPLAY); trx->set_state(TrxHandle::S_REPLAYING); trx->set_state(TrxHandle::S_COMMITTED); trx->unref(); // aborted while waiting applying, replays and commits trx = TrxHandle::New(tp, TrxHandle::Defaults, uuid, -1, 1); trx->set_state(TrxHandle::S_REPLICATING); trx->set_state(TrxHandle::S_CERTIFYING); trx->set_state(TrxHandle::S_APPLYING); trx->set_state(TrxHandle::S_MUST_ABORT); trx->set_state(TrxHandle::S_MUST_REPLAY_AM); trx->set_state(TrxHandle::S_MUST_REPLAY_CM); trx->set_state(TrxHandle::S_MUST_REPLAY); trx->set_state(TrxHandle::S_REPLAYING); trx->set_state(TrxHandle::S_COMMITTED); trx->unref(); // aborted while waiting for commit order, replays and commits trx = TrxHandle::New(tp, TrxHandle::Defaults, uuid, -1, 1); trx->set_state(TrxHandle::S_REPLICATING); trx->set_state(TrxHandle::S_CERTIFYING); trx->set_state(TrxHandle::S_APPLYING); trx->set_state(TrxHandle::S_COMMITTING); trx->set_state(TrxHandle::S_MUST_ABORT); trx->set_state(TrxHandle::S_MUST_REPLAY_CM); trx->set_state(TrxHandle::S_MUST_REPLAY); trx->set_state(TrxHandle::S_REPLAYING); trx->set_state(TrxHandle::S_COMMITTED); trx->unref(); // smooth operation trx = TrxHandle::New(tp, TrxHandle::Defaults, uuid, -1, 1); trx->set_state(TrxHandle::S_REPLICATING); trx->set_state(TrxHandle::S_CERTIFYING); trx->set_state(TrxHandle::S_APPLYING); trx->set_state(TrxHandle::S_COMMITTING); trx->set_state(TrxHandle::S_COMMITTED); trx->unref(); } END_TEST START_TEST(test_serialization) { TrxHandle::LocalPool lp(4096, 16, "serialization_lp"); TrxHandle::SlavePool sp(sizeof(TrxHandle), 16, "serialization_sp"); int const version(0); galera::TrxHandle::Params const trx_params("", version,KeySet::MAX_VERSION); wsrep_uuid_t uuid; gu_uuid_generate(reinterpret_cast(&uuid), 0, 0); TrxHandle* trx(TrxHandle::New(lp, trx_params, uuid, 4567, 8910)); fail_unless(trx->serial_size() == 4 + 16 + 8 + 8 + 8 + 8); trx->set_flags(trx->flags() | TrxHandle::F_MAC_HEADER); fail_unless(trx->serial_size() == 4 + 16 + 8 + 8 + 8 + 8 + 2); trx->set_flags(trx->flags() & ~TrxHandle::F_MAC_HEADER); fail_unless(trx->serial_size() == 4 + 16 + 8 + 8 + 8 + 8); trx->append_annotation(reinterpret_cast("foobar"), strlen("foobar")); trx->set_flags(trx->flags() | 
TrxHandle::F_ANNOTATION); fail_unless(trx->serial_size() == 4 + 16 + 8 + 8 + 8 + 8 + 4 + 6); trx->set_flags(trx->flags() & ~TrxHandle::F_ANNOTATION); fail_unless(trx->serial_size() == 4 + 16 + 8 + 8 + 8 + 8); trx->set_last_seen_seqno(0); TrxHandle* trx2(TrxHandle::New(sp)); std::vector buf(trx->serial_size()); fail_unless(trx->serialize(&buf[0], buf.size(), 0) > 0); fail_unless(trx2->unserialize(&buf[0], buf.size(), 0) > 0); trx2->unref(); trx2 = TrxHandle::New(sp); trx->set_flags(trx->flags() | TrxHandle::F_MAC_PAYLOAD); buf.resize(trx->serial_size()); fail_unless(trx->serialize(&buf[0], buf.size(), 0) > 0); fail_unless(trx2->unserialize(&buf[0], buf.size(), 0) > 0); trx2->unref(); trx2 = TrxHandle::New(sp); trx->set_flags(trx->flags() | TrxHandle::F_ANNOTATION); buf.resize(trx->serial_size()); fail_unless(trx->serialize(&buf[0], buf.size(), 0) > 0); fail_unless(trx2->unserialize(&buf[0], buf.size(), 0) > 0); fail_unless(trx2->serial_size() == trx->serial_size(), "got serial_size(*trx2) = %zu, serial_size(*trx) = %zu", trx2->serial_size(), trx->serial_size()); trx2->unref(); trx->unref(); } END_TEST Suite* trx_handle_suite() { Suite* s = suite_create("trx_handle"); TCase* tc; tc = tcase_create("test_states"); tcase_add_test(tc, test_states); suite_add_tcase(s, tc); tc = tcase_create("test_serialization"); tcase_add_test(tc, test_serialization); suite_add_tcase(s, tc); return s; } galera-3-25.3.20/galera/tests/SConscript0000644000015300001660000000262413042054732017551 0ustar jenkinsjenkins Import('check_env') env = check_env.Clone() # Include paths env.Append(CPPPATH = Split(''' # #/common #/galerautils/src #/gcache/src #/gcs/src #/galera/src ''')) env.Prepend(LIBS=File('#/galerautils/src/libgalerautils.a')) env.Prepend(LIBS=File('#/galerautils/src/libgalerautils++.a')) env.Prepend(LIBS=File('#/gcomm/src/libgcomm.a')) env.Prepend(LIBS=File('#/gcs/src/libgcs.a')) env.Prepend(LIBS=File('#/galera/src/libgalera++.a')) env.Prepend(LIBS=File('#/gcache/src/libgcache.a')) galera_check = env.Program(target='galera_check', source=Split(''' galera_check.cpp data_set_check.cpp key_set_check.cpp write_set_ng_check.cpp write_set_check.cpp trx_handle_check.cpp service_thd_check.cpp ist_check.cpp saved_state_check.cpp ''')) stamp = "galera_check.passed" env.Test(stamp, galera_check) env.Alias("test", stamp) Clean(galera_check, ['#/galera_check.log', 'ist_check.cache']) galera-3-25.3.20/galera/tests/test_key.hpp0000644000015300001660000000465113042054732020101 0ustar jenkinsjenkins/* Copyright (C) 2013 Codership Oy * * $Id$ */ #ifndef _TEST_KEY_HPP_ #define _TEST_KEY_HPP_ #include "../src/key_data.hpp" #include "../src/key_set.hpp" // for version_to_hash_size #include // for version_to_hash_size #include #include using namespace galera; class TestKey { public: TestKey (int a, int ver, bool exclusive, std::vector parts, bool copy = true) : parts_ (), ver_ (ver), exclusive_(exclusive), copy_ (copy) { parts_.reserve(parts.size()); for (size_t i = 0; i < parts.size(); ++i) { size_t p_len(parts[i] ? 
strlen(parts[i]) + 1 : 0); wsrep_buf_t b = { parts[i], p_len }; parts_.push_back(b); } } TestKey (int ver, bool exclusive, bool copy, const char* part0, const char* part1 = 0, const char* part2 = 0, const char* part3 = 0, const char* part4 = 0, const char* part5 = 0, const char* part6 = 0, const char* part7 = 0, const char* part8 = 0, const char* part9 = 0 ) : parts_ (), ver_ (ver), exclusive_(exclusive), copy_ (copy) { parts_.reserve(10); (push_back(part0) && push_back(part1) && push_back(part2) && push_back(part3) && push_back(part4) && push_back(part5) && push_back(part6) && push_back(part7) && push_back(part8) && push_back(part9)); } KeyData operator() () { return KeyData (ver_, parts_.data(), parts_.size(), exclusive_ ? WSREP_KEY_EXCLUSIVE : WSREP_KEY_SHARED, copy_); } private: std::vector parts_; int const ver_; bool const exclusive_; bool const copy_; bool push_back (const char* const p) { size_t p_len(-1); if (p && (p_len = strlen(p) + 1) > 0) { wsrep_buf_t b = { p, p_len }; parts_.push_back(b); return true; } return false; } }; enum { SHARED = false, EXCLUSIVE = true }; #endif /* _TEST_KEY_HPP_ */ galera-3-25.3.20/galera/tests/service_thd_check.cpp0000644000015300001660000000670613042054732021704 0ustar jenkinsjenkins/* * Copyright (C) 2010-2014 Codership Oy */ #define __STDC_FORMAT_MACROS #include "../src/galera_service_thd.hpp" #include "../src/replicator_smm.hpp" #include #include #include namespace { class TestEnv { class GCache_setup { public: GCache_setup(gu::Config& conf) : name_("service_thd_check.gcache") { conf.set("gcache.name", name_); conf.set("gcache.size", "4M"); log_info << "conf for gcache: " << conf; } ~GCache_setup() { unlink(name_.c_str()); } private: std::string const name_; }; public: TestEnv() : conf_ (), init_ (conf_, NULL, NULL), gcache_setup_(conf_), gcache_ (conf_, "."), gcs_ (conf_, gcache_) {} gcache::GCache& gcache() { return gcache_; } galera::DummyGcs& gcs() { return gcs_; } private: gu::Config conf_; galera::ReplicatorSMM::InitConfig init_; GCache_setup gcache_setup_; gcache::GCache gcache_; galera::DummyGcs gcs_; }; } using namespace galera; START_TEST(service_thd1) { TestEnv env; ServiceThd* thd = new ServiceThd(env.gcs(), env.gcache()); fail_if (thd == 0); delete thd; } END_TEST #define TEST_USLEEP 1000 // 1ms #define WAIT_FOR(cond) \ { int count = 1000; while (--count && !(cond)) { usleep (TEST_USLEEP); }} START_TEST(service_thd2) { TestEnv env; DummyGcs& conn(env.gcs()); ServiceThd* thd = new ServiceThd(conn, env.gcache()); fail_if (thd == 0); conn.set_last_applied(0); gcs_seqno_t seqno = 1; thd->report_last_committed (seqno); thd->flush(); WAIT_FOR(conn.last_applied() == seqno); fail_if (conn.last_applied() != seqno, "seqno = %" PRId64 ", expected %" PRId64, conn.last_applied(), seqno); seqno = 5; thd->report_last_committed (seqno); thd->flush(); WAIT_FOR(conn.last_applied() == seqno); fail_if (conn.last_applied() != seqno, "seqno = %" PRId64 ", expected %" PRId64, conn.last_applied(), seqno); thd->report_last_committed (3); thd->flush(); WAIT_FOR(conn.last_applied() == seqno); fail_if (conn.last_applied() != seqno, "seqno = %" PRId64 ", expected %" PRId64, conn.last_applied(), seqno); thd->reset(); seqno = 3; thd->report_last_committed (seqno); thd->flush(); WAIT_FOR(conn.last_applied() == seqno); fail_if (conn.last_applied() != seqno, "seqno = %" PRId64 ", expected %" PRId64, conn.last_applied(), seqno); delete thd; } END_TEST START_TEST(service_thd3) { TestEnv env; ServiceThd* thd = new ServiceThd(env.gcs(), env.gcache()); fail_if (thd == 
0); // so far for empty GCache the following should be a noop. thd->release_seqno(-1); thd->release_seqno(2345); thd->release_seqno(234645676); delete thd; } END_TEST Suite* service_thd_suite() { Suite* s = suite_create ("service_thd"); TCase* tc; tc = tcase_create ("service_thd"); tcase_add_test (tc, service_thd1); tcase_add_test (tc, service_thd2); tcase_add_test (tc, service_thd3); suite_add_tcase (s, tc); return s; } galera-3-25.3.20/galera/tests/write_set_ng_check.cpp0000644000015300001660000002317313042054732022073 0ustar jenkinsjenkins/* Copyright (C) 2013 Codership Oy * * $Id$ */ #undef NDEBUG #include "test_key.hpp" #include "../src/write_set_ng.hpp" #include "gu_uuid.h" #include "gu_logger.hpp" #include "gu_hexdump.hpp" #include using namespace galera; START_TEST (ver3_basic) { uint16_t const flag1(0xabcd); wsrep_uuid_t source; gu_uuid_generate (reinterpret_cast(&source), NULL, 0); wsrep_conn_id_t const conn(652653); wsrep_trx_id_t const trx(99994952); std::string const dir("."); wsrep_trx_id_t trx_id(1); WriteSetOut wso (dir, trx_id, KeySet::FLAT8A, 0, 0, flag1,WriteSetNG::VER3); fail_unless (wso.is_empty()); // keep SHARED here, see loop below TestKey tk0(KeySet::MAX_VERSION, SHARED, true, "a0"); wso.append_key(tk0()); fail_if (wso.is_empty()); uint64_t const data_out_volatile(0xaabbccdd); uint32_t const data_out_persistent(0xffeeddcc); uint16_t const flag2(0x1234); { uint64_t const d(data_out_volatile); wso.append_data (&d, sizeof(d), true); } wso.append_data (&data_out_persistent, sizeof(data_out_persistent), false); wso.add_flags (flag2); uint16_t const flags(flag1 | flag2); WriteSetNG::GatherVector out; size_t const out_size(wso.gather(source, conn, trx, out)); log_info << "Gather size: " << out_size << ", buf count: " << out->size(); wsrep_seqno_t const last_seen(1); wsrep_seqno_t const seqno(2); int const pa_range(seqno - last_seen); wso.set_last_seen(last_seen); /* concatenate all out buffers */ std::vector in; in.reserve(out_size); for (size_t i(0); i < out->size(); ++i) { const gu::byte_t* ptr(static_cast(out[i].ptr)); in.insert (in.end(), ptr, ptr + out[i].size); } fail_if (in.size() != out_size); gu::Buf const in_buf = { in.data(), static_cast(in.size()) }; /* read ws buffer and "certify" */ { mark_point(); WriteSetIn wsi(in_buf); mark_point(); wsi.verify_checksum(); wsrep_seqno_t const ls(wsi.last_seen()); fail_if (ls != last_seen, "Found last seen: %lld, expected: %lld", ls, last_seen); fail_if (wsi.flags() != flags); fail_if (0 == wsi.timestamp()); fail_if (wsi.annotated()); mark_point(); const KeySetIn& ksi(wsi.keyset()); fail_if (ksi.count() != 1); mark_point(); int shared(0); for (int i(0); i < ksi.count(); ++i) { KeySet::KeyPart kp(ksi.next()); shared += kp.shared(); } fail_unless(shared > 0); wsi.verify_checksum(); wsi.set_seqno (seqno, pa_range); fail_unless(wsi.certified(), "wsi.certified: %d" "\nwsi.pa_range = %lld\n pa_range = %lld", static_cast(wsi.certified()), wsi.pa_range(), pa_range); } /* repeat reading buffer after "certification" */ { WriteSetIn wsi(in_buf); mark_point(); wsi.verify_checksum(); fail_unless(wsi.certified()); fail_if (wsi.seqno() != seqno); fail_if (wsi.flags() != flags); fail_if (0 == wsi.timestamp()); mark_point(); const KeySetIn& ksi(wsi.keyset()); fail_if (ksi.count() != 1); mark_point(); int shared(0); for (int i(0); i < ksi.count(); ++i) { KeySet::KeyPart kp(ksi.next()); shared += kp.shared(); } fail_unless(shared > 0); wsi.verify_checksum(); mark_point(); const DataSetIn& dsi(wsi.dataset()); fail_if (dsi.count() != 1); 
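    /* The single data entry must hold the two append_data() payloads back to
     * back: the 8-byte "volatile" value first, then the 4-byte "persistent"
     * one; the size check and pointer arithmetic below verify exactly that. */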
mark_point(); gu::Buf const d(dsi.next()); fail_if (d.size != sizeof(data_out_volatile) + sizeof(data_out_persistent)); const char* dptr = static_cast(d.ptr); fail_if (*(reinterpret_cast(dptr)) != data_out_volatile); fail_if (*(reinterpret_cast (dptr + sizeof(data_out_volatile))) != data_out_persistent); mark_point(); const DataSetIn& usi(wsi.unrdset()); fail_if (usi.count() != 0); fail_if (usi.size() != 0); } mark_point(); try /* this is to test checksum after set_seqno() */ { WriteSetIn wsi(in_buf); mark_point(); wsi.verify_checksum(); fail_unless(wsi.certified()); fail_if (wsi.pa_range() != pa_range); fail_if (wsi.seqno() != seqno); fail_if (memcmp(&wsi.source_id(), &source, sizeof(source))); fail_if (wsi.conn_id() != conn); fail_if (wsi.trx_id() != trx); } catch (gu::Exception& e) { fail_if (e.get_errno() != EINVAL); } mark_point(); /* this is to test reassembly without keys and unordered data after gather() * + late initialization */ try { WriteSetIn tmp_wsi(in_buf); WriteSetIn::GatherVector out; mark_point(); tmp_wsi.verify_checksum(); gu_trace(tmp_wsi.gather(out, false, false)); // no keys or unrd /* concatenate all out buffers */ std::vector in; in.reserve(out_size); for (size_t i(0); i < out->size(); ++i) { const gu::byte_t* ptr (static_cast(out[i].ptr)); in.insert (in.end(), ptr, ptr + out[i].size); } mark_point(); gu::Buf tmp_buf = { in.data(), static_cast(in.size()) }; WriteSetIn wsi; // first - create an empty writeset wsi.read_buf(tmp_buf); // next - initialize from buffer wsi.verify_checksum(); fail_unless(wsi.certified()); fail_if (wsi.pa_range() != pa_range); fail_if (wsi.seqno() != seqno); fail_if (wsi.keyset().count() != 0); fail_if (wsi.dataset().count() == 0); fail_if (wsi.unrdset().count() != 0); } catch (gu::Exception& e) { fail_if (e.get_errno() != EINVAL); } in[in.size() - 1] ^= 1; // corrupted the last byte (payload) mark_point(); try /* this is to test payload corruption */ { WriteSetIn wsi(in_buf); mark_point(); wsi.verify_checksum(); fail("payload corruption slipped through 1"); } catch (gu::Exception& e) { fail_if (e.get_errno() != EINVAL); } try /* this is to test background checksumming + corruption */ { WriteSetIn wsi(in_buf, 2); mark_point(); try { wsi.verify_checksum(); fail("payload corruption slipped through 2"); } catch (gu::Exception& e) { fail_if (e.get_errno() != EINVAL); } } catch (std::exception& e) { fail("%s", e.what()); } in[2] ^= 1; // corrupted 3rd byte of header try /* this is to test header corruption */ { WriteSetIn wsi(in_buf, 2 /* this should postpone payload checksum */); wsi.verify_checksum(); fail("header corruption slipped through"); } catch (gu::Exception& e) { fail_if (e.get_errno() != EINVAL); } } END_TEST START_TEST (ver3_annotation) { uint16_t const flag1(0xabcd); wsrep_uuid_t source; gu_uuid_generate (reinterpret_cast(&source), NULL, 0); wsrep_conn_id_t const conn(652653); wsrep_trx_id_t const trx(99994952); std::string const dir("."); wsrep_trx_id_t trx_id(1); WriteSetOut wso (dir, trx_id, KeySet::FLAT16, 0, 0, flag1,WriteSetNG::VER3); fail_unless (wso.is_empty()); TestKey tk0(KeySet::MAX_VERSION, SHARED, true, "key0"); wso.append_key(tk0()); fail_if (wso.is_empty()); uint64_t const data(0xaabbccdd); std::string const annotation("0xaabbccdd"); uint16_t const flag2(0x1234); wso.append_data (&data, sizeof(data), true); wso.append_annotation (annotation.c_str(), annotation.size(), true); wso.add_flags (flag2); uint16_t const flags(flag1 | flag2); WriteSetNG::GatherVector out; size_t const out_size(wso.gather(source, conn, trx, 
out)); log_info << "Gather size: " << out_size << ", buf count: " << out->size(); wsrep_seqno_t const last_seen(1); wso.set_last_seen(last_seen); /* concatenate all out buffers */ std::vector in; in.reserve(out_size); for (size_t i(0); i < out->size(); ++i) { const gu::byte_t* ptr(static_cast(out[i].ptr)); in.insert (in.end(), ptr, ptr + out[i].size); } fail_if (in.size() != out_size); gu::Buf const in_buf = { in.data(), static_cast(in.size()) }; /* read buffer into WriteSetIn */ mark_point(); WriteSetIn wsi(in_buf); mark_point(); wsi.verify_checksum(); wsrep_seqno_t const ls(wsi.last_seen()); fail_if (ls != last_seen, "Found last seen: %lld, expected: %lld", ls, last_seen); fail_if (wsi.flags() != flags); fail_if (0 == wsi.timestamp()); wsi.verify_checksum(); fail_if (!wsi.annotated()); /* check that annotation has survived */ std::ostringstream os; wsi.write_annotation(os); std::string const res(os.str()); fail_if(annotation.length() != res.length()); fail_if(annotation != res); } END_TEST Suite* write_set_ng_suite () { TCase* t = tcase_create ("WriteSet"); tcase_add_test (t, ver3_basic); tcase_add_test (t, ver3_annotation); tcase_set_timeout(t, 60); Suite* s = suite_create ("WriteSet"); suite_add_tcase (s, t); return s; } galera-3-25.3.20/galera/tests/write_set_check.cpp0000644000015300001660000005117513042054732021412 0ustar jenkinsjenkins/* * Copyright (C) 2010-2014 Codership Oy */ #include "write_set.hpp" #include "mapped_buffer.hpp" #include "gu_logger.hpp" #include "certification.hpp" #include "replicator_smm.hpp" #include "wsdb.cpp" #include "gcs_action_source.hpp" #include "galera_service_thd.hpp" #include #include namespace { class TestEnv { class GCache_setup { public: GCache_setup(gu::Config& conf) : name_("write_set_test.gcache") { conf.set("gcache.name", name_); conf.set("gcache.size", "4M"); log_info << "conf for gcache: " << conf; } ~GCache_setup() { unlink(name_.c_str()); } private: std::string const name_; }; public: TestEnv() : conf_ (), init_ (conf_, NULL, NULL), gcache_setup_(conf_), gcache_ (conf_, "."), gcs_ (conf_, gcache_), thd_ (gcs_, gcache_) {} ~TestEnv() {} gu::Config& conf() { return conf_; } galera::ServiceThd& thd() { return thd_; } private: gu::Config conf_; galera::ReplicatorSMM::InitConfig init_; GCache_setup gcache_setup_; gcache::GCache gcache_; galera::DummyGcs gcs_; galera::ServiceThd thd_; }; } using namespace std; using namespace galera; typedef std::vector KeyPartSequence; START_TEST(test_key1) { static char k1[16]; static char k2[256]; static char k3[1 << 21]; static char k4[(1 << 22) - 5]; memset(k1, 0xab, sizeof(k1)); memset(k2, 0xcd, sizeof(k2)); memset(k3, 0x9e, sizeof(k3)); memset(k4, 0x8f, sizeof(k4)); const wsrep_buf_t kiovec[4] = { {k1, sizeof k1 }, {k2, sizeof k2 }, {k3, sizeof k3 }, {k4, sizeof k4 } }; KeyOS key(1, kiovec, 4, 0); size_t expected_size(0); #ifndef GALERA_KEY_VLQ expected_size += 1 + std::min(sizeof k1, size_t(0xff)); expected_size += 1 + std::min(sizeof k2, size_t(0xff)); expected_size += 1 + std::min(sizeof k3, size_t(0xff)); expected_size += 1 + std::min(sizeof k4, size_t(0xff)); expected_size += sizeof(uint16_t); #else expected_size += gu::uleb128_size(sizeof k1) + sizeof k1; expected_size += gu::uleb128_size(sizeof k2) + sizeof k2; expected_size += gu::uleb128_size(sizeof k3) + sizeof k3; expected_size += gu::uleb128_size(sizeof k4) + sizeof k4; expected_size += gu::uleb128_size(expected_size); #endif fail_unless(key.serial_size() == expected_size, "%ld <-> %ld", key.serial_size(), expected_size); KeyPartSequence 
kp(key.key_parts()); fail_unless(kp.size() == 4); gu::Buffer buf(key.serial_size()); key.serialize(&buf[0], buf.size(), 0); KeyOS key2(1); key2.unserialize(&buf[0], buf.size(), 0); fail_unless(key2 == key); } END_TEST START_TEST(test_key2) { static char k1[16]; static char k2[256]; static char k3[1 << 21]; static char k4[(1 << 22) - 5]; memset(k1, 0xab, sizeof(k1)); memset(k2, 0xcd, sizeof(k2)); memset(k3, 0x9e, sizeof(k3)); memset(k4, 0x8f, sizeof(k4)); const wsrep_buf_t kiovec[4] = { {k1, sizeof k1 }, {k2, sizeof k2 }, {k3, sizeof k3 }, {k4, sizeof k4 } }; KeyOS key(2, kiovec, 4, 0); size_t expected_size(0); expected_size += 1; // flags #ifndef GALERA_KEY_VLQ expected_size += 1 + std::min(sizeof k1, size_t(0xff)); expected_size += 1 + std::min(sizeof k2, size_t(0xff)); expected_size += 1 + std::min(sizeof k3, size_t(0xff)); expected_size += 1 + std::min(sizeof k4, size_t(0xff)); expected_size += sizeof(uint16_t); #else expected_size += gu::uleb128_size(sizeof k1) + sizeof k1; expected_size += gu::uleb128_size(sizeof k2) + sizeof k2; expected_size += gu::uleb128_size(sizeof k3) + sizeof k3; expected_size += gu::uleb128_size(sizeof k4) + sizeof k4; expected_size += gu::uleb128_size(expected_size); #endif fail_unless(key.serial_size() == expected_size, "%ld <-> %ld", key.serial_size(), expected_size); KeyPartSequence kp(key.key_parts()); fail_unless(kp.size() == 4); gu::Buffer buf(key.serial_size()); key.serialize(&buf[0], buf.size(), 0); KeyOS key2(2); key2.unserialize(&buf[0], buf.size(), 0); fail_unless(key2 == key); } END_TEST START_TEST(test_write_set1) { WriteSet ws(1); const wsrep_buf_t key1[2] = { {void_cast("dbt\0t1"), 6}, {void_cast("aaa") , 3} }; const wsrep_buf_t key2[2] = { {void_cast("dbt\0t2"), 6}, {void_cast("bbbb"), 4} }; const char* rbr = "rbrbuf"; size_t rbr_len = 6; log_info << "ws0 " << ws.serial_size(); ws.append_key(KeyData(1, key1, 2, WSREP_KEY_EXCLUSIVE, true)); log_info << "ws1 " << ws.serial_size(); ws.append_key(KeyData(1, key2, 2, WSREP_KEY_EXCLUSIVE, true)); log_info << "ws2 " << ws.serial_size(); ws.append_data(rbr, rbr_len); gu::Buffer rbrbuf(rbr, rbr + rbr_len); log_info << "rbrlen " << gu::serial_size4(rbrbuf); log_info << "wsrbr " << ws.serial_size(); gu::Buffer buf(ws.serial_size()); ws.serialize(&buf[0], buf.size(), 0); size_t expected_size = 4 // row key sequence size #ifndef GALERA_KEY_VLQ + 2 + 1 + 6 + 1 + 3 // key1 + 2 + 1 + 6 + 1 + 4 // key2 #else + 1 + 1 + 6 + 1 + 3 // key1 + 1 + 1 + 6 + 1 + 4 // key2 #endif + 4 + 6; // rbr fail_unless(buf.size() == expected_size, "%zd <-> %zd <-> %zd", buf.size(), expected_size, ws.serial_size()); WriteSet ws2(0); size_t ret = ws2.unserialize(&buf[0], buf.size(), 0); fail_unless(ret == expected_size); WriteSet::KeySequence rks; ws.get_keys(rks); WriteSet::KeySequence rks2; ws.get_keys(rks2); fail_unless(rks2 == rks); fail_unless(ws2.get_data() == ws.get_data()); } END_TEST START_TEST(test_write_set2) { WriteSet ws(2); const wsrep_buf_t key1[2] = { {void_cast("dbt\0t1"), 6}, {void_cast("aaa") , 3} }; const wsrep_buf_t key2[2] = { {void_cast("dbt\0t2"), 6}, {void_cast("bbbb"), 4} }; const char* rbr = "rbrbuf"; size_t rbr_len = 6; log_info << "ws0 " << ws.serial_size(); ws.append_key(KeyData(2, key1, 2, WSREP_KEY_EXCLUSIVE, true)); log_info << "ws1 " << ws.serial_size(); ws.append_key(KeyData(2, key2, 2, WSREP_KEY_EXCLUSIVE, true)); log_info << "ws2 " << ws.serial_size(); ws.append_data(rbr, rbr_len); gu::Buffer rbrbuf(rbr, rbr + rbr_len); log_info << "rbrlen " << gu::serial_size4(rbrbuf); log_info << "wsrbr " << 
ws.serial_size(); gu::Buffer buf(ws.serial_size()); ws.serialize(&buf[0], buf.size(), 0); size_t expected_size = 4 // row key sequence size #ifndef GALERA_KEY_VLQ + 2 + 1 + 1 + 6 + 1 + 3 // key1 + 2 + 1 + 1 + 6 + 1 + 4 // key2 #else + 1 + 1 + 6 + 1 + 3 // key1 + 1 + 1 + 6 + 1 + 4 // key2 #endif + 4 + 6; // rbr fail_unless(buf.size() == expected_size, "%zd <-> %zd <-> %zd", buf.size(), expected_size, ws.serial_size()); WriteSet ws2(2); size_t ret = ws2.unserialize(&buf[0], buf.size(), 0); fail_unless(ret == expected_size); WriteSet::KeySequence rks; ws.get_keys(rks); WriteSet::KeySequence rks2; ws2.get_keys(rks2); fail_unless(rks2 == rks); fail_unless(ws2.get_data() == ws.get_data()); } END_TEST START_TEST(test_mapped_buffer) { string wd("/tmp"); MappedBuffer mb(wd, 1 << 8); mb.resize(16); for (size_t i = 0; i < 16; ++i) { mb[i] = static_cast(i); } mb.resize(1 << 8); for (size_t i = 0; i < 16; ++i) { fail_unless(mb[i] == static_cast(i)); } for (size_t i = 16; i < (1 << 8); ++i) { mb[i] = static_cast(i); } mb.resize(1 << 20); for (size_t i = 0; i < (1 << 8); ++i) { fail_unless(mb[i] == static_cast(i)); } for (size_t i = 0; i < (1 << 20); ++i) { mb[i] = static_cast(i); } } END_TEST static TrxHandle::LocalPool lp(TrxHandle::LOCAL_STORAGE_SIZE(), 4, "ws_local_pool"); static TrxHandle::SlavePool sp(sizeof(TrxHandle), 4, "ws_slave_pool"); START_TEST(test_cert_hierarchical_v1) { log_info << "test_cert_hierarchical_v1"; struct wsinfo_ { wsrep_uuid_t uuid; wsrep_conn_id_t conn_id; wsrep_trx_id_t trx_id; wsrep_buf_t key[3]; size_t iov_len; wsrep_seqno_t local_seqno; wsrep_seqno_t global_seqno; wsrep_seqno_t last_seen_seqno; wsrep_seqno_t expected_depends_seqno; int flags; Certification::TestResult result; } wsi[] = { // 1 - 3, test symmetric case for dependencies // 1: no dependencies { { {1, } }, 1, 1, { {void_cast("1"), 1}, }, 1, 1, 1, 0, 0, 0, Certification::TEST_OK}, // 2: depends on 1, no conflict { { {1, } }, 1, 1, { {void_cast("1"), 1}, {void_cast("1"), 1} }, 2, 2, 2, 0, 1, 0, Certification::TEST_OK}, // 3: depends on 2, no conflict { { {1, } }, 1, 1, { {void_cast("1"), 1}, }, 1, 3, 3, 0, 2, 0, Certification::TEST_OK}, // 4 - 8, test symmetric case for conflicts // 4: depends on 3, no conflict { { {1, } }, 1, 1, { {void_cast("1"), 1}, }, 1, 4, 4, 3, 3, 0, Certification::TEST_OK}, // 5: conflict with 4 { { {2, } }, 1, 1, { {void_cast("1"), 1}, {void_cast("1"), 1} }, 2, 5, 5, 3, -1, 0, Certification::TEST_FAILED}, // 6: depends on 4 (failed 5 not present in index), no conflict { { {2, } }, 1, 1, { {void_cast("1"), 1}, {void_cast("1"), 1} }, 2, 6, 6, 5, 4, 0, Certification::TEST_OK}, // 7: conflicts with 6 { { {1, } }, 1, 1, { {void_cast("1"), 1}, }, 1, 7, 7, 5, -1, 0, Certification::TEST_FAILED}, // 8: to isolation: must not conflict, depends on global_seqno - 1 { { {1, } }, 1, 1, { {void_cast("1"), 1}, }, 1, 8, 8, 5, 7, TrxHandle::F_ISOLATION, Certification::TEST_OK}, // 9: to isolation: must not conflict, depends on global_seqno - 1 { { {2, } }, 1, 1, { {void_cast("1"), 1}, }, 1, 9, 9, 5, 8, TrxHandle::F_ISOLATION, Certification::TEST_OK}, }; size_t nws(sizeof(wsi)/sizeof(wsi[0])); TestEnv env; galera::Certification cert(env.conf(), env.thd()); int const version(1); cert.assign_initial_position(0, version); galera::TrxHandle::Params const trx_params("", version,KeySet::MAX_VERSION); mark_point(); for (size_t i(0); i < nws; ++i) { TrxHandle* trx(TrxHandle::New(lp, trx_params, wsi[i].uuid, wsi[i].conn_id, wsi[i].trx_id)); trx->append_key(KeyData(1, wsi[i].key, wsi[i].iov_len, 
WSREP_KEY_EXCLUSIVE, true)); trx->set_last_seen_seqno(wsi[i].last_seen_seqno); trx->set_flags(trx->flags() | wsi[i].flags); trx->flush(0); // serialize/unserialize to verify that ver1 trx is serializable const galera::MappedBuffer& wc(trx->write_set_collection()); gu::Buffer buf(wc.size()); std::copy(&wc[0], &wc[0] + wc.size(), &buf[0]); trx->unref(); trx = TrxHandle::New(sp); size_t offset(trx->unserialize(&buf[0], buf.size(), 0)); log_info << "ws[" << i << "]: " << buf.size() - offset; trx->append_write_set(&buf[0] + offset, buf.size() - offset); trx->set_received(0, wsi[i].local_seqno, wsi[i].global_seqno); Certification::TestResult result(cert.append_trx(trx)); fail_unless(result == wsi[i].result, "g: %lld r: %d er: %d", trx->global_seqno(), result, wsi[i].result); fail_unless(trx->depends_seqno() == wsi[i].expected_depends_seqno, "wsi: %zu g: %lld ld: %lld eld: %lld", i, trx->global_seqno(), trx->depends_seqno(), wsi[i].expected_depends_seqno); cert.set_trx_committed(trx); trx->unref(); } } END_TEST START_TEST(test_cert_hierarchical_v2) { log_info << "test_cert_hierarchical_v2"; const int version(2); struct wsinfo_ { wsrep_uuid_t uuid; wsrep_conn_id_t conn_id; wsrep_trx_id_t trx_id; wsrep_buf_t key[3]; size_t iov_len; bool shared; wsrep_seqno_t local_seqno; wsrep_seqno_t global_seqno; wsrep_seqno_t last_seen_seqno; wsrep_seqno_t expected_depends_seqno; int flags; Certification::TestResult result; } wsi[] = { // 1 - 4: shared - shared // First four cases are shared keys, they should not collide or // generate dependency // 1: no dependencies { { {1, } }, 1, 1, { {void_cast("1"), 1}, }, 1, true, 1, 1, 0, 0, 0, Certification::TEST_OK}, // 2: no dependencies { { {1, } }, 1, 2, { {void_cast("1"), 1}, {void_cast("1"), 1}, }, 2, true, 2, 2, 0, 0, 0, Certification::TEST_OK}, // 3: no dependencies { { {2, } }, 1, 3, { {void_cast("1"), 1}, {void_cast("1"), 1}, }, 2, true, 3, 3, 0, 0, 0, Certification::TEST_OK}, // 4: no dependencies { { {3, } }, 1, 4, { {void_cast("1"), 1}, }, 1, true, 4, 4, 0, 0, 0, Certification::TEST_OK}, // 5: shared - exclusive // 5: depends on 4 { { {2, } }, 1, 5, { {void_cast("1"), 1}, {void_cast("1"), 1}, }, 2, false, 5, 5, 0, 4, 0, Certification::TEST_OK}, // 6 - 8: exclusive - shared // 6: collides with 5 { { {1, } }, 1, 6, { {void_cast("1"), 1}, {void_cast("1"), 1}, }, 2, true, 6, 6, 4, -1, 0, Certification::TEST_FAILED}, // 7: collides with 5 { { {1, } }, 1, 7, { {void_cast("1"), 1}, }, 1, true, 7, 7, 4, -1, 0, Certification::TEST_FAILED}, // 8: collides with 5 { { {1, } }, 1, 8, { {void_cast("1"), 1}, {void_cast("1"), 1}, {void_cast("1"), 1}}, 3, true, 8, 8, 4, -1, 0, Certification::TEST_FAILED}, // 9 - 10: shared key shadows dependency to 5 // 9: depends on 5 { { {2, } }, 1, 9, { {void_cast("1"), 1}, {void_cast("1"), 1}, }, 2, true, 9, 9, 0, 5, 0, Certification::TEST_OK}, // 10: depends on 5 { { {2, } }, 1, 10, { {void_cast("1"), 1}, {void_cast("1"), 1}, }, 2, true, 10, 10, 6, 5, 0, Certification::TEST_OK}, // 11 - 13: exclusive - shared - exclusive dependency { { {2, } }, 1, 11, { {void_cast("1"), 1}, {void_cast("1"), 1}, }, 2, false, 11, 11, 10, 10, 0, Certification::TEST_OK}, { { {2, } }, 1, 12, { {void_cast("1"), 1}, {void_cast("1"), 1}, }, 2, true, 12, 12, 10, 11, 0, Certification::TEST_OK}, { { {2, } }, 1, 13, { {void_cast("1"), 1}, {void_cast("1"), 1}, }, 2, false, 13, 13, 10, 12, 0, Certification::TEST_OK}, }; size_t nws(sizeof(wsi)/sizeof(wsi[0])); TestEnv env; galera::Certification cert(env.conf(), env.thd()); cert.assign_initial_position(0, version); 
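    /* As in the v1 case above, each wsinfo_ entry is pushed through the full
     * local -> serialized -> slave TrxHandle path and appended to the
     * certification index; the loop below then compares the certification
     * verdict and the resulting depends_seqno against the expectations
     * listed in the table. */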
galera::TrxHandle::Params const trx_params("", version,KeySet::MAX_VERSION); mark_point(); for (size_t i(0); i < nws; ++i) { TrxHandle* trx(TrxHandle::New(lp, trx_params, wsi[i].uuid, wsi[i].conn_id, wsi[i].trx_id)); trx->append_key(KeyData(version, wsi[i].key, wsi[i].iov_len, (wsi[i].shared ? WSREP_KEY_SHARED : WSREP_KEY_EXCLUSIVE), true)); trx->set_last_seen_seqno(wsi[i].last_seen_seqno); trx->set_flags(trx->flags() | wsi[i].flags); trx->flush(0); // serialize/unserialize to verify that ver1 trx is serializable const galera::MappedBuffer& wc(trx->write_set_collection()); gu::Buffer buf(wc.size()); std::copy(&wc[0], &wc[0] + wc.size(), &buf[0]); trx->unref(); trx = TrxHandle::New(sp); size_t offset(trx->unserialize(&buf[0], buf.size(), 0)); log_info << "ws[" << i << "]: " << buf.size() - offset; trx->append_write_set(&buf[0] + offset, buf.size() - offset); trx->set_received(0, wsi[i].local_seqno, wsi[i].global_seqno); Certification::TestResult result(cert.append_trx(trx)); fail_unless(result == wsi[i].result, "g: %lld res: %d exp: %d", trx->global_seqno(), result, wsi[i].result); fail_unless(trx->depends_seqno() == wsi[i].expected_depends_seqno, "wsi: %zu g: %lld ld: %lld eld: %lld", i, trx->global_seqno(), trx->depends_seqno(), wsi[i].expected_depends_seqno); cert.set_trx_committed(trx); trx->unref(); } } END_TEST START_TEST(test_trac_726) { log_info << "test_trac_726"; const int version(2); TestEnv env; galera::Certification cert(env.conf(), env.thd()); galera::TrxHandle::Params const trx_params("", version,KeySet::MAX_VERSION); wsrep_uuid_t uuid1 = {{1, }}; wsrep_uuid_t uuid2 = {{2, }}; cert.assign_initial_position(0, version); mark_point(); wsrep_buf_t key1 = {void_cast("1"), 1}; wsrep_buf_t key2 = {void_cast("2"), 1}; { TrxHandle* trx(TrxHandle::New(lp, trx_params, uuid1, 0, 0)); trx->append_key(KeyData(version, &key1, 1, WSREP_KEY_EXCLUSIVE, true)); trx->set_last_seen_seqno(0); trx->flush(0); // serialize/unserialize to verify that ver1 trx is serializable const galera::MappedBuffer& wc(trx->write_set_collection()); gu::Buffer buf(wc.size()); std::copy(&wc[0], &wc[0] + wc.size(), &buf[0]); trx->unref(); trx = TrxHandle::New(sp); size_t offset(trx->unserialize(&buf[0], buf.size(), 0)); trx->append_write_set(&buf[0] + offset, buf.size() - offset); trx->set_received(0, 1, 1); Certification::TestResult result(cert.append_trx(trx)); fail_unless(result == Certification::TEST_OK); cert.set_trx_committed(trx); trx->unref(); } { TrxHandle* trx(TrxHandle::New(lp, trx_params, uuid2, 0, 0)); trx->append_key(KeyData(version, &key2, 1, WSREP_KEY_EXCLUSIVE, true)); trx->append_key(KeyData(version, &key2, 1, WSREP_KEY_SHARED, true)); trx->append_key(KeyData(version, &key1, 1, WSREP_KEY_EXCLUSIVE, true)); trx->set_last_seen_seqno(0); trx->flush(0); // serialize/unserialize to verify that ver1 trx is serializable const galera::MappedBuffer& wc(trx->write_set_collection()); gu::Buffer buf(wc.size()); std::copy(&wc[0], &wc[0] + wc.size(), &buf[0]); trx->unref(); trx = TrxHandle::New(sp); size_t offset(trx->unserialize(&buf[0], buf.size(), 0)); trx->append_write_set(&buf[0] + offset, buf.size() - offset); trx->set_received(0, 2, 2); Certification::TestResult result(cert.append_trx(trx)); fail_unless(result == Certification::TEST_FAILED); cert.set_trx_committed(trx); trx->unref(); } } END_TEST Suite* write_set_suite() { Suite* s = suite_create("write_set"); TCase* tc; tc = tcase_create("test_key1"); tcase_add_test(tc, test_key1); suite_add_tcase(s, tc); tc = tcase_create("test_key2"); tcase_add_test(tc, 
test_key2); suite_add_tcase(s, tc); tc = tcase_create("test_write_set1"); tcase_add_test(tc, test_write_set1); suite_add_tcase(s, tc); tc = tcase_create("test_write_set2"); tcase_add_test(tc, test_write_set2); suite_add_tcase(s, tc); tc = tcase_create("test_mapped_buffer"); tcase_add_test(tc, test_mapped_buffer); suite_add_tcase(s, tc); tc = tcase_create("test_cert_hierarchical_v1"); tcase_add_test(tc, test_cert_hierarchical_v1); tcase_set_timeout(tc, 20); suite_add_tcase(s, tc); tc = tcase_create("test_cert_hierarchical_v2"); tcase_add_test(tc, test_cert_hierarchical_v2); tcase_set_timeout(tc, 20); suite_add_tcase(s, tc); tc = tcase_create("test_trac_726"); tcase_add_test(tc, test_trac_726); tcase_set_timeout(tc, 20); suite_add_tcase(s, tc); return s; } galera-3-25.3.20/galera/tests/ist_check.cpp0000644000015300001660000002443313042054732020201 0ustar jenkinsjenkins// // Copyright (C) 2011-2014 Codership Oy // #include "ist.hpp" #include "ist_proto.hpp" #include "trx_handle.hpp" #include "uuid.hpp" #include "monitor.hpp" #include "GCache.hpp" #include "gu_arch.h" #include "replicator_smm.hpp" #include using namespace galera; // Message tests START_TEST(test_ist_message) { using namespace galera::ist; Message m3(3, Message::T_HANDSHAKE, 0x2, 3, 1001); #if 0 /* This is a check for the old (broken) format */ #if GU_WORDSIZE == 32 fail_unless(serial_size(m3) == 20, "serial size %zu != 20", serial_size(m3)); #elif GU_WORDSIZE == 64 fail_unless(serial_size(m3) == 24, "serial size %zu != 24", serial_size(m3)); #endif #endif /* 0 */ gu::Buffer buf(m3.serial_size()); m3.serialize(&buf[0], buf.size(), 0); Message mu3(3); mu3.unserialize(&buf[0], buf.size(), 0); fail_unless(mu3.version() == 3); fail_unless(mu3.type() == Message::T_HANDSHAKE); fail_unless(mu3.flags() == 0x2); fail_unless(mu3.ctrl() == 3); fail_unless(mu3.len() == 1001); Message m4(4, Message::T_HANDSHAKE, 0x2, 3, 1001); fail_unless(m4.serial_size() == 12); buf.clear(); buf.resize(m4.serial_size()); m4.serialize(&buf[0], buf.size(), 0); Message mu4(4); mu4.unserialize(&buf[0], buf.size(), 0); fail_unless(mu4.version() == 4); fail_unless(mu4.type() == Message::T_HANDSHAKE); fail_unless(mu4.flags() == 0x2); fail_unless(mu4.ctrl() == 3); fail_unless(mu4.len() == 1001); } END_TEST // IST tests static pthread_barrier_t start_barrier; class TestOrder { public: TestOrder(galera::TrxHandle& trx) : trx_(trx) { } void lock() { } void unlock() { } wsrep_seqno_t seqno() const { return trx_.global_seqno(); } bool condition(wsrep_seqno_t last_entered, wsrep_seqno_t last_left) const { return (last_left >= trx_.depends_seqno()); } #ifdef GU_DBUG_ON void debug_sync(gu::Mutex&) { } #endif // GU_DBUG_ON private: galera::TrxHandle& trx_; }; struct sender_args { gcache::GCache& gcache_; const std::string& peer_; wsrep_seqno_t first_; wsrep_seqno_t last_; int version_; sender_args(gcache::GCache& gcache, const std::string& peer, wsrep_seqno_t first, wsrep_seqno_t last, int version) : gcache_(gcache), peer_ (peer), first_ (first), last_ (last), version_(version) { } }; struct receiver_args { std::string listen_addr_; wsrep_seqno_t first_; wsrep_seqno_t last_; size_t n_receivers_; TrxHandle::SlavePool& trx_pool_; int version_; receiver_args(const std::string listen_addr, wsrep_seqno_t first, wsrep_seqno_t last, size_t n_receivers, TrxHandle::SlavePool& sp, int version) : listen_addr_(listen_addr), first_ (first), last_ (last), n_receivers_(n_receivers), trx_pool_ (sp), version_ (version) { } }; struct trx_thread_args { galera::ist::Receiver& receiver_; 
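    // Monitor shared by all trx threads; together with TestOrder it gates
    // the apply order on each received write set's depends_seqno.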
galera::Monitor monitor_; trx_thread_args(galera::ist::Receiver& receiver) : receiver_(receiver), monitor_() { } }; extern "C" void* sender_thd(void* arg) { mark_point(); const sender_args* sargs(reinterpret_cast(arg)); gu::Config conf; galera::ReplicatorSMM::InitConfig(conf, NULL, NULL); pthread_barrier_wait(&start_barrier); galera::ist::Sender sender(conf, sargs->gcache_, sargs->peer_, sargs->version_); mark_point(); sender.send(sargs->first_, sargs->last_); return 0; } extern "C" void* trx_thread(void* arg) { trx_thread_args* targs(reinterpret_cast(arg)); pthread_barrier_wait(&start_barrier); targs->receiver_.ready(); while (true) { galera::TrxHandle* trx(0); int err; if ((err = targs->receiver_.recv(&trx)) != 0) { assert(trx == 0); log_info << "terminated with " << err; return 0; } TestOrder to(*trx); targs->monitor_.enter(to); targs->monitor_.leave(to); trx->unref(); } return 0; } extern "C" void* receiver_thd(void* arg) { mark_point(); receiver_args* rargs(reinterpret_cast(arg)); gu::Config conf; galera::ReplicatorSMM::InitConfig(conf, NULL, NULL); mark_point(); conf.set(galera::ist::Receiver::RECV_ADDR, rargs->listen_addr_); galera::ist::Receiver receiver(conf, rargs->trx_pool_, 0); rargs->listen_addr_ = receiver.prepare(rargs->first_, rargs->last_, rargs->version_); mark_point(); std::vector threads(rargs->n_receivers_); trx_thread_args trx_thd_args(receiver); for (size_t i(0); i < threads.size(); ++i) { log_info << "starting trx thread " << i; pthread_create(&threads[0] + i, 0, &trx_thread, &trx_thd_args); } trx_thd_args.monitor_.set_initial_position(rargs->first_ - 1); pthread_barrier_wait(&start_barrier); trx_thd_args.monitor_.wait(rargs->last_); for (size_t i(0); i < threads.size(); ++i) { log_info << "joining trx thread " << i; pthread_join(threads[i], 0); } receiver.finished(); return 0; } static int select_trx_version(int protocol_version) { // see protocol version table in replicator_smm.hpp switch (protocol_version) { case 1: case 2: return 1; case 3: case 4: return 2; case 5: return 3; } fail("unknown protocol version %i", protocol_version); return -1; } static void test_ist_common(int const version) { using galera::KeyData; using galera::TrxHandle; using galera::KeyOS; TrxHandle::LocalPool lp(TrxHandle::LOCAL_STORAGE_SIZE(), 4, "ist_common"); TrxHandle::SlavePool sp(sizeof(TrxHandle), 4, "ist_common"); int const trx_version(select_trx_version(version)); TrxHandle::Params const trx_params("", trx_version, galera::KeySet::MAX_VERSION); gu::Config conf; galera::ReplicatorSMM::InitConfig(conf, NULL, NULL); std::string gcache_file("ist_check.cache"); conf.set("gcache.name", gcache_file); std::string dir("."); std::string receiver_addr("tcp://127.0.0.1:0"); wsrep_uuid_t uuid; gu_uuid_generate(reinterpret_cast(&uuid), 0, 0); gcache::GCache* gcache = new gcache::GCache(conf, dir); mark_point(); // populate gcache for (size_t i(1); i <= 10; ++i) { TrxHandle* trx(TrxHandle::New(lp, trx_params, uuid, 1234+i, 5678+i)); const wsrep_buf_t key[2] = { {"key1", 4}, {"key2", 4} }; trx->append_key(KeyData(trx_version, key, 2, WSREP_KEY_EXCLUSIVE,true)); trx->append_data("bar", 3, WSREP_DATA_ORDERED, true); assert (i > 0); int last_seen(i - 1); int pa_range(i); gu::byte_t* ptr(0); if (trx_version < 3) { trx->set_last_seen_seqno(last_seen); size_t trx_size(trx->serial_size()); ptr = static_cast(gcache->malloc(trx_size)); trx->serialize(ptr, trx_size, 0); } else { galera::WriteSetNG::GatherVector bufs; ssize_t trx_size(trx->write_set_out().gather(trx->source_id(), trx->conn_id(), 
trx->trx_id(), bufs)); trx->set_last_seen_seqno(last_seen); ptr = static_cast(gcache->malloc(trx_size)); /* concatenate buffer vector */ gu::byte_t* p(ptr); for (size_t k(0); k < bufs->size(); ++k) { ::memcpy(p, bufs[k].ptr, bufs[k].size); p += bufs[k].size; } assert ((p - ptr) == trx_size); gu::Buf ws_buf = { ptr, trx_size }; galera::WriteSetIn wsi(ws_buf); assert (wsi.last_seen() == last_seen); assert (wsi.pa_range() == 0); wsi.set_seqno(i, pa_range); assert (wsi.seqno() == int64_t(i)); assert (wsi.pa_range() == pa_range); } gcache->seqno_assign(ptr, i, i - pa_range); trx->unref(); } mark_point(); receiver_args rargs(receiver_addr, 1, 10, 1, sp, version); sender_args sargs(*gcache, rargs.listen_addr_, 1, 10, version); pthread_barrier_init(&start_barrier, 0, 1 + 1 + rargs.n_receivers_); pthread_t sender_thread, receiver_thread; pthread_create(&sender_thread, 0, &sender_thd, &sargs); mark_point(); usleep(100000); pthread_create(&receiver_thread, 0, &receiver_thd, &rargs); mark_point(); pthread_join(sender_thread, 0); pthread_join(receiver_thread, 0); mark_point(); delete gcache; mark_point(); unlink(gcache_file.c_str()); } START_TEST(test_ist_v1) { test_ist_common(1); } END_TEST START_TEST(test_ist_v2) { test_ist_common(2); } END_TEST START_TEST(test_ist_v3) { test_ist_common(3); } END_TEST START_TEST(test_ist_v4) { test_ist_common(4); } END_TEST START_TEST(test_ist_v5) { test_ist_common(5); } END_TEST Suite* ist_suite() { Suite* s = suite_create("ist"); TCase* tc; tc = tcase_create("test_ist_message"); tcase_add_test(tc, test_ist_message); suite_add_tcase(s, tc); tc = tcase_create("test_ist_v1"); tcase_set_timeout(tc, 60); tcase_add_test(tc, test_ist_v1); suite_add_tcase(s, tc); tc = tcase_create("test_ist_v2"); tcase_set_timeout(tc, 60); tcase_add_test(tc, test_ist_v2); suite_add_tcase(s, tc); tc = tcase_create("test_ist_v3"); tcase_set_timeout(tc, 60); tcase_add_test(tc, test_ist_v3); suite_add_tcase(s, tc); tc = tcase_create("test_ist_v4"); tcase_set_timeout(tc, 60); tcase_add_test(tc, test_ist_v4); suite_add_tcase(s, tc); tc = tcase_create("test_ist_v5"); tcase_set_timeout(tc, 60); tcase_add_test(tc, test_ist_v5); suite_add_tcase(s, tc); return s; } galera-3-25.3.20/gcomm/0000755000015300001660000000000013042054732014240 5ustar jenkinsjenkinsgalera-3-25.3.20/gcomm/src/0000755000015300001660000000000013042054732015027 5ustar jenkinsjenkinsgalera-3-25.3.20/gcomm/src/gcomm/0000755000015300001660000000000013042054732016131 5ustar jenkinsjenkinsgalera-3-25.3.20/gcomm/src/gcomm/protonet.hpp0000644000015300001660000000525213042054732020520 0ustar jenkinsjenkins// // Copyright (C) 2009 Codership Oy // //! // @file protonet.hpp // // This file defines protonet interface used by gcomm. // #ifndef GCOMM_PROTONET_HPP #define GCOMM_PROTONET_HPP #include "gu_uri.hpp" #include "gu_datetime.hpp" #include "protostack.hpp" #include "gu_config.hpp" #include "socket.hpp" #include #include #ifndef GCOMM_PROTONET_MAX_VERSION #define GCOMM_PROTONET_MAX_VERSION 0 #endif // GCOMM_PROTONET_MAX_VERSION namespace gcomm { // Forward declarations class Protonet; } //! // Abstract Protonet interface class // class gcomm::Protonet { public: Protonet(gu::Config& conf, const std::string& type, int version) : protos_ (), version_(version), conf_ (conf), type_ (type) { } virtual ~Protonet() { } //! // Insert Protostack to be handled by Protonet // // @param pstack Pointer to Protostack // void insert(Protostack* pstack); //! 
// Erase Protostack from Protonet to stop dispatching events // to Protostack // // @param pstack Pointer to Protostack // void erase(Protostack* pstack); //! // Create new Socket // // @param uri URI to specify Socket type // // @return Socket // virtual gcomm::SocketPtr socket(const gu::URI& uri) = 0; //! // Create new Acceptor // // @param uri URI to specify Acceptor type // // @return Acceptor // virtual Acceptor* acceptor(const gu::URI& uri) = 0; //! // Dispatch events until period p has passed or event // loop is interrupted. // // @param p Period to run event_loop(), negative value means forever // virtual void event_loop(const gu::datetime::Period& p) = 0; //! // Iterate over Protostacks and handle timers // // @return Time of next known timer expiration // gu::datetime::Date handle_timers(); //! // Interrupt event loop // virtual void interrupt() = 0; //! // Enter Protonet critical section // virtual void enter() = 0; //! // Leave Protonet critical section // virtual void leave() = 0; bool set_param(const std::string& key, const std::string& val); gu::Config& conf() { return conf_; } //! // Factory method for creating Protonets // static Protonet* create(gu::Config& conf); const std::string& type() const { return type_; } virtual size_t mtu() const = 0; protected: std::deque protos_; int version_; static const int max_version_ = GCOMM_PROTONET_MAX_VERSION; gu::Config& conf_; private: std::string type_; }; #endif // GCOMM_PROTONET_HPP galera-3-25.3.20/gcomm/src/gcomm/map.hpp0000644000015300001660000001703313042054732017423 0ustar jenkinsjenkins/* * Copyright (C) 2009-2012 Codership Oy */ /*! * @file map.hpp * * This file contains templates that are thin wrappers for std::map * and std::multimap with some extra functionality. */ #ifndef GCOMM_MAP_HPP #define GCOMM_MAP_HPP #include "gu_serialize.hpp" #include #include #include #include "gcomm/exception.hpp" #include "gcomm/types.hpp" namespace gcomm { template class MapBase { typedef C MapType; public: typedef typename MapType::iterator iterator; typedef typename MapType::const_iterator const_iterator; typedef typename MapType::reverse_iterator reverse_iterator; typedef typename MapType::const_reverse_iterator const_reverse_iterator; typedef typename MapType::value_type value_type; typedef typename MapType::const_reference const_reference; typedef typename MapType::key_type key_type; typedef typename MapType::mapped_type mapped_type; protected: MapType map_; public: MapBase() : map_() {} virtual ~MapBase() {} iterator begin() { return map_.begin(); } iterator end() { return map_.end(); } iterator find(const K& k) { return map_.find(k); } iterator find_checked(const K& k) { iterator ret = map_.find(k); if (ret == map_.end()) { gu_throw_fatal << "element " << k << " not found"; } return ret; } iterator lower_bound(const K& k) { return map_.lower_bound(k); } const_iterator begin() const { return map_.begin(); } const_iterator end() const { return map_.end(); } const_reverse_iterator rbegin() const { return map_.rbegin(); } const_reverse_iterator rend() const { return map_.rend(); } const_iterator find(const K& k) const { return map_.find(k); } const_iterator find_checked(const K& k) const { const_iterator ret = map_.find(k); if (ret == map_.end()) { gu_throw_fatal << "element " << k << " not found"; } return ret; } mapped_type& operator[](const key_type& k) { return map_[k]; } void erase(iterator i) { map_.erase(i); } void erase(iterator i, iterator j) { map_.erase(i, j); } void erase(const K& k) { map_.erase(k); } void clear() { 
map_.clear(); } size_t size() const { return map_.size(); } bool empty() const { return map_.empty(); } size_t serialize(gu::byte_t* const buf, size_t const buflen, size_t offset) const { gu_trace(offset = gu::serialize4( static_cast(size()), buf, buflen, offset)); for (const_iterator i = map_.begin(); i != map_.end(); ++i) { gu_trace(offset = key(i).serialize(buf, buflen, offset)); gu_trace(offset = value(i).serialize(buf, buflen, offset)); } return offset; } size_t unserialize(const gu::byte_t* buf, size_t const buflen, size_t offset) { uint32_t len; // Clear map in case this object is reused map_.clear(); gu_trace(offset = gu::unserialize4(buf, buflen, offset, len));; for (uint32_t i = 0; i < len; ++i) { K k; V v; gu_trace(offset = k.unserialize(buf, buflen, offset)); gu_trace(offset = v.unserialize(buf, buflen, offset)); if (map_.insert(std::make_pair(k, v)).second == false) { gu_throw_fatal << "Failed to unserialize map"; } } return offset; } size_t serial_size() const { return sizeof(uint32_t) + size()*(K::serial_size() + V::serial_size()); } bool operator==(const MapBase& other) const { return (map_ == other.map_); } bool operator!=(const MapBase& other) const { return !(map_ == other.map_); } static const K& key(const_iterator i) { return i->first; } static const K& key(iterator i) { return i->first; } static const V& value(const_iterator i) { return i->second; } static V& value(iterator i) { return i->second; } static const K& key(const value_type& vt) { return vt.first; } static V& value(value_type& vt) { return vt.second; } static const V& value(const value_type& vt) { return vt.second; } }; // @todo For some reason map key must be declared in gcomm namespace // in order this to work. Find out the reason why and fix. template std::ostream& operator<<(std::ostream& os, const std::pair& p) { return (os << "\t" << p.first << "," << p.second << "\n"); } template std::ostream& operator<<(std::ostream& os, const MapBase& map) { std::copy(map.begin(), map.end(), std::ostream_iterator >(os, "")); return os; } template > class Map : public MapBase { public: typedef typename MapBase::iterator iterator; std::pair insert(const std::pair& p) { return MapBase::map_.insert(p); } template void insert(InputIterator first, InputIterator last) { MapBase::map_.insert(first, last); } iterator insert_unique(const typename MapBase::value_type& p) { std::pair ret = MapBase::map_.insert(p); if (false == ret.second) { gu_throw_fatal << "duplicate entry " << "key=" << MapBase::key(p) << " " << "value=" << MapBase::value(p) << " " << "map=" << *this; } return ret.first; } }; template > class MultiMap : public MapBase { public: typedef typename MapBase::iterator iterator; typedef typename MapBase::const_iterator const_iterator; typedef typename MapBase::value_type value_type; typedef typename MapBase::const_reference const_reference; iterator insert(const std::pair& p) { return MapBase::map_.insert(p); } iterator insert(iterator position, const value_type& vt) { return MapBase::map_.insert(position, vt); } std::pair equal_range(const K& k) const { return MapBase::map_.equal_range(k); } }; } #endif /* GCOMM_MAP_HPP */ galera-3-25.3.20/gcomm/src/gcomm/uuid.hpp0000644000015300001660000000355413042054732017617 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy */ #ifndef _GCOMM_UUID_HPP_ #define _GCOMM_UUID_HPP_ #include "gcomm/exception.hpp" #include "gcomm/types.hpp" #include "gu_utils.hpp" #include "gu_assert.hpp" #include "gu_byteswap.h" #include "gu_uuid.hpp" #include #include namespace gcomm { 
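    // UUID is forward-declared so that the stream insertion operator can be
    // declared before the class body; the operator is defined at the end of
    // this header and prints the short four-byte form (full_str() and
    // to_stream(os, true) give the full representation).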
class UUID; std::ostream& operator<<(std::ostream&, const UUID&); } class gcomm::UUID : public gu::UUID { public: UUID() : gu::UUID() {} UUID(const void* node, const size_t node_len) : gu::UUID(node, node_len) {} UUID(const int32_t idx) : gu::UUID() { assert(idx > 0); memcpy(&uuid_, &idx, sizeof(idx)); } static const UUID& nil() { return uuid_nil_; } std::ostream& to_stream(std::ostream& os, bool full) const { std::ios_base::fmtflags saved = os.flags(); if (full == true) { os << uuid_; } else { os << std::hex << std::setfill('0') << std::setw(2) << static_cast(uuid_.data[0]) << std::setfill('0') << std::setw(2) << static_cast(uuid_.data[1]) << std::setfill('0') << std::setw(2) << static_cast(uuid_.data[2]) << std::setfill('0') << std::setw(2) << static_cast(uuid_.data[3]); } os.flags(saved); return os; } // Prefer the above function over this one std::string full_str() const { std::ostringstream os; to_stream(os, true); return os.str(); } private: static const UUID uuid_nil_; UUID(gu_uuid_t uuid) : gu::UUID(uuid) {} }; inline std::ostream& gcomm::operator<<(std::ostream& os, const UUID& uuid) { return uuid.to_stream (os, false); } #endif // _GCOMM_UUID_HPP_ galera-3-25.3.20/gcomm/src/gcomm/exception.hpp0000644000015300001660000000106613042054732020643 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy */ /*! * @file exception.hpp * * @brief GComm exception definitions. */ #ifndef GCOMM_EXCEPTION_HPP #define GCOMM_EXCEPTION_HPP #include "gu_throw.hpp" /*! * Assert macro for runtime condition checking. This should be used * for conditions that may depend on external input and are required * to validate correct protocol operation. */ #define gcomm_assert(cond_) \ if ((cond_) == false) gu_throw_fatal << #cond_ << ": " #endif // GCOMM_EXCEPTION_HPP galera-3-25.3.20/gcomm/src/gcomm/datagram.hpp0000644000015300001660000002066513042054732020433 0ustar jenkinsjenkins/* * Copyright (C) 2010-2013 Codership Oy */ #ifndef GCOMM_DATAGRAM_HPP #define GCOMM_DATAGRAM_HPP #include "gu_buffer.hpp" #include "gu_serialize.hpp" #include "gu_utils.hpp" #include #include #include namespace gcomm { //! // @class NetHeader // // @brief Header for datagrams sent over network // // Header structure is the following (MSB first) // // | version(4) | reserved(2) | F_CRC(2) | len(24) | // | CRC(32) | // class NetHeader { public: typedef enum checksum { CS_NONE = 0, CS_CRC32, CS_CRC32C } checksum_t; static checksum_t checksum_type (int i); NetHeader() : len_(), crc32_() { } NetHeader(uint32_t len, int version) : len_(len), crc32_(0) { if (len > len_mask_) gu_throw_error(EINVAL) << "msg too long " << len_; len_ |= (static_cast(version) << version_shift_); } uint32_t len() const { return (len_ & len_mask_); } void set_crc32(uint32_t crc32, checksum_t type) { assert (CS_CRC32 == type || CS_CRC32C == type); crc32_ = crc32; CS_CRC32 == type ? 
len_ |= F_CRC32 : len_ |= F_CRC32C; } bool has_crc32() const { return (len_ & F_CRC32); } bool has_crc32c() const { return (len_ & F_CRC32C); } uint32_t crc32() const { return crc32_; } int version() const { return ((len_ & version_mask_) >> version_shift_); } friend size_t serialize(const NetHeader& hdr, gu::byte_t* buf, size_t buflen, size_t offset); friend size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset, NetHeader& hdr); friend size_t serial_size(const NetHeader& hdr); static const size_t serial_size_ = 8; private: static const uint32_t len_mask_ = 0x00ffffff; static const uint32_t flags_mask_ = 0x0f000000; static const uint32_t flags_shift_ = 24; static const uint32_t version_mask_ = 0xf0000000; static const uint32_t version_shift_ = 28; enum { F_CRC32 = 1 << 24, /* backward compatible */ F_CRC32C = 1 << 25 }; uint32_t len_; uint32_t crc32_; }; inline size_t serialize(const NetHeader& hdr, gu::byte_t* buf, size_t buflen, size_t offset) { offset = gu::serialize4(hdr.len_, buf, buflen, offset); offset = gu::serialize4(hdr.crc32_, buf, buflen, offset); return offset; } inline size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset, NetHeader& hdr) { offset = gu::unserialize4(buf, buflen, offset, hdr.len_); offset = gu::unserialize4(buf, buflen, offset, hdr.crc32_); switch (hdr.version()) { case 0: if ((hdr.len_ & NetHeader::flags_mask_) & ~(NetHeader::F_CRC32 | NetHeader::F_CRC32C)) { gu_throw_error(EPROTO) << "invalid flags " << ((hdr.len_ & NetHeader::flags_mask_) >> NetHeader::flags_shift_); } break; default: gu_throw_error(EPROTO) << "invalid protocol version " << hdr.version(); } return offset; } inline size_t serial_size(const NetHeader& hdr) { return NetHeader::serial_size_; } /*! * @brief Datagram container * * Datagram class provides consistent interface for managing * datagrams/byte buffers. */ class Datagram { public: Datagram() : header_ (), header_offset_(header_size_), payload_ (new gu::Buffer()), offset_ (0) { } /*! * @brief Construct new datagram from byte buffer * * @param[in] buf Const pointer to data buffer * @param[in] buflen Length of data buffer * * @throws std::bad_alloc */ Datagram(const gu::Buffer& buf, size_t offset = 0) : header_ (), header_offset_(header_size_), payload_ (new gu::Buffer(buf)), offset_ (offset) { assert(offset_ <= payload_->size()); } Datagram(const gu::SharedBuffer& buf, size_t offset = 0) : header_ (), header_offset_(header_size_), payload_ (buf), offset_ (offset) { assert(offset_ <= payload_->size()); } /*! * @brief Copy constructor. * * @note Only for normalized datagrams. * * @param[in] dgram Datagram to make copy from * @param[in] off * @throws std::bad_alloc */ Datagram(const Datagram& dgram, size_t off = std::numeric_limits::max()) : // header_(dgram.header_), header_offset_(dgram.header_offset_), payload_(dgram.payload_), offset_(off == std::numeric_limits::max() ? dgram.offset_ : off) { assert(offset_ <= dgram.len()); memcpy(header_ + header_offset_, dgram.header_ + dgram.header_offset(), dgram.header_len()); } /*! 
* @brief Destruct datagram */ ~Datagram() { } void normalize() { const gu::SharedBuffer old_payload(payload_); payload_ = gu::SharedBuffer(new gu::Buffer); payload_->reserve(header_len() + old_payload->size() - offset_); if (header_len() > offset_) { payload_->insert(payload_->end(), header_ + header_offset_ + offset_, header_ + header_size_); offset_ = 0; } else { offset_ -= header_len(); } header_offset_ = header_size_; payload_->insert(payload_->end(), old_payload->begin() + offset_, old_payload->end()); offset_ = 0; } gu::byte_t* header() { return header_; } const gu::byte_t* header() const { return header_; } size_t header_size() const { return header_size_; } size_t header_len() const { return (header_size_ - header_offset_); } size_t header_offset() const { return header_offset_; } void set_header_offset(const size_t off) { // assert(off <= header_size_); if (off > header_size_) gu_throw_fatal << "out of hdrspace"; header_offset_ = off; } const gu::Buffer& payload() const { assert(payload_ != 0); return *payload_; } gu::Buffer& payload() { assert(payload_ != 0); return *payload_; } size_t len() const { return (header_size_ - header_offset_ + payload_->size()); } size_t offset() const { return offset_; } private: friend uint16_t crc16(const Datagram&, size_t); friend uint32_t crc32(NetHeader::checksum_t, const Datagram&, size_t); static const size_t header_size_ = 128; gu::byte_t header_[header_size_]; size_t header_offset_; gu::SharedBuffer payload_; size_t offset_; }; uint16_t crc16(const Datagram& dg, size_t offset = 0); uint32_t crc32(NetHeader::checksum_t type, const Datagram& dg, size_t offset = 0); /* returns true if checksum fails */ inline bool check_cs (const NetHeader& hdr, const Datagram& dg) { if (hdr.has_crc32c()) return (crc32(NetHeader::CS_CRC32C, dg) != hdr.crc32()); if (hdr.has_crc32()) return (crc32(NetHeader::CS_CRC32, dg) != hdr.crc32()); return (hdr.crc32() != 0); } } /* namespace gcomm */ #endif // GCOMM_DATAGRAM_HPP galera-3-25.3.20/gcomm/src/gcomm/common.hpp0000644000015300001660000000135013042054732020131 0ustar jenkinsjenkins/* * Copyright (C) 2012 Codership Oy */ /*! * @file common.hpp * * @brief Imports definitions from the global common.h */ #ifndef GCOMM_COMMON_HPP #define GCOMM_COMMON_HPP #if defined(HAVE_COMMON_H) #include #endif #include namespace gcomm { #if defined(HAVE_COMMON_H) static std::string const BASE_PORT_KEY(COMMON_BASE_PORT_KEY); static std::string const BASE_PORT_DEFAULT(COMMON_BASE_PORT_DEFAULT); static std::string const BASE_DIR_DEFAULT(COMMON_BASE_DIR_DEFAULT); #else static std::string const BASE_PORT_KEY("base_port"); static std::string const BASE_PORT_DEFAULT("4567"); static std::string const BASE_DIR_DEFAULT("."); #endif } #endif /* GCOMM_COMMON_HPP */ galera-3-25.3.20/gcomm/src/gcomm/conf.hpp0000644000015300001660000004722613042054732017602 0ustar jenkinsjenkins/* * Copyright (C) 2009-2014 Codership Oy */ /*! * @file conf.hpp * * @brief Configuration parameters and utility templates. */ #ifndef GCOMM_CONF_HPP #define GCOMM_CONF_HPP #include "gu_config.hpp" #include "gu_uri.hpp" #include "gu_throw.hpp" namespace gcomm { /*! * Configuration parameter definitions. * * Transport definition and configuration parameters are passed to * Transport::create() in the URI form. URI scheme part defines * which transport is returned. Currently recognized are "tcp", "gmcast" * and "pc". This will change in the future. * * URI format is the following: * gcomm://[[:]][?=&=]... 
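 * (For illustration only, a typical connection URI might look like
 * gcomm://192.168.0.1:4567?gmcast.listen_addr=tcp://0.0.0.0:4567 --
 * the recognized keys are the parameters documented below.)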
* The key/value pairs can be used to pass configuration parameters to * gcomm layers. * * Time periods as parameter values follow the ISO8601 duration representation * (as described in http://en.wikipedia.org/wiki/ISO_8601#Durations). * Examples: * - PT1S - one second * - PT1M30S - one minute 30 seconds * - P1DT6H - one day, 6 hours * * To get subsecond resolution, the second part can be represented as a decimal * number, but currently this is not recommended due to a bug in the Period * parsing routine (rounding errors can result in inaccurate boundary * value checking). */ struct Conf { static std::string const ProtonetBackend; static std::string const ProtonetVersion; /*! * @brief TCP non-blocking flag ("socket.non_blocking") * * Parameter value is boolean (passed 0 or 1) denoting whether * the socket should or should not be in non-blocking state. */ static std::string const TcpNonBlocking; /*! * @brief Algorithm for message checksums: * 0 - none (backward compatible) * 1 - CRC-32 (backward compatible) * 2 - CRC-32C (optimized and potentially HW-accelerated on Intel CPUs) */ static std::string const SocketChecksum; /*! * @brief Socket receive buffer size in bytes */ static std::string const SocketRecvBufSize; /*! * @brief GMCast scheme for transport URI ("gmcast") */ static std::string const GMCastScheme; /*! * @brief GMCast protocol version */ static std::string const GMCastVersion; /*! * @brief GMCast group name ("gmcast.group") * * String denoting group name. Max length of string is 16. Peer nodes * accept GMCast connection only if the group names match. */ static std::string const GMCastGroup; /*! * @brief GMCast listening address ("gmcast.listen_addr") * * Listening address for GMCast. Address is currently passed in * URI format (for example tcp://192.168.3.1:4567) and it should * be passed as the last configuration parameter in order to * avoid confusion. If the parameter value is undefined, GMCast * starts listening on all interfaces at default port 4567. */ static std::string const GMCastListenAddr; /*! * @brief GMCast multicast address ("gmcast.mcast_addr") * * Multicast address for GMCast. By default multicast socket * is bound to the same interface as conf::GMCastListenAddr. * If multicast interface must be specified, the only way * to do it is currently via listening address configuration. */ static std::string const GMCastMCastAddr; /*! * @brief GMCast multicast port ("gmcast.mcast_port") * * Multicast port for GMCast. By default multicast uses the * same port as GMCast TCP connections. */ static std::string const GMCastMCastPort; /*! * @brief GMCast multicast TTL ("gmcast.mcast_ttl") * * This parameter controls multicast packet TTL. By default it * is set to 1 and usually it should not be changed unless * advised to do so. This means that multicast is limited to a single LAN * segment. */ static std::string const GMCastMCastTTL; static std::string const GMCastTimeWait; static std::string const GMCastPeerTimeout; /*! * @brief Maximum initial reconnect attempts * * Maximum initial reconnect attempts for address reported by peer. */ static std::string const GMCastMaxInitialReconnectAttempts; /*! * @brief Add or remove peer address. * * Setting value to add:<scheme>://<addr>:<port> will inject new peer * address in address list. Setting value to del:<scheme>://<addr>:<port> * will remove peer address from list (via forget procedure). */ static std::string const GMCastPeerAddr; /*! * @brief Isolate node from peers * * Setting this value to 'true' closes all connections * and will prevent forming of new connections until * value is set again to 'false'. 
This parameter should be * used for testing purposes only and it will not be visible * in the global configuration array. */ static std::string const GMCastIsolate; /*! * @brief Segment identifier for segmentation. */ static std::string const GMCastSegment; /*! * @brief EVS scheme for transport URI ("evs") */ static std::string const EvsScheme; /*! * @brief EVS protocol version */ static std::string const EvsVersion; /*! * @brief EVS view forget timeout ("evs.view_forget_timeout") * * This timeout controls how long information about * known group views is maintained. This information is needed * to filter out delayed messages from previous views that are not * live anymore. Default value is 5 minutes and there is usually no * need to change it. */ static std::string const EvsViewForgetTimeout; /*! * @brief EVS suspect timeout ("evs.suspect_timeout") * * This timeout controls how long a node can remain silent until * it is put under suspicion. If a majority of the current group * agrees that the node is under suspicion, it is discarded from * the group and a new group view is formed immediately. If a majority * of the group does not agree about the suspicion, Conf::EvsInactiveTimeout * is waited before forming of a new group is attempted. * Default value is 5 seconds. */ static std::string const EvsSuspectTimeout; /*! * @brief EVS inactive timeout ("evs.inactive_timeout") * * This timeout controls how long a node can remain completely silent * until it is discarded from the group. This is a hard limit, unlike * Conf::EvsSuspectTimeout, and the node is discarded even if it * becomes live during the formation of the new group. Default value * is 15 seconds. */ static std::string const EvsInactiveTimeout; /*! * @brief EVS inactive check period ("evs.inactive_check_period") * * This period controls how often node liveness is checked. Default * is 1 second and there is no need to change this unless * Conf::EvsSuspectTimeout or Conf::EvsInactiveTimeout is adjusted * to a smaller value. Default value is 1 second, minimum is 0.1 seconds * and maximum is Conf::EvsSuspectTimeout/2. */ static std::string const EvsInactiveCheckPeriod; static std::string const EvsInstallTimeout; /*! * @brief EVS keepalive period ("evs.keepalive_period") * * This timeout controls how often keepalive messages are * sent into the network. Node liveness is determined with * these keepalives, so the value should be significantly smaller * than Conf::EvsSuspectTimeout. Default value is 1 second, * minimum is 0.1 seconds and maximum is Conf::EvsSuspectTimeout/3. */ static std::string const EvsKeepalivePeriod; /*! * @brief EVS join retransmission period ("evs.join_retrans_period") * * This parameter controls how often join messages are retransmitted * during group formation. There is usually no need to adjust * this value. Default value is 0.3 seconds, minimum is 0.1 seconds * and maximum is Conf::EvsSuspectTimeout/3. */ static std::string const EvsJoinRetransPeriod; /*! * @brief EVS statistics reporting period ("evs.stats_report_period") * * This parameter controls how often statistics information is * printed in the log. This parameter has an effect only if * statistics reporting is enabled via Conf::EvsInfoLogMask. Default * value is 1 minute. */ static std::string const EvsStatsReportPeriod; /*! * @brief EVS debug log mask ("evs.debug_log_mask") * * This mask controls what debug information is printed in the logs * if debug logging is turned on. The mask value is a bitwise-or * of values gcomm::evs::Proto::DebugFlags. 
By default only * state information is printed. */ static std::string const EvsDebugLogMask; /*! * @brief EVS info log mask ("evs.info_log_mask") * * This mask controls what info log is printed in the logs. * Mask value is bitwise-or from values gcomm::evs::Proto::InfoFlags. */ static std::string const EvsInfoLogMask; /*! * @brief EVS send window ("evs.send_window") * * This parameter controls how many messages protocol layer is * allowed to send without getting all acknowledgements for any of them. * Default value is 32. */ static std::string const EvsSendWindow; /*! * @brief EVS user send window ("evs.user_send_window") * * Like Conf::EvsSendWindow, but for messages for which sending * is initiated by call from upper layer. Default value is 16. */ static std::string const EvsUserSendWindow; /*! * @brief EVS message aggregation mode ("evs.use_aggregate") * * This parameter controls whether EVS is allowed to aggregate * several user messages into one message. By default this option * is enabled and there should be no need to disable it unless * adviced so. */ static std::string const EvsUseAggregate; /*! * @brief Period to generate keepalives for causal messages * */ static std::string const EvsCausalKeepalivePeriod; /*! * @brief EVS maximum install timeouts ("evs.max_install_timeouts") * * This parameter controls how many install attempts are done * before declaring other nodes as inactive and trying to re-establish * group via singleton views. */ static std::string const EvsMaxInstallTimeouts; /*! * @brief Margin over keepalive period after which node is declared * delayed. This should be greater than the largest RTT * between cluster nodes. */ static std::string const EvsDelayMargin; /*! * @brief Period which determines how long delayed node is kept in * delayed list after it becomes responsive again. * * The actual time that node stays in delayed list is * EvsDelayedKeepPeriod times the number of changes between * OK and DELAYED state. */ static std::string const EvsDelayedKeepPeriod; /*! * @brief List of nodes (UUIDs) that should be evicted permanently from * cluster. * * Setting value to nil UUID will clear the evict list. */ static std::string const EvsEvict; /*! * @brief Autoevict threshold. */ static std::string const EvsAutoEvict; /*! * @brief PC scheme for transport URI ("pc") */ static std::string const PcScheme; /*! * @brief PC protocol version */ static std::string const PcVersion; /*! * @brief PC split-brain mode * * This parameter controls whether PC is allowed to continue * operation despite of possible split brain condition. */ static std::string const PcIgnoreSb; /*! * @brief PC quorum mode * * This parameter controls whether PC is allowed to continue * operation despite of quorum loss. */ static std::string const PcIgnoreQuorum; /*! * @brief PC message checksumming * * This parameter controls whether PC layer does message * checksumming. */ static std::string const PcChecksum; /*! * @brief PC starup announce timeout */ static std::string const PcAnnounceTimeout; /*! * @brief PC close linger timeout */ static std::string const PcLinger; /*! * @brief PC newer prim view overrides */ static std::string const PcNpvo; /*! * @brief If set during runtime bootstraps new PC */ static std::string const PcBootstrap; /*! * @brief Wait for prim comp unconditionally if set to true */ static std::string const PcWaitPrim; /*! * @brief Timeout on waiting for primary component */ static std::string const PcWaitPrimTimeout; /*! 
* @brief Node weight in prim comp voting */ static std::string const PcWeight; /*! * @brief PC recovery from cluster crash */ static std::string const PcRecovery; static void register_params(gu::Config&); static void check_params(const gu::Config&); struct Check { Check(gu::Config& conf) { check_params(conf); } virtual ~Check() {} // to pacify older GCCs with -Werror=effc++ }; static size_t check_recv_buf_size(const std::string& val); }; // Helper templates to read configuration parameters. template T _conf_param(const gu::URI& uri, const std::string& param, const T* default_value = 0, const T* min_value = 0, const T* max_value = 0) { T ret; try { ret = gu::from_string(uri.get_option(param)); } catch (gu::NotFound& e) { // cppcheck-suppress nullPointer if (default_value == 0) { gu_throw_error(EINVAL) << "param " << param << " not found from uri " << uri.to_string(); } // cppcheck-suppress nullPointer ret = *default_value; } if (min_value != 0 && *min_value > ret) { gu_throw_error(EINVAL) << "param " << param << " value " << ret << " out of range " << "min allowed " << *min_value; } if (max_value != 0 && *max_value < ret) { gu_throw_error(EINVAL) << "param " << param << " value " << ret << " out of range " << "max allowed " << *max_value; } return ret; } template T conf_param(const gu::URI& uri, const std::string& param) { return _conf_param(uri, param, 0, 0, 0); } template T conf_param_def(const gu::URI& uri, const std::string& param, const T& default_value) { return _conf_param(uri, param, &default_value); } template T conf_param_range(const gu::URI& uri, const std::string& param, const T& min_value, const T& max_value) { return _conf_param(uri, param, 0, &min_value, &max_value); } template T conf_param_def_min(const gu::URI& uri, const std::string& param, const T& default_value, const T& min_value) { return _conf_param(uri, param, &default_value, &min_value); } template T conf_param_def_max(const gu::URI& uri, const std::string& param, const T& default_value, const T& max_value) { return _conf_param(uri, param, &default_value, reinterpret_cast(0), &max_value); } template T conf_param_def_range(const gu::URI& uri, const std::string& param, const T& default_value, const T& min_value, const T& max_value) { return _conf_param(uri, param, &default_value, &min_value, &max_value); } template T param(gu::Config& conf, const gu::URI& uri, const std::string& key, const std::string& def, std::ios_base& (*f)(std::ios_base&) = std::dec) { T ret; try { std::string cnf(conf.get(key, def)); std::string val(uri.get_option(key, cnf)); try { ret = gu::from_string(val, f); } catch (gu::NotFound) { gu_throw_error(EINVAL) << "Bad value '" << val << "' for parameter '" << key << "'"; } } catch (gu::NotFound) { gu_throw_error(EINVAL) << "Unrecognized parameter '" << key << "'"; } return ret; } template T check_range(const std::string& key, const T& val, const T& min, const T& max) { if (val < min || val >= max) { gu_throw_error(ERANGE) << "parameter '" << key << "' value " << val << " is out of range [" << min << "," << max << ")"; } return val; } template T check_range(const std::string& key, const std::string& val, const T& min, const T& max) { return check_range(key, gu::Config::from_config(val), min, max); } template T check_range(const gu::Config& conf, const std::string& key, const T& min, const T& max) { return check_range(key, conf.get(key), min, max); } } // namespace gcomm #endif // GCOMM_CONF_HPP galera-3-25.3.20/gcomm/src/gcomm/view.hpp0000644000015300001660000001607313042054732017623 0ustar 
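// Usage sketch for the conf_param helpers defined in conf.hpp above
// (illustrative only; the parameter name and values are made up):
//
//   const gu::URI uri("gmcast://localhost:4567?example.param=5");
//   int v(gcomm::conf_param_def_range<int>(uri, "example.param", 1, 0, 10));
//   // v == 5; falls back to 1 if the option is absent and throws if the
//   // parsed value lies outside [0, 10].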
jenkinsjenkins/* * Copyright (C) 2009 Codership Oy */ /*! * @file Group view class (used in the ProtoUpMeta (protolay.hpp) */ #ifndef _GCOMM_VIEW_HPP_ #define _GCOMM_VIEW_HPP_ #include "gcomm/uuid.hpp" #include "gcomm/types.hpp" #include "gcomm/map.hpp" #include "gcomm/conf.hpp" namespace gcomm { typedef enum { V_NONE = -1, V_REG = 0, V_TRANS = 1, V_NON_PRIM = 2, V_PRIM = 3 } ViewType; class ViewId { public: ViewId(const ViewType type = V_NONE, const UUID& uuid = UUID::nil(), const uint32_t seq = 0) : type_(type), uuid_(uuid), seq_ (seq) { } ViewId(const ViewType type, const ViewId& vi) : type_(type), uuid_(vi.uuid()), seq_ (vi.seq()) { } virtual ~ViewId() { } ViewType type() const { return type_; } const UUID& uuid() const { return uuid_; } uint32_t seq() const { return seq_; } size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset); size_t serialize(gu::byte_t* buf, size_t buflen, size_t offset) const; static size_t serial_size() { return UUID::serial_size() + sizeof(reinterpret_cast(0)->seq_); } bool operator<(const ViewId& cmp) const { // View ordering: // 1) view seq less than // 2) uuid newer than // 3) type less than return (seq_ < cmp.seq_ || (seq_ == cmp.seq_ && (cmp.uuid_.older(uuid_) || (uuid_ == cmp.uuid_ && type_ < cmp.type_) ) ) ); } bool operator==(const ViewId& cmp) const { return (seq_ == cmp.seq_ && type_ == cmp.type_ && uuid_ == cmp.uuid_); } bool operator!=(const ViewId& cmp) const { return !(*this == cmp); } std::ostream& write_stream(std::ostream& os) const { os << static_cast(type_) << " "; uuid_.write_stream(os); os << " " << seq_; return os; } std::istream& read_stream(std::istream& is) { int t; is >> t; type_ = static_cast(t); uuid_.read_stream(is); is >> seq_; return is; } private: ViewType type_; UUID uuid_; // uniquely identifies the sequence of group views (?) uint32_t seq_; // position in the sequence (?) 
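// Ordering illustration: with equal seq and uuid,
// ViewId(V_TRANS, u, 5) < ViewId(V_PRIM, u, 5) because V_TRANS(1) < V_PRIM(3);
// a differing seq dominates both the uuid and the type comparison.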
}; std::ostream& operator<<(std::ostream&, const ViewId&); typedef uint8_t SegmentId; class Node { public: Node(SegmentId segment = 0) : segment_(segment) { } SegmentId segment() const { return segment_; } bool operator==(const Node& cmp) const { return true; } bool operator<(const Node& cmp) const { return true; } std::ostream& write_stream(std::ostream& os) const { os << static_cast(segment_); return os; } std::istream& read_stream(std::istream& is) { int seg; is >> seg; segment_ = static_cast(seg); return is; } private: SegmentId segment_; }; inline std::ostream& operator<<(std::ostream& os, const Node& n) { return (os << static_cast(n.segment()) ); } class NodeList : public gcomm::Map { }; class View { public: View() : version_ (-1), bootstrap_ (false), view_id_ (V_NONE), members_ (), joined_ (), left_ (), partitioned_ () { } View(int version, const ViewId& view_id, bool bootstrap = false) : version_ (version), bootstrap_ (bootstrap), view_id_ (view_id), members_ (), joined_ (), left_ (), partitioned_ () { } ~View() {} int version() const { return version_; } void add_member (const UUID& pid, SegmentId segment); void add_members (NodeList::const_iterator begin, NodeList::const_iterator end); void add_joined (const UUID& pid, SegmentId segment); void add_left (const UUID& pid, SegmentId segment); void add_partitioned (const UUID& pid, SegmentId segment); const NodeList& members () const; const NodeList& joined () const; const NodeList& left () const; const NodeList& partitioned () const; NodeList& members() { return members_; } bool is_member(const UUID& uuid) const { return members_.find(uuid) != members_.end(); } bool is_joining(const UUID& uuid) const { return joined_.find(uuid) != joined_.end(); } bool is_leaving(const UUID& uuid) const { return left_.find(uuid) != left_.end(); } bool is_partitioning(const UUID& uuid) const { return partitioned_.find(uuid) != partitioned_.end(); } ViewType type () const; const ViewId& id () const; const UUID& representative () const; bool is_empty() const; bool is_bootstrap() const { return bootstrap_; } std::ostream& write_stream(std::ostream& os) const; std::istream& read_stream(std::istream& is); private: int version_; // view protocol version, derived from evs group bool bootstrap_; // Flag indicating if view was bootstrapped ViewId view_id_; // View identifier NodeList members_; // List of members in view NodeList joined_; // List of newly joined members in view NodeList left_; // Fracefully left members from previous view NodeList partitioned_; // Partitioned members from previous view }; bool operator==(const gcomm::View&, const gcomm::View&); std::ostream& operator<<(std::ostream&, const View&); class ViewState { public: ViewState(UUID& my_uuid, View& view, gu::Config& conf): my_uuid_(my_uuid), view_(view), file_name_(get_viewstate_file_name(conf)) { } std::ostream& write_stream(std::ostream& os) const; std::istream& read_stream(std::istream& is); void write_file() const; bool read_file(); static void remove_file(gu::Config& conf); bool operator== (const ViewState& vst) const { return my_uuid_ == vst.my_uuid_ && view_ == vst.view_; } private: UUID& my_uuid_; View& view_; std::string file_name_; static std::string get_viewstate_file_name(gu::Config& conf); }; } // namespace gcomm #endif // _GCOMM_VIEW_HPP_ galera-3-25.3.20/gcomm/src/gcomm/types.hpp0000644000015300001660000000431013042054732020004 0ustar jenkinsjenkins/* * Copyright (C) 2009-2012 Codership Oy */ #ifndef _GCOMM_TYPES_HPP_ #define _GCOMM_TYPES_HPP_ #include "gcomm/exception.hpp" 
#include "gu_byteswap.hpp" #include "gu_buffer.hpp" #include #include #include namespace gcomm { template class String { public: String(const std::string& str = "") : str_(str) { if (str_.size() > str_size_) { gu_throw_error(EMSGSIZE); } } virtual ~String() { } size_t serialize(gu::byte_t* buf, size_t buflen, size_t offset) const { if (buflen < offset + str_size_) { gu_throw_error (EMSGSIZE) << str_size_ << " > " << (buflen-offset); } std::string ser_str(str_); ser_str.resize(str_size_, '\0'); (void)std::copy(ser_str.data(), ser_str.data() + ser_str.size(), buf + offset); return offset + str_size_; } size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset) { if (buflen < offset + str_size_) { gu_throw_error (EMSGSIZE) << str_size_ << " > " << (buflen-offset); } str_.assign(reinterpret_cast(buf) + offset, str_size_); const size_t tc(str_.find_first_of('\0')); if (tc != std::string::npos) { str_.resize(tc); } return offset + str_size_; } static size_t serial_size() { return str_size_; } const std::string& to_string() const { return str_; } bool operator==(const String& cmp) const { return (str_ == cmp.str_); } private: static const size_t str_size_ = SZ ; std::string str_; /* Human readable name if any */ }; template inline std::ostream& operator<<(std::ostream& os, const String& str) { return (os << str.to_string()); } } // namespace gcomm #endif /* _GCOMM_TYPES_HPP_ */ galera-3-25.3.20/gcomm/src/gcomm/order.hpp0000644000015300001660000000272413042054732017762 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy * * $Id$ */ /*! * @file order.hpp * * @brief Message order type enumeration. */ #ifndef GCOMM_ORDER_HPP #define GCOMM_ORDER_HPP namespace gcomm { /*! * @brief Message order type enumeration. */ enum Order { /*! Message will not be delivered, for protocol use only. */ O_DROP = 0, /*! Message delivery is unreliable, for protocol use only. */ O_UNRELIABLE = 1, /*! Message will be delivered in source fifo order. */ O_FIFO = 2, /*! * Message will be delivered in same order on all nodes * if it is delivered. */ O_AGREED = 3, /*! * Message will be delivered in safe order, it is guaranteed * that all the nodes in group have received the message. */ O_SAFE = 4, /*! * Message will be delivered only locally and delivery will fulfill the * following property: * * Let M_c be message tagged with O_LOCAL_CAUSAL ordering requirement. * Any message M_a which is delivered on any node so that delivery * has causal precedence on generating M_c will be delivered locally * before M_c. * * Note that the causality is guaranteed only with respect to * already delivered messages. */ O_LOCAL_CAUSAL = 8 }; } #endif // GCOMM_ORDER_HPP galera-3-25.3.20/gcomm/src/gcomm/protolay.hpp0000644000015300001660000002622013042054732020515 0ustar jenkinsjenkins/* * Copyright (C) 2009-2014 Codership Oy */ /*! * @file protolay.hpp * * @brief Protocol layer interface definitions. * * Protocol layer interface allows construction of protocol stacks * with consistent interface to send messages upwards or downwards in * stack. */ #ifndef GCOMM_PROTOLAY_HPP #define GCOMM_PROTOLAY_HPP #include "gcomm/view.hpp" #include "gcomm/exception.hpp" #include "gcomm/order.hpp" #include "gcomm/datagram.hpp" #include "gu_logger.hpp" #include "gu_datetime.hpp" #include "gu_config.hpp" #include "gu_status.hpp" #include #include #include // Declarations namespace gcomm { /*! * @class ProtoUpMeta * * Container for metadata passed upwards in protocol stack. 
*/ class ProtoUpMeta; std::ostream& operator<<(std::ostream&, const ProtoUpMeta&); /*! * @class ProtoDownMeta * * Container for metadata passed downwards in protocol stack. */ class ProtoDownMeta; /*! * @class Protolay * * Protocol layer interface. */ class Protolay; /*! * @class Toplay * * Protolay that is on the top of the protocol stack. */ class Toplay; /*! * @class Bottomlay * * Protolay that is on the bottom of the protocol stack. */ class Bottomlay; void connect(Protolay*, Protolay*); void disconnect(Protolay*, Protolay*); } /* message context to pass up with the data buffer? */ class gcomm::ProtoUpMeta { public: ProtoUpMeta(const int err_no) : source_(), source_view_id_(), user_type_(), order_(), to_seq_(), err_no_(err_no), view_(0) { } ProtoUpMeta(const UUID source = UUID::nil(), const ViewId source_view_id = ViewId(), const View* view = 0, const uint8_t user_type = 0xff, const Order order = O_DROP, const int64_t to_seq = -1, const int err_no = 0) : source_ (source ), source_view_id_ (source_view_id ), user_type_ (user_type ), order_ (order ), to_seq_ (to_seq ), err_no_ (err_no ), view_ (view != 0 ? new View(*view) : 0) { } ProtoUpMeta(const ProtoUpMeta& um) : source_ (um.source_ ), source_view_id_ (um.source_view_id_ ), user_type_ (um.user_type_ ), order_ (um.order_ ), to_seq_ (um.to_seq_ ), err_no_ (um.err_no_ ), view_ (um.view_ ? new View(*um.view_) : 0) { } ~ProtoUpMeta() { delete view_; } const UUID& source() const { return source_; } const ViewId& source_view_id() const { return source_view_id_; } uint8_t user_type() const { return user_type_; } Order order() const { return order_; } int64_t to_seq() const { return to_seq_; } int err_no() const { return err_no_; } bool has_view() const { return view_ != 0; } const View& view() const { return *view_; } private: ProtoUpMeta& operator=(const ProtoUpMeta&); UUID const source_; ViewId const source_view_id_; uint8_t const user_type_; Order const order_; int64_t const to_seq_; int const err_no_; View* const view_; }; inline std::ostream& gcomm::operator<<(std::ostream& os, const ProtoUpMeta& um) { os << "proto_up_meta: { "; if (not (um.source() == UUID::nil())) { os << "source=" << um.source() << ","; } if (um.source_view_id().type() != V_NONE) { os << "source_view_id=" << um.source_view_id() << ","; } os << "user_type=" << static_cast(um.user_type()) << ","; os << "to_seq=" << um.to_seq() << ","; if (um.has_view() == true) { os << "view=" << um.view(); } os << "}"; return os; } /* message context to pass down? */ class gcomm::ProtoDownMeta { public: ProtoDownMeta(const uint8_t user_type = 0xff, const Order order = O_SAFE, const UUID& uuid = UUID::nil(), const int segment = 0) : user_type_ (user_type), order_ (order), source_ (uuid), segment_ (segment) { } uint8_t user_type() const { return user_type_; } Order order() const { return order_; } const UUID& source() const { return source_; } int segment() const { return segment_; } private: const uint8_t user_type_; const Order order_; const UUID source_; const int segment_; }; class gcomm::Protolay { public: typedef Map EvictList; virtual ~Protolay() {} virtual void connect(bool) { } virtual void close(bool force = false) { } virtual void close(const UUID& uuid) { } /* apparently handles data from upper layer. what is return value? 
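   (judging from send_down() below, the return value is 0 on success and an
   errno-style error code otherwise)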
*/ virtual int handle_down (Datagram&, const ProtoDownMeta&) = 0; virtual void handle_up (const void*, const Datagram&, const ProtoUpMeta&) = 0; void set_up_context(Protolay *up) { if (std::find(up_context_.begin(), up_context_.end(), up) != up_context_.end()) { gu_throw_fatal << "up context already exists"; } up_context_.push_back(up); } void set_down_context(Protolay *down) { if (std::find(down_context_.begin(), down_context_.end(), down) != down_context_.end()) { gu_throw_fatal << "down context already exists"; } down_context_.push_back(down); } void unset_up_context(Protolay* up) { CtxList::iterator i; if ((i = std::find(up_context_.begin(), up_context_.end(), up)) == up_context_.end()) { gu_throw_fatal << "up context does not exist"; } up_context_.erase(i); } void unset_down_context(Protolay* down) { CtxList::iterator i; if ((i = std::find(down_context_.begin(), down_context_.end(), down)) == down_context_.end()) { gu_throw_fatal << "down context does not exist"; } down_context_.erase(i); } /* apparently passed data buffer to the upper layer */ void send_up(const Datagram& dg, const ProtoUpMeta& up_meta) { if (up_context_.empty() == true) { gu_throw_fatal << this << " up context(s) not set"; } CtxList::iterator i, i_next; for (i = up_context_.begin(); i != up_context_.end(); i = i_next) { i_next = i, ++i_next; (*i)->handle_up(this, dg, up_meta); } } /* apparently passes data buffer to lower layer, what is return value? */ int send_down(Datagram& dg, const ProtoDownMeta& down_meta) { if (down_context_.empty() == true) { log_warn << this << " down context(s) not set"; return ENOTCONN; } int ret = 0; for (CtxList::iterator i = down_context_.begin(); i != down_context_.end(); ++i) { const size_t hdr_offset(dg.header_offset()); int err = (*i)->handle_down(dg, down_meta); // Verify that lower layer rolls back any modifications to // header if (hdr_offset != dg.header_offset()) { gu_throw_fatal; } if (err != 0) { ret = err; } } return ret; } virtual void handle_stable_view(const View& view) { } void set_stable_view(const View& view) { for (CtxList::iterator i(down_context_.begin()); i != down_context_.end(); ++i) { (*i)->handle_stable_view(view); } } virtual void handle_evict(const UUID& uuid) { } void evict(const UUID& uuid) { evict_list_.insert(std::make_pair(uuid, gu::datetime::Date::now())); handle_evict(uuid); for (CtxList::iterator i(down_context_.begin()); i != down_context_.end(); ++i) { (*i)->evict(uuid); } } void unevict(const UUID& uuid) { evict_list_.erase(uuid); for (CtxList::iterator i(down_context_.begin()); i != down_context_.end(); ++i) { (*i)->unevict(uuid); } } bool is_evicted(const UUID& uuid) const { if (down_context_.empty()) { return (evict_list_.find(uuid) != evict_list_.end()); } else { return (*down_context_.begin())->is_evicted(uuid); } } const EvictList& evict_list() const { return evict_list_; } virtual void handle_get_status(gu::Status& status) const { } void get_status(gu::Status& status) const { for (CtxList::const_iterator i(down_context_.begin()); i != down_context_.end(); ++i) { (*i)->get_status(status); } handle_get_status(status); } std::string get_address(const UUID& uuid) const { if (down_context_.empty()) return handle_get_address(uuid); else return (*down_context_.begin())->get_address(uuid); } virtual std::string handle_get_address(const UUID& uuid) const { return "(unknown)"; } virtual gu::datetime::Date handle_timers() { return gu::datetime::Date::max(); } virtual bool set_param(const std::string& key, const std::string& val) { return false; } 
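    // Wiring sketch (illustrative): gcomm::connect(&lower, &upper) links two
    // layers so that lower.send_up() reaches upper.handle_up() and
    // upper.send_down() reaches lower.handle_down(); disconnect() undoes it.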
const Protolay* id() const { return this; } protected: Protolay(gu::Config& conf) : conf_(conf), up_context_(0), down_context_(0), evict_list_() { } gu::Config& conf_; private: typedef std::list CtxList; CtxList up_context_; CtxList down_context_; EvictList evict_list_; Protolay (const Protolay&); Protolay& operator=(const Protolay&); }; class gcomm::Toplay : protected Conf::Check, public Protolay { public: Toplay(gu::Config& conf) : Conf::Check(conf), Protolay(conf) { } private: int handle_down(Datagram& dg, const ProtoDownMeta& dm) { gu_throw_fatal << "Toplay handle_down() called"; } }; class gcomm::Bottomlay : public Protolay { public: Bottomlay(gu::Config& conf) : Protolay(conf) { } private: void handle_up(const void* id, const Datagram&, const ProtoUpMeta& um) { gu_throw_fatal << "Bottomlay handle_up() called"; } }; inline void gcomm::connect(Protolay* down, Protolay* up) { down->set_up_context(up); up->set_down_context(down); } inline void gcomm::disconnect(Protolay* down, Protolay* up) { down->unset_up_context(up); up->unset_down_context(down); } #endif /* GCOMM_PROTOLAY_HPP */ galera-3-25.3.20/gcomm/src/gcomm/protostack.hpp0000644000015300001660000000156413042054732021041 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy */ #ifndef GCOMM_PROTOSTACK_HPP #define GCOMM_PROTOSTACK_HPP #include "gcomm/protolay.hpp" #include "gu_lock.hpp" #include #include namespace gcomm { class Socket; class Acceptor; class Protostack; class Protonet; class BoostProtonet; } class gcomm::Protostack { public: Protostack() : protos_(), mutex_() { } void push_proto(Protolay* p); void pop_proto(Protolay* p); gu::datetime::Date handle_timers(); void dispatch(const void* id, const Datagram& dg, const ProtoUpMeta& um); bool set_param(const std::string&, const std::string&); void enter() { mutex_.lock(); } void leave() { mutex_.unlock(); } private: friend class Protonet; std::deque protos_; gu::Mutex mutex_; }; #endif // GCOMM_PROTOSTACK_HPP galera-3-25.3.20/gcomm/src/gcomm/transport.hpp0000644000015300001660000000505113042054732020677 0ustar jenkinsjenkins/* * Copyright (C) 2009-2012 Codership Oy */ /*! * @file transport.hpp * * @brief Transport interface. */ #ifndef _GCOMM_TRANSPORT_HPP_ #define _GCOMM_TRANSPORT_HPP_ #include "gcomm/uuid.hpp" #include "gcomm/protolay.hpp" #include "gcomm/protostack.hpp" #include "gcomm/protonet.hpp" #include "gu_uri.hpp" namespace gcomm { /*! * @class Transport * * @brief Transport interface */ class Transport; } /*! 
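 * Abstract base class for gcomm transports. Concrete instances are obtained
 * through Transport::create(), where the URI scheme (e.g. "tcp", "gmcast",
 * "pc") selects the implementation; see conf.hpp above.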
* */ class gcomm::Transport : public Protolay { public: virtual ~Transport(); virtual size_t mtu() const = 0; virtual const UUID& uuid() const = 0; virtual std::string local_addr() const; virtual std::string remote_addr() const; int err_no() const; virtual void connect(bool start_prim) { gu_throw_fatal << "connect(start_prim) not supported"; } virtual void connect() // if not overloaded, will default to connect(bool) { connect(false); } virtual void connect(const gu::URI& uri) { gu_throw_fatal << "connect(URI) not supported"; } virtual void close(bool force = false) = 0; virtual void close(const UUID& uuid) { gu_throw_error(ENOTSUP) << "close(UUID) not supported by " << uri_.get_scheme(); } virtual void listen(); virtual std::string listen_addr() const { gu_throw_fatal << "not supported"; } virtual Transport* accept(); virtual void handle_accept(Transport*) { gu_throw_error(ENOTSUP) << "handle_accept() not supported by" << uri_.get_scheme(); } virtual void handle_connect() { gu_throw_error(ENOTSUP) << "handle_connect() not supported by" << uri_.get_scheme(); } virtual int handle_down(Datagram&, const ProtoDownMeta&) = 0; virtual void handle_up (const void*, const Datagram&, const ProtoUpMeta&) = 0; virtual void handle_stable_view(const View& view) { } Protostack& pstack() { return pstack_; } Protonet& pnet() { return pnet_; } static Transport* create(Protonet&, const std::string&); static Transport* create(Protonet&, const gu::URI&); protected: Transport (Protonet&, const gu::URI&); Protostack pstack_; Protonet& pnet_; gu::URI uri_; int error_no_; private: Transport (const Transport&); Transport& operator=(const Transport&); }; #endif // _GCOMM_TRANSPORT_HPP_ galera-3-25.3.20/gcomm/src/gcomm/util.hpp0000644000015300001660000000520213042054732017616 0ustar jenkinsjenkins/* * Copyright (C) 2012 Codership Oy */ #ifndef _GCOMM_UTIL_HPP_ #define _GCOMM_UTIL_HPP_ #include "gcomm/datagram.hpp" #include "gu_logger.hpp" #include "gu_throw.hpp" #include namespace gcomm { inline std::string uri_string (const std::string& scheme, const std::string& addr, const std::string& port = std::string("")) { if (port.length() > 0) return (scheme + "://" + addr + ':' + port); else return (scheme + "://" + addr); } inline bool host_is_any (const std::string& host) { return (host.length() == 0 || host == "0.0.0.0" || host.find ("::/128") <= 1); } template size_t serialize(const C& c, gu::Buffer& buf) { const size_t prev_size(buf.size()); buf.resize(buf.size() + c.serial_size()); size_t ret; gu_trace(ret = c.serialize(&buf[0] + prev_size, buf.size(), prev_size)); assert(ret == prev_size + c.serial_size()); return ret; } template size_t unserialize(const gu::Buffer& buf, size_t offset, C& c) { size_t ret; gu_trace(ret = c.unserialize(buf, buf.size(), offset)); return ret; } template void push_header(const M& msg, Datagram& dg) { if (dg.header_offset() < msg.serial_size()) { gu_throw_fatal; } msg.serialize(dg.header(), dg.header_size(), dg.header_offset() - msg.serial_size()); dg.set_header_offset(dg.header_offset() - msg.serial_size()); } template void pop_header(const M& msg, Datagram& dg) { assert(dg.header_size() >= dg.header_offset() + msg.serial_size()); dg.set_header_offset(dg.header_offset() + msg.serial_size()); } inline const gu::byte_t* begin(const Datagram& dg) { return (dg.offset() < dg.header_len() ? dg.header() + dg.header_offset() + dg.offset() : &dg.payload()[0] + (dg.offset() - dg.header_len())); } inline size_t available(const Datagram& dg) { return (dg.offset() < dg.header_len() ? 
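// Note (summarizing push_header()/pop_header() above): protocol headers are
// written backwards into the datagram's fixed 128-byte header area, so
// begin()/available() treat the header area and the payload as one
// contiguous logical buffer starting at offset().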
dg.header_len() - dg.offset() : dg.payload().size() - (dg.offset() - dg.header_len())); } template class Critical { public: Critical(M& monitor) : monitor_(monitor) { monitor_.enter(); } ~Critical() { monitor_.leave(); } private: M& monitor_; }; } // namespace gcomm #endif // _GCOMM_UTIL_HPP_ galera-3-25.3.20/gcomm/src/defaults.hpp0000644000015300001660000000500613042054732017350 0ustar jenkinsjenkins/* * Copyright (C) 2009-2014 Codership Oy */ #ifndef GCOMM_DEFAULTS_HPP #define GCOMM_DEFAULTS_HPP #include namespace gcomm { struct Defaults { static std::string const ProtonetBackend ; static std::string const ProtonetVersion ; static std::string const SocketChecksum ; static std::string const SocketRecvBufSize ; static std::string const GMCastVersion ; static std::string const GMCastTcpPort ; static std::string const GMCastSegment ; static std::string const GMCastTimeWait ; static std::string const GMCastPeerTimeout ; static std::string const EvsViewForgetTimeout ; static std::string const EvsViewForgetTimeoutMin ; static std::string const EvsInactiveCheckPeriod ; static std::string const EvsSuspectTimeout ; static std::string const EvsSuspectTimeoutMin ; static std::string const EvsInactiveTimeout ; static std::string const EvsInactiveTimeoutMin ; static std::string const EvsRetransPeriod ; static std::string const EvsRetransPeriodMin ; static std::string const EvsJoinRetransPeriod ; static std::string const EvsStatsReportPeriod ; static std::string const EvsStatsReportPeriodMin ; static std::string const EvsSendWindow ; static std::string const EvsSendWindowMin ; static std::string const EvsUserSendWindow ; static std::string const EvsUserSendWindowMin ; static std::string const EvsMaxInstallTimeouts ; static std::string const EvsDelayMargin ; static std::string const EvsDelayedKeepPeriod ; static std::string const EvsAutoEvict ; static std::string const PcAnnounceTimeout ; static std::string const PcChecksum ; static std::string const PcIgnoreQuorum ; static std::string const PcIgnoreSb ; static std::string const PcNpvo ; static std::string const PcVersion ; static std::string const PcWaitPrim ; static std::string const PcWaitPrimTimeout ; static std::string const PcWeight ; static std::string const PcRecovery ; }; } #endif // GCOMM_DEFAULTS_HPP galera-3-25.3.20/gcomm/src/asio_tcp.cpp0000644000015300001660000006444213042054732017346 0ustar jenkinsjenkins/* * Copyright (C) 2012 Codership Oy */ #include "asio_tcp.hpp" #include "gcomm/util.hpp" #include "gcomm/common.hpp" #define FAILED_HANDLER(_e) failed_handler(_e, __FUNCTION__, __LINE__) gcomm::AsioTcpSocket::AsioTcpSocket(AsioProtonet& net, const gu::URI& uri) : Socket (uri), net_ (net), socket_ (net.io_service_), #ifdef HAVE_ASIO_SSL_HPP ssl_socket_ (0), #endif /* HAVE_ASIO_SSL_HPP */ send_q_ (), recv_buf_ (net_.mtu() + NetHeader::serial_size_), recv_offset_ (0), state_ (S_CLOSED), local_addr_ (), remote_addr_ () { log_debug << "ctor for " << id(); } gcomm::AsioTcpSocket::~AsioTcpSocket() { log_debug << "dtor for " << id(); close_socket(); #ifdef HAVE_ASIO_SSL_HPP delete ssl_socket_; ssl_socket_ = 0; #endif /* HAVE_ASIO_SSL_HPP */ } void gcomm::AsioTcpSocket::failed_handler(const asio::error_code& ec, const std::string& func, int line) { log_debug << "failed handler from " << func << ":" << line << " socket " << id() << " " << socket_.native() << " error " << ec << " " << socket_.is_open() << " state " << state(); try { log_debug << "local endpoint " << local_addr() << " remote endpoint " << remote_addr(); } catch (...) 
{ } const State prev_state(state()); if (state() != S_CLOSED) { state_ = S_FAILED; } if (prev_state != S_FAILED && prev_state != S_CLOSED) { net_.dispatch(id(), Datagram(), ProtoUpMeta(ec.value())); } } #ifdef HAVE_ASIO_SSL_HPP void gcomm::AsioTcpSocket::handshake_handler(const asio::error_code& ec) { if (ec) { if (ec.category() == asio::error::get_ssl_category() && gu::exclude_ssl_error(ec) == false) { log_error << "handshake with remote endpoint " << remote_addr() << " failed: " << ec << ": '" << ec.message() << "' ( " << gu::extra_error_info(ec) << ")"; } FAILED_HANDLER(ec); return; } if (ssl_socket_ == 0) { log_error << "handshake handler called for non-SSL socket " << id() << " " << remote_addr() << " <-> " << local_addr(); FAILED_HANDLER(asio::error_code(EPROTO, asio::error::system_category)); return; } log_info << "SSL handshake successful, " << "remote endpoint " << remote_addr() << " local endpoint " << local_addr() << " cipher: " << gu::cipher(*ssl_socket_) << " compression: " << gu::compression(*ssl_socket_); state_ = S_CONNECTED; net_.dispatch(id(), Datagram(), ProtoUpMeta(ec.value())); async_receive(); } #endif /* HAVE_ASIO_SSL_HPP */ void gcomm::AsioTcpSocket::connect_handler(const asio::error_code& ec) { Critical crit(net_); try { if (ec) { FAILED_HANDLER(ec); return; } else { assign_local_addr(); assign_remote_addr(); set_socket_options(); #ifdef HAVE_ASIO_SSL_HPP if (ssl_socket_ != 0) { log_debug << "socket " << id() << " connected, remote endpoint " << remote_addr() << " local endpoint " << local_addr(); ssl_socket_->async_handshake( asio::ssl::stream::client, boost::bind(&AsioTcpSocket::handshake_handler, shared_from_this(), asio::placeholders::error) ); } else { #endif /* HAVE_ASIO_SSL_HPP */ log_debug << "socket " << id() << " connected, remote endpoint " << remote_addr() << " local endpoint " << local_addr(); state_ = S_CONNECTED; net_.dispatch(id(), Datagram(), ProtoUpMeta(ec.value())); async_receive(); #ifdef HAVE_ASIO_SSL_HPP } #endif /* HAVE_ASIO_SSL_HPP */ } } catch (asio::system_error& e) { FAILED_HANDLER(e.code()); } } void gcomm::AsioTcpSocket::connect(const gu::URI& uri) { try { Critical crit(net_); asio::ip::tcp::resolver resolver(net_.io_service_); // Give query flags explicitly to avoid having AI_ADDRCONFIG in // underlying getaddrinfo() hint flags. asio::ip::tcp::resolver::query query(gu::unescape_addr(uri.get_host()), uri.get_port(), asio::ip::tcp::resolver::query::flags(0)); asio::ip::tcp::resolver::iterator i(resolver.resolve(query)); #ifdef HAVE_ASIO_SSL_HPP if (uri.get_scheme() == gu::scheme::ssl) { ssl_socket_ = new asio::ssl::stream( net_.io_service_, net_.ssl_context_ ); ssl_socket_->lowest_layer().async_connect( *i, boost::bind(&AsioTcpSocket::connect_handler, shared_from_this(), asio::placeholders::error) ); } else { #endif /* HAVE_ASIO_SSL_HPP */ const std::string bind_ip = uri.get_option(gcomm::Socket::OptIfAddr, ""); if (!bind_ip.empty()) { socket_.open(i->endpoint().protocol()); asio::ip::tcp::endpoint ep( asio::ip::address::from_string(bind_ip), // connect from any port. 
0); socket_.bind(ep); } socket_.async_connect(*i, boost::bind(&AsioTcpSocket::connect_handler, shared_from_this(), asio::placeholders::error)); #ifdef HAVE_ASIO_SSL_HPP } #endif /* HAVE_ASIO_SSL_HPP */ state_ = S_CONNECTING; } catch (asio::system_error& e) { gu_throw_error(e.code().value()) << "error while connecting to remote host " << uri.to_string() << "', asio error '" << e.what() << "'"; } } void gcomm::AsioTcpSocket::close() { Critical crit(net_); if (state() == S_CLOSED || state() == S_CLOSING) return; log_debug << "closing " << id() << " state " << state() << " send_q size " << send_q_.size(); if (send_q_.empty() == true || state() != S_CONNECTED) { close_socket(); state_ = S_CLOSED; } else { state_ = S_CLOSING; } } void gcomm::AsioTcpSocket::write_handler(const asio::error_code& ec, size_t bytes_transferred) { Critical crit(net_); if (state() != S_CONNECTED && state() != S_CLOSING) { log_debug << "write handler for " << id() << " state " << state(); if (ec.category() == asio::error::get_ssl_category() && gu::exclude_ssl_error(ec) == false) { log_warn << "write_handler(): " << ec.message() << " (" << gu::extra_error_info(ec) << ")"; } return; } if (!ec) { gcomm_assert(send_q_.empty() == false); gcomm_assert(send_q_.front().len() >= bytes_transferred); while (send_q_.empty() == false && bytes_transferred >= send_q_.front().len()) { const Datagram& dg(send_q_.front()); bytes_transferred -= dg.len(); send_q_.pop_front(); } gcomm_assert(bytes_transferred == 0); if (send_q_.empty() == false) { const Datagram& dg(send_q_.front()); boost::array cbs; cbs[0] = asio::const_buffer(dg.header() + dg.header_offset(), dg.header_len()); cbs[1] = asio::const_buffer(&dg.payload()[0], dg.payload().size()); write_one(cbs); } else if (state_ == S_CLOSING) { log_debug << "deferred close of " << id(); close_socket(); state_ = S_CLOSED; } } else if (state_ == S_CLOSING) { log_debug << "deferred close of " << id() << " error " << ec; close_socket(); state_ = S_CLOSED; } else { FAILED_HANDLER(ec); } } void gcomm::AsioTcpSocket::set_option(const std::string& key, const std::string& val) { if (key == Conf::SocketRecvBufSize) { size_t llval; gu_trace(llval = Conf::check_recv_buf_size(val)); socket().set_option(asio::socket_base::receive_buffer_size(llval)); #if GCOMM_CHECK_RECV_BUF_SIZE check_socket_option (key, llval); #endif } } namespace gcomm { typedef boost::shared_ptr AsioTcpSocketPtr; class AsioPostForSendHandler { public: AsioPostForSendHandler(const AsioTcpSocketPtr& socket) : socket_(socket) { } void operator()() { if (socket_->state() == gcomm::Socket::S_CONNECTED && socket_->send_q_.empty() == false) { const gcomm::Datagram& dg(socket_->send_q_.front()); boost::array cbs; cbs[0] = asio::const_buffer(dg.header() + dg.header_offset(), dg.header_len()); cbs[1] = asio::const_buffer(&dg.payload()[0], dg.payload().size()); socket_->write_one(cbs); } } private: AsioTcpSocketPtr socket_; }; } int gcomm::AsioTcpSocket::send(const Datagram& dg) { Critical crit(net_); if (state() != S_CONNECTED) { return ENOTCONN; } NetHeader hdr(static_cast(dg.len()), net_.version_); if (net_.checksum_ != NetHeader::CS_NONE) { hdr.set_crc32(crc32(net_.checksum_, dg), net_.checksum_); } send_q_.push_back(dg); // makes copy of dg Datagram& priv_dg(send_q_.back()); priv_dg.set_header_offset(priv_dg.header_offset() - NetHeader::serial_size_); serialize(hdr, priv_dg.header(), priv_dg.header_size(), priv_dg.header_offset()); if (send_q_.size() == 1) { net_.io_service_.post(AsioPostForSendHandler(shared_from_this())); } return 0; } 
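// Framing summary (restates the logic above and in read_handler() below,
// nothing new on the wire): send() prepends an 8-byte NetHeader -- one
// 32-bit word carrying version/flags/length plus a 32-bit checksum -- to
// every datagram, and read_handler() strips and verifies it on receipt.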
void gcomm::AsioTcpSocket::read_handler(const asio::error_code& ec, const size_t bytes_transferred) { Critical crit(net_); if (ec) { if (ec.category() == asio::error::get_ssl_category() && gu::exclude_ssl_error(ec) == false) { log_warn << "read_handler(): " << ec.message() << " (" << gu::extra_error_info(ec) << ")"; } FAILED_HANDLER(ec); return; } if (state() != S_CONNECTED && state() != S_CLOSING) { log_debug << "read handler for " << id() << " state " << state(); return; } recv_offset_ += bytes_transferred; while (recv_offset_ >= NetHeader::serial_size_) { NetHeader hdr; try { unserialize(&recv_buf_[0], recv_buf_.size(), 0, hdr); } catch (gu::Exception& e) { FAILED_HANDLER(asio::error_code(e.get_errno(), asio::error::system_category)); return; } if (recv_offset_ >= hdr.len() + NetHeader::serial_size_) { Datagram dg( gu::SharedBuffer( new gu::Buffer(&recv_buf_[0] + NetHeader::serial_size_, &recv_buf_[0] + NetHeader::serial_size_ + hdr.len()))); if (net_.checksum_ != NetHeader::CS_NONE) { #ifdef TEST_NET_CHECKSUM_ERROR long rnd(rand()); if (rnd % 10000 == 0) { hdr.set_crc32(net_.checksum_, static_cast(rnd)); } #endif /* TEST_NET_CHECKSUM_ERROR */ if (check_cs (hdr, dg)) { log_warn << "checksum failed, hdr: len=" << hdr.len() << " has_crc32=" << hdr.has_crc32() << " has_crc32c=" << hdr.has_crc32c() << " crc32=" << hdr.crc32(); FAILED_HANDLER(asio::error_code( EPROTO, asio::error::system_category)); return; } } ProtoUpMeta um; net_.dispatch(id(), dg, um); recv_offset_ -= NetHeader::serial_size_ + hdr.len(); if (recv_offset_ > 0) { memmove(&recv_buf_[0], &recv_buf_[0] + NetHeader::serial_size_ + hdr.len(), recv_offset_); } } else { break; } } boost::array mbs; mbs[0] = asio::mutable_buffer(&recv_buf_[0] + recv_offset_, recv_buf_.size() - recv_offset_); read_one(mbs); } size_t gcomm::AsioTcpSocket::read_completion_condition( const asio::error_code& ec, const size_t bytes_transferred) { Critical crit(net_); if (ec) { if (ec.category() == asio::error::get_ssl_category() && gu::exclude_ssl_error(ec) == false) { log_warn << "read_completion_condition(): " << ec.message() << " (" << gu::extra_error_info(ec) << ")"; } FAILED_HANDLER(ec); return 0; } if (state() != S_CONNECTED && state() != S_CLOSING) { log_debug << "read completion condition for " << id() << " state " << state(); return 0; } if (recv_offset_ + bytes_transferred >= NetHeader::serial_size_) { NetHeader hdr; try { unserialize(&recv_buf_[0], NetHeader::serial_size_, 0, hdr); } catch (gu::Exception& e) { log_warn << "unserialize error " << e.what(); FAILED_HANDLER(asio::error_code(e.get_errno(), asio::error::system_category)); return 0; } if (recv_offset_ + bytes_transferred >= NetHeader::serial_size_ + hdr.len()) { return 0; } } return (recv_buf_.size() - recv_offset_); } void gcomm::AsioTcpSocket::async_receive() { Critical crit(net_); gcomm_assert(state() == S_CONNECTED); boost::array mbs; mbs[0] = asio::mutable_buffer(&recv_buf_[0], recv_buf_.size()); read_one(mbs); } size_t gcomm::AsioTcpSocket::mtu() const { return net_.mtu(); } std::string gcomm::AsioTcpSocket::local_addr() const { return local_addr_; } std::string gcomm::AsioTcpSocket::remote_addr() const { return remote_addr_; } void gcomm::AsioTcpSocket::set_socket_options() { basic_socket_t& sock(socket()); gu::set_fd_options(sock); sock.set_option(asio::ip::tcp::no_delay(true)); size_t const recv_buf_size (net_.conf().get(gcomm::Conf::SocketRecvBufSize)); assert(ssize_t(recv_buf_size) >= 0); // this should have been checked already 
sock.set_option(asio::socket_base::receive_buffer_size(recv_buf_size)); #if GCOMM_CHECK_RECV_BUF_SIZE size_t new_val(check_socket_option (gcomm::Conf::SocketRecvBufSize, recv_buf_size)); if (new_val < recv_buf_size) { // apparently there's a limit net_.conf().set(gcomm::Conf::SocketRecvBufSize, new_val); } #else asio::socket_base::receive_buffer_size option; sock.get_option(option); log_debug << "socket recv buf size " << option.value(); #endif } void gcomm::AsioTcpSocket::read_one(boost::array& mbs) { #ifdef HAVE_ASIO_SSL_HPP if (ssl_socket_ != 0) { async_read(*ssl_socket_, mbs, boost::bind(&AsioTcpSocket::read_completion_condition, shared_from_this(), asio::placeholders::error, asio::placeholders::bytes_transferred), boost::bind(&AsioTcpSocket::read_handler, shared_from_this(), asio::placeholders::error, asio::placeholders::bytes_transferred)); } else { #endif /* HAVE_ASIO_SSL_HPP */ async_read(socket_, mbs, boost::bind(&AsioTcpSocket::read_completion_condition, shared_from_this(), asio::placeholders::error, asio::placeholders::bytes_transferred), boost::bind(&AsioTcpSocket::read_handler, shared_from_this(), asio::placeholders::error, asio::placeholders::bytes_transferred)); #ifdef HAVE_ASIO_SSL_HPP } #endif /* HAVE_ASIO_SSL_HPP */ } void gcomm::AsioTcpSocket::write_one( const boost::array& cbs) { #ifdef HAVE_ASIO_SSL_HPP if (ssl_socket_ != 0) { async_write(*ssl_socket_, cbs, boost::bind(&AsioTcpSocket::write_handler, shared_from_this(), asio::placeholders::error, asio::placeholders::bytes_transferred)); } else { #endif /* HAVE_ASIO_SSL_HPP */ async_write(socket_, cbs, boost::bind(&AsioTcpSocket::write_handler, shared_from_this(), asio::placeholders::error, asio::placeholders::bytes_transferred)); #ifdef HAVE_ASIO_SSL_HPP } #endif /* HAVE_ASIO_SSL_HPP */ } void gcomm::AsioTcpSocket::close_socket() { try { #ifdef HAVE_ASIO_SSL_HPP if (ssl_socket_ != 0) { // close underlying transport before calling shutdown() // to avoid blocking ssl_socket_->lowest_layer().close(); ssl_socket_->shutdown(); } else { #endif /* HAVE_ASIO_SSL_HPP */ socket_.close(); #ifdef HAVE_ASIO_SSL_HPP } #endif /* HAVE_ASIO_SSL_HPP */ } catch (...) 
{ } } void gcomm::AsioTcpSocket::assign_local_addr() { #ifdef HAVE_ASIO_SSL_HPP if (ssl_socket_ != 0) { local_addr_ = gcomm::uri_string( gu::scheme::ssl, gu::escape_addr( ssl_socket_->lowest_layer().local_endpoint().address()), gu::to_string( ssl_socket_->lowest_layer().local_endpoint().port()) ); } else { #endif /* HAVE_ASIO_SSL_HPP */ local_addr_ = gcomm::uri_string( gu::scheme::tcp, gu::escape_addr(socket_.local_endpoint().address()), gu::to_string(socket_.local_endpoint().port()) ); #ifdef HAVE_ASIO_SSL_HPP } #endif /* HAVE_ASIO_SSL_HPP */ } void gcomm::AsioTcpSocket::assign_remote_addr() { #ifdef HAVE_ASIO_SSL_HPP if (ssl_socket_ != 0) { remote_addr_ = gcomm::uri_string( gu::scheme::ssl, gu::escape_addr( ssl_socket_->lowest_layer().remote_endpoint().address()), gu::to_string( ssl_socket_->lowest_layer().remote_endpoint().port()) ); } else { #endif /* HAVE_ASIO_SSL_HPP */ remote_addr_ = uri_string( gu::scheme::tcp, gu::escape_addr(socket_.remote_endpoint().address()), gu::to_string(socket_.remote_endpoint().port()) ); #ifdef HAVE_ASIO_SSL_HPP } #endif /* HAVE_ASIO_SSL_HPP */ } gcomm::AsioTcpAcceptor::AsioTcpAcceptor(AsioProtonet& net, const gu::URI& uri) : Acceptor (uri), net_ (net), acceptor_ (net_.io_service_), accepted_socket_() { } gcomm::AsioTcpAcceptor::~AsioTcpAcceptor() { close(); } void gcomm::AsioTcpAcceptor::accept_handler( SocketPtr socket, const asio::error_code& error) { if (!error) { AsioTcpSocket* s(static_cast(socket.get())); try { s->assign_local_addr(); s->assign_remote_addr(); s->set_socket_options(); #ifdef HAVE_ASIO_SSL_HPP if (s->ssl_socket_ != 0) { log_debug << "socket " << s->id() << " connected, remote endpoint " << s->remote_addr() << " local endpoint " << s->local_addr(); s->ssl_socket_->async_handshake( asio::ssl::stream::server, boost::bind(&AsioTcpSocket::handshake_handler, s->shared_from_this(), asio::placeholders::error)); s->state_ = Socket::S_CONNECTING; } else { #endif /* HAVE_ASIO_SSL_HP */ s->state_ = Socket::S_CONNECTED; #ifdef HAVE_ASIO_SSL_HPP } #endif /* HAVE_ASIO_SSL_HPP */ accepted_socket_ = socket; log_debug << "accepted socket " << socket->id(); net_.dispatch(id(), Datagram(), ProtoUpMeta(error.value())); } catch (asio::system_error& e) { // socket object should be freed automatically when it // goes out of scope log_debug << "accept failed: " << e.what(); } AsioTcpSocket* new_socket(new AsioTcpSocket(net_, uri_)); #ifdef HAVE_ASIO_SSL_HPP if (uri_.get_scheme() == gu::scheme::ssl) { new_socket->ssl_socket_ = new asio::ssl::stream( net_.io_service_, net_.ssl_context_); acceptor_.async_accept(new_socket->ssl_socket_->lowest_layer(), boost::bind(&AsioTcpAcceptor::accept_handler, this, SocketPtr(new_socket), asio::placeholders::error)); } else { #endif /* HAVE_ASIO_SSL_HPP */ acceptor_.async_accept(new_socket->socket_, boost::bind(&AsioTcpAcceptor::accept_handler, this, SocketPtr(new_socket), asio::placeholders::error)); #ifdef HAVE_ASIO_SSL_HPP } #endif /* HAVE_ASIO_SSL_HPP */ } else { log_warn << "accept handler: " << error; } } void gcomm::AsioTcpAcceptor::listen(const gu::URI& uri) { try { asio::ip::tcp::resolver resolver(net_.io_service_); // Give query flags explicitly to avoid having AI_ADDRCONFIG in // underlying getaddrinfo() hint flags. 
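        // With AI_ADDRCONFIG set, getaddrinfo() returns addresses only
        // for families that are currently configured on the host, which
        // can make resolving a wildcard or numeric listen address fail
        // on minimally configured hosts. Passing flags(0) clears the
        // default hints so the listen address resolves as given.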
asio::ip::tcp::resolver::query query(gu::unescape_addr(uri.get_host()), uri.get_port(), asio::ip::tcp::resolver::query::flags(0)); asio::ip::tcp::resolver::iterator i(resolver.resolve(query)); acceptor_.open(i->endpoint().protocol()); acceptor_.set_option(asio::ip::tcp::socket::reuse_address(true)); gu::set_fd_options(acceptor_); acceptor_.bind(*i); acceptor_.listen(); AsioTcpSocket* new_socket(new AsioTcpSocket(net_, uri)); #ifdef HAVE_ASIO_SSL_HPP if (uri_.get_scheme() == gu::scheme::ssl) { new_socket->ssl_socket_ = new asio::ssl::stream( net_.io_service_, net_.ssl_context_); acceptor_.async_accept(new_socket->ssl_socket_->lowest_layer(), boost::bind(&AsioTcpAcceptor::accept_handler, this, SocketPtr(new_socket), asio::placeholders::error)); } else { #endif /* HAVE_ASIO_SSL_HPP */ acceptor_.async_accept(new_socket->socket_, boost::bind(&AsioTcpAcceptor::accept_handler, this, SocketPtr(new_socket), asio::placeholders::error)); #ifdef HAVE_ASIO_SSL_HPP } #endif /* HAVE_ASIO_SSL_HPP */ } catch (asio::system_error& e) { log_error << e.what(); gu_throw_error(e.code().value()) << "error while trying to listen '" << uri.to_string() << "', asio error '" << e.what() << "'"; } } std::string gcomm::AsioTcpAcceptor::listen_addr() const { try { return uri_string( uri_.get_scheme(), gu::escape_addr(acceptor_.local_endpoint().address()), gu::to_string(acceptor_.local_endpoint().port()) ); } catch (asio::system_error& e) { gu_throw_error(e.code().value()) << "failed to read listen addr " << "', asio error '" << e.what() << "'"; } } void gcomm::AsioTcpAcceptor::close() { try { acceptor_.close(); } catch (...) { } } gcomm::SocketPtr gcomm::AsioTcpAcceptor::accept() { if (accepted_socket_->state() == Socket::S_CONNECTED) { accepted_socket_->async_receive(); } return accepted_socket_; } galera-3-25.3.20/gcomm/src/gmcast_message.hpp0000644000015300001660000002645213042054732020533 0ustar jenkinsjenkins/* * Copyright (C) 2009-2014 Codership Oy */ #ifndef GCOMM_GMCAST_MESSAGE_HPP #define GCOMM_GMCAST_MESSAGE_HPP #include "gcomm/types.hpp" #include "gcomm/uuid.hpp" #include "gmcast_node.hpp" #include "gcomm/map.hpp" namespace gcomm { namespace gmcast { class Message; } } class gcomm::gmcast::Message { public: enum Flags { F_GROUP_NAME = 1 << 0, F_NODE_NAME = 1 << 1, F_NODE_ADDRESS_OR_ERROR = 1 << 2, F_NODE_LIST = 1 << 3, F_HANDSHAKE_UUID = 1 << 4, // relay message to all peers in the same segment (excluding source) // and to all other segments except source segment F_RELAY = 1 << 5, // relay message to all peers in the same segment F_SEGMENT_RELAY = 1 << 6 }; enum Type { T_INVALID = 0, T_HANDSHAKE = 1, T_HANDSHAKE_RESPONSE = 2, T_OK = 3, T_FAIL = 4, T_TOPOLOGY_CHANGE = 5, T_KEEPALIVE = 6, /* Leave room for future use */ T_USER_BASE = 8, T_MAX = 255 }; class NodeList : public Map { }; private: gu::byte_t version_; Type type_; gu::byte_t flags_; gu::byte_t segment_id_; gcomm::UUID handshake_uuid_; gcomm::UUID source_uuid_; gcomm::String<64> node_address_or_error_; gcomm::String<32> group_name_; Message& operator=(const Message&); NodeList node_list_; public: static const char* type_to_string (Type t) { static const char* str[T_MAX] = { "INVALID", "HANDSHAKE", "HANDSHAKE_RESPONSE", "HANDSHAKE_OK", "HANDSHAKE_FAIL", "TOPOLOGY_CHANGE", "KEEPALIVE", "RESERVED_7", "USER_BASE" }; if (T_MAX > t) return str[t]; return "UNDEFINED PACKET TYPE"; } Message(const Message& msg) : version_ (msg.version_), type_ (msg.type_), flags_ (msg.flags_), segment_id_ (msg.segment_id_), handshake_uuid_ (msg.handshake_uuid_), source_uuid_ 
(msg.source_uuid_), node_address_or_error_ (msg.node_address_or_error_), group_name_ (msg.group_name_), node_list_ (msg.node_list_) { } /* Default ctor */ Message () : version_ (0), type_ (T_INVALID), flags_ (0), segment_id_ (0), handshake_uuid_ (), source_uuid_ (), node_address_or_error_ (), group_name_ (), node_list_ () {} /* Ctor for handshake */ Message (int version, const Type type, const UUID& handshake_uuid, const UUID& source_uuid, uint8_t segment_id) : version_ (version), type_ (type), flags_ (F_HANDSHAKE_UUID), segment_id_ (segment_id), handshake_uuid_ (handshake_uuid), source_uuid_ (source_uuid), node_address_or_error_ (), group_name_ (), node_list_ () { if (type_ != T_HANDSHAKE) gu_throw_fatal << "Invalid message type " << type_to_string(type_) << " in handshake constructor"; } /* ok, fail and keepalive */ Message (int version, const Type type, const UUID& source_uuid, uint8_t segment_id, const std::string& error) : version_ (version), type_ (type), flags_ (error.size() > 0 ? F_NODE_ADDRESS_OR_ERROR : 0), segment_id_ (segment_id), handshake_uuid_ (), source_uuid_ (source_uuid), node_address_or_error_ (error), group_name_ (), node_list_ () { if (type_ != T_OK && type_ != T_FAIL && type_ != T_KEEPALIVE) gu_throw_fatal << "Invalid message type " << type_to_string(type_) << " in ok/fail/keepalive constructor"; } /* Ctor for user message */ Message (int version, const Type type, const UUID& source_uuid, const int ttl, uint8_t segment_id) : version_ (version), type_ (type), flags_ (0), segment_id_ (segment_id), handshake_uuid_ (), source_uuid_ (source_uuid), node_address_or_error_ (), group_name_ (), node_list_ () { if (type_ < T_USER_BASE) gu_throw_fatal << "Invalid message type " << type_to_string(type_) << " in user message constructor"; } /* Ctor for handshake response */ Message (int version, const Type type, const gcomm::UUID& handshake_uuid, const gcomm::UUID& source_uuid, const std::string& node_address, const std::string& group_name, uint8_t segment_id) : version_ (version), type_ (type), flags_ (F_GROUP_NAME | F_NODE_ADDRESS_OR_ERROR | F_HANDSHAKE_UUID), segment_id_ (segment_id), handshake_uuid_ (handshake_uuid), source_uuid_ (source_uuid), node_address_or_error_ (node_address), group_name_ (group_name), node_list_ () { if (type_ != T_HANDSHAKE_RESPONSE) gu_throw_fatal << "Invalid message type " << type_to_string(type_) << " in handshake response constructor"; } /* Ctor for topology change */ Message (int version, const Type type, const gcomm::UUID& source_uuid, const std::string& group_name, const NodeList& nodes) : version_ (version), type_ (type), flags_ (F_GROUP_NAME | F_NODE_LIST), segment_id_ (0), handshake_uuid_ (), source_uuid_ (source_uuid), node_address_or_error_ (), group_name_ (group_name), node_list_ (nodes) { if (type_ != T_TOPOLOGY_CHANGE) gu_throw_fatal << "Invalid message type " << type_to_string(type_) << " in topology change constructor"; } ~Message() { } size_t serialize(gu::byte_t* buf, const size_t buflen, const size_t offset) const { size_t off; gu_trace (off = gu::serialize1(version_, buf, buflen, offset)); gu_trace (off = gu::serialize1(static_cast(type_),buf,buflen,off)); gu_trace (off = gu::serialize1(flags_, buf, buflen, off)); gu_trace (off = gu::serialize1(segment_id_, buf, buflen, off)); gu_trace (off = source_uuid_.serialize(buf, buflen, off)); if (flags_ & F_HANDSHAKE_UUID) { gu_trace(off = handshake_uuid_.serialize(buf, buflen, off)); } if (flags_ & F_NODE_ADDRESS_OR_ERROR) { gu_trace (off = node_address_or_error_.serialize(buf, buflen, 
off)); } if (flags_ & F_GROUP_NAME) { gu_trace (off = group_name_.serialize(buf, buflen, off)); } if (flags_ & F_NODE_LIST) { gu_trace(off = node_list_.serialize(buf, buflen, off)); } return off; } size_t read_v0(const gu::byte_t* buf, const size_t buflen, const size_t offset) { size_t off; gu::byte_t t; gu_trace (off = gu::unserialize1(buf, buflen, offset, t)); type_ = static_cast(t); switch (type_) { case T_HANDSHAKE: case T_HANDSHAKE_RESPONSE: case T_OK: case T_FAIL: case T_TOPOLOGY_CHANGE: case T_KEEPALIVE: case T_USER_BASE: break; default: gu_throw_error(EINVAL) << "invalid message type " << static_cast(type_); } gu_trace (off = gu::unserialize1(buf, buflen, off, flags_)); gu_trace (off = gu::unserialize1(buf, buflen, off, segment_id_)); gu_trace (off = source_uuid_.unserialize(buf, buflen, off)); if (flags_ & F_HANDSHAKE_UUID) { gu_trace(off = handshake_uuid_.unserialize(buf, buflen, off)); } if (flags_ & F_NODE_ADDRESS_OR_ERROR) { gu_trace (off = node_address_or_error_.unserialize(buf, buflen, off)); } if (flags_ & F_GROUP_NAME) { gu_trace (off = group_name_.unserialize(buf, buflen, off)); } if (flags_ & F_NODE_LIST) { gu_trace(off = node_list_.unserialize(buf, buflen, off)); } return off; } size_t unserialize(const gu::byte_t* buf, const size_t buflen, const size_t offset) { size_t off; gu_trace (off = gu::unserialize1(buf, buflen, offset, version_)); switch (version_) { case 0: gu_trace (return read_v0(buf, buflen, off)); default: gu_throw_error(EPROTONOSUPPORT) << "Unsupported/unrecognized gmcast protocol version: " << version_; } } size_t serial_size() const { return 4 /* Common header: version, type, flags, segment_id */ + source_uuid_.serial_size() + (flags_ & F_HANDSHAKE_UUID ? handshake_uuid_.serial_size() : 0) /* GMCast address if set */ + (flags_ & F_NODE_ADDRESS_OR_ERROR ? node_address_or_error_.serial_size() : 0) /* Group name if set */ + (flags_ & F_GROUP_NAME ? group_name_.serial_size() : 0) /* Node list if set */ + (flags_ & F_NODE_LIST ? 
node_list_.serial_size() : 0); } int version() const { return version_; } Type type() const { return type_; } void set_flags(uint8_t f) { flags_ = f; } uint8_t flags() const { return flags_; } uint8_t segment_id() const { return segment_id_; } const UUID& handshake_uuid() const { return handshake_uuid_; } const UUID& source_uuid() const { return source_uuid_; } const std::string& node_address() const { return node_address_or_error_.to_string(); } const std::string& error() const { return node_address_or_error_.to_string(); } const std::string& group_name() const { return group_name_.to_string(); } const NodeList& node_list() const { return node_list_; } }; #endif // GCOMM_GMCAST_MESSAGE_HPP galera-3-25.3.20/gcomm/src/asio_tcp.hpp0000644000015300001660000001111013042054732017333 0ustar jenkinsjenkins/* * Copyright (C) 2010 Codership Oy */ #ifndef GCOMM_ASIO_TCP_HPP #define GCOMM_ASIO_TCP_HPP #include "socket.hpp" #include "asio_protonet.hpp" #include #include #include #include #include // // Boost enable_shared_from_this<> does not have virtual destructor, // therefore need to ignore -Weffc++ // #if defined(__GNUG__) # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic push # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic ignored "-Weffc++" #endif namespace gcomm { class AsioTcpSocket; class AsioTcpAcceptor; class AsioPostForSendHandler; } // TCP Socket implementation class gcomm::AsioTcpSocket : public gcomm::Socket, public boost::enable_shared_from_this { public: AsioTcpSocket(AsioProtonet& net, const gu::URI& uri); ~AsioTcpSocket(); void failed_handler(const asio::error_code& ec, const std::string& func, int line); #ifdef HAVE_ASIO_SSL_HPP void handshake_handler(const asio::error_code& ec); #endif // HAVE_ASIO_SSL_HPP void connect_handler(const asio::error_code& ec); void connect(const gu::URI& uri); void close(); void write_handler(const asio::error_code& ec, size_t bytes_transferred); void set_option(const std::string& key, const std::string& val); int send(const Datagram& dg); size_t read_completion_condition( const asio::error_code& ec, const size_t bytes_transferred); void read_handler(const asio::error_code& ec, const size_t bytes_transferred); void async_receive(); size_t mtu() const; std::string local_addr() const; std::string remote_addr() const; State state() const { return state_; } SocketId id() const { return &socket_; } private: friend class gcomm::AsioTcpAcceptor; friend class gcomm::AsioPostForSendHandler; AsioTcpSocket(const AsioTcpSocket&); void operator=(const AsioTcpSocket&); void set_socket_options(); void read_one(boost::array& mbs); void write_one(const boost::array& cbs); void close_socket(); // call to assign local/remote addresses at the point where it // is known that underlying socket is live void assign_local_addr(); void assign_remote_addr(); // returns real socket to use typedef asio::basic_socket > basic_socket_t; basic_socket_t& socket() { return (ssl_socket_ ? 
ssl_socket_->lowest_layer() : socket_); } AsioProtonet& net_; asio::ip::tcp::socket socket_; #ifdef HAVE_ASIO_SSL_HPP asio::ssl::stream* ssl_socket_; #endif // HAVE_ASIO_SSL_HPP std::deque send_q_; std::vector recv_buf_; size_t recv_offset_; State state_; // Querying addresses from failed socket does not work, // so need to maintain copy for diagnostics logging std::string local_addr_; std::string remote_addr_; template unsigned long long check_socket_option(const std::string& key, unsigned long long val) { T option; socket().get_option(option); if (val != static_cast(option.value())) { log_info << "Setting '" << key << "' to " << val << " failed. Resulting value is " << option.value(); } return option.value(); } }; class gcomm::AsioTcpAcceptor : public gcomm::Acceptor { public: AsioTcpAcceptor(AsioProtonet& net, const gu::URI& uri); ~AsioTcpAcceptor(); void accept_handler( SocketPtr socket, const asio::error_code& error); void listen(const gu::URI& uri); std::string listen_addr() const; void close(); SocketPtr accept(); State state() const { gu_throw_fatal << "TODO:"; } SocketId id() const { return &acceptor_; } private: AsioProtonet& net_; asio::ip::tcp::acceptor acceptor_; SocketPtr accepted_socket_; }; #if defined(__GNUG__) # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic pop # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) #endif #endif // GCOMM_ASIO_TCP_HPP galera-3-25.3.20/gcomm/src/evs_proto.hpp0000644000015300001660000004132213042054732017562 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy */ /*! * @file evs_proto.hpp * * @brief EVS protocol implementation header. */ #ifndef GCOMM_EVS_PROTO_HPP #define GCOMM_EVS_PROTO_HPP #include "gcomm/protolay.hpp" #include "gcomm/view.hpp" #include "gcomm/transport.hpp" #include "gcomm/map.hpp" #include "gu_histogram.hpp" #include "gu_stats.hpp" #include "profile.hpp" #include "evs_seqno.hpp" #include "evs_node.hpp" #include "evs_consensus.hpp" #include "protocol_version.hpp" #include "gu_datetime.hpp" #include #include #include #include namespace gcomm { namespace evs { class Message; class MessageNodeList; class UserMessage; class DelegateMessage; class GapMessage; class JoinMessage; class InstallMessage; class LeaveMessage; class InputMap; class InputMapMsg; class Proto; std::ostream& operator<<(std::ostream&, const Proto&); // // Helper class for getting the location where // certain methods are called from. // // Example usage: // Method prototype: // void fun(EVS_CALLER_ARG, int a) // // Calling: // fun(EVS_CALLER, a) // // Logging inside function: // log_debug << EVS_LOG_METHOD << "log message" // class Caller { public: Caller(const char* const file, const int line) : file_(file), line_(line) { } friend std::ostream& operator<<(std::ostream&, const Caller&); private: const char* const file_; const int line_; }; inline std::ostream& operator<<(std::ostream& os, const Caller& caller) { return (os << caller.file_ << ": " << caller.line_ << ": "); } #define EVS_CALLER_ARG const Caller& caller #define EVS_CALLER Caller(__FILE__, __LINE__) #define EVS_LOG_METHOD __FUNCTION__ << " called from " << caller } } /*! 
* @brief Class implementing EVS protocol */ class gcomm::evs::Proto : public Protolay { public: enum State { S_CLOSED, S_JOINING, S_LEAVING, S_GATHER, S_INSTALL, S_OPERATIONAL, S_MAX }; static std::string to_string(const State s) { switch (s) { case S_CLOSED: return "CLOSED"; case S_JOINING: return "JOINING"; case S_LEAVING: return "LEAVING"; case S_GATHER: return "GATHER"; case S_INSTALL: return "INSTALL"; case S_OPERATIONAL: return "OPERATIONAL"; default: gu_throw_fatal << "Invalid state"; } } friend std::ostream& operator<<(std::ostream&, const Proto&); friend class Consensus; /*! * Default constructor. */ Proto(gu::Config& conf, const UUID& my_uuid, SegmentId segment, const gu::URI& uri = gu::URI("evs://"), const size_t mtu = std::numeric_limits::max(), const View* rst_view = NULL); ~Proto(); const UUID& uuid() const { return my_uuid_; } std::string self_string() const { std::ostringstream os; os << "evs::proto(" << uuid() << ", " << to_string(state()) << ", " << current_view_.id() << ")"; return os.str(); } State state() const { return state_; } size_t known_size() const { return known_.size(); } bool is_output_empty() const { return output_.empty(); } std::string stats() const; void reset_stats(); bool is_flow_control(const seqno_t, const seqno_t win) const; int send_user(Datagram&, uint8_t, Order, seqno_t, seqno_t, size_t n_aggregated = 1); size_t mtu() const { return mtu_; } size_t aggregate_len() const; int send_user(const seqno_t); void complete_user(const seqno_t); int send_delegate(Datagram&); void send_gap(EVS_CALLER_ARG, const UUID&, const ViewId&, const Range, bool commit = false, bool req_all = false); const JoinMessage& create_join(); void send_join(bool tval = true); void set_join(const JoinMessage&, const UUID&); void set_leave(const LeaveMessage&, const UUID&); void send_leave(bool handle = true); void send_install(EVS_CALLER_ARG); void send_delayed_list(); void resend(const UUID&, const Range); void recover(const UUID&, const UUID&, const Range); void retrans_user(const UUID&, const MessageNodeList&); void retrans_leaves(const MessageNodeList&); void set_inactive(const UUID&); bool is_inactive(const UUID&) const; void check_inactive(); // Clean up foreign nodes according to install message. void cleanup_foreign(const InstallMessage&); void cleanup_views(); void cleanup_evicted(); void cleanup_joins(); size_t n_operational() const; void validate_reg_msg(const UserMessage&); void deliver_finish(const InputMapMsg&); void deliver(); void deliver_local(bool trans = false); void deliver_causal(uint8_t user_type, seqno_t seqno, const Datagram&); void validate_trans_msg(const UserMessage&); void deliver_trans(); void deliver_reg_view(const InstallMessage&, const View&); void deliver_trans_view(const InstallMessage&, const View&); void deliver_empty_view(); void setall_committed(bool val); bool is_all_committed() const; void setall_installed(bool val); bool is_all_installed() const; bool is_install_message() const { return install_message_ != 0; } bool is_representative(const UUID& pid) const; void shift_to(const State, const bool send_j = true); bool is_all_suspected(const UUID& uuid) const; const View& current_view() const { return current_view_; } // Message handlers private: /*! * Update input map safe seq * @param uuid Node uuid * @param seq Sequence number * @return Input map seqno before updating */ seqno_t update_im_safe_seq(const size_t uuid, const seqno_t seq); /*! * Update input map safe seqs according to message node list. 
Only * inactive nodes are allowed to be in */ bool update_im_safe_seqs(const MessageNodeList&); bool is_msg_from_previous_view(const Message&); void check_suspects(const UUID&, const MessageNodeList&); void cross_check_inactives(const UUID&, const MessageNodeList&); void check_unseen(); void check_nil_view_id(); void asymmetry_elimination(); void handle_foreign(const Message&); void handle_user(const UserMessage&, NodeMap::iterator, const Datagram&); void handle_delegate(const DelegateMessage&, NodeMap::iterator, const Datagram&); void handle_gap(const GapMessage&, NodeMap::iterator); void handle_join(const JoinMessage&, NodeMap::iterator); void handle_leave(const LeaveMessage&, NodeMap::iterator); void handle_install(const InstallMessage&, NodeMap::iterator); void handle_delayed_list(const DelayedListMessage&, NodeMap::iterator); void populate_node_list(MessageNodeList*) const; void isolate(gu::datetime::Period period); public: static size_t unserialize_message(const UUID&, const Datagram&, Message*); void handle_msg(const Message& msg, const Datagram& dg = Datagram(), bool direct = true); // Protolay void handle_up(const void*, const Datagram&, const ProtoUpMeta&); int handle_down(Datagram& wb, const ProtoDownMeta& dm); int send_down(Datagram& dg, const ProtoDownMeta& dm); void handle_stable_view(const View& view) { set_stable_view(view); } void handle_fencing(const UUID& uuid) { } void connect(bool first) { gu_trace(shift_to(S_JOINING)); gu_trace(send_join(first)); } void close(bool force = false) { // shifting to S_LEAVING from S_INSTALL is troublesome, // instead of that raise a boolean flag to indicate that // shifting to S_LEAVING should be done once S_OPERATIONAL // is reached // // #760 - pending leave should be done also from S_GATHER, // changing state to S_LEAVING resets timers and may prevent // remaining nodes to reach new group until install timer // times out log_debug << self_string() << " closing in state " << state(); if (state() != S_GATHER && state() != S_INSTALL) { gu_trace(shift_to(S_LEAVING)); gu_trace(send_leave()); pending_leave_ = false; } else { pending_leave_ = true; } } void close(const UUID& uuid) { set_inactive(uuid); } bool set_param(const std::string& key, const std::string& val); void handle_get_status(gu::Status& status) const; // gu::datetime::Date functions do appropriate actions for timer handling // and return next expiration time private: public: enum Timer { T_INACTIVITY, T_RETRANS, T_INSTALL, T_STATS }; /*! * Internal timer list */ typedef MultiMap TimerList; private: TimerList timers_; public: // These need currently to be public for unit tests void handle_inactivity_timer(); void handle_retrans_timer(); void handle_install_timer(); void handle_stats_timer(); gu::datetime::Date next_expiration(const Timer) const; void reset_timer(Timer); void cancel_timer(Timer); gu::datetime::Date handle_timers(); /*! * @brief Flags controlling what debug information is logged if * debug logging is turned on. 
*/ enum DebugFlags { D_STATE = 1 << 0, /*!< State changes */ D_TIMERS = 1 << 1, /*!< Timer handling */ D_CONSENSUS = 1 << 2, /*!< Consensus protocol */ D_USER_MSGS = 1 << 3, /*!< User messages */ D_DELEGATE_MSGS = 1 << 4, /*!< Delegate messages */ D_GAP_MSGS = 1 << 5, /*!< Gap messages */ D_JOIN_MSGS = 1 << 6, /*!< Join messages */ D_INSTALL_MSGS = 1 << 7, /*!< Install messages */ D_LEAVE_MSGS = 1 << 8, /*!< Leave messages */ D_FOREIGN_MSGS = 1 << 9, /*!< Foreing messages */ D_RETRANS = 1 << 10, /*!< Retransmitted/recovered messages */ D_DELIVERY = 1 << 11 /*!< Message delivery */ }; /*! * @brief Flags controlling what info log is printed in logs. */ enum InfoFlags { I_VIEWS = 1 << 0, /*!< View changes */ I_STATE = 1 << 1, /*!< State change information */ I_STATISTICS = 1 << 2, /*!< Statistics */ I_PROFILING = 1 << 3 /*!< Profiling information */ }; private: int version_; int debug_mask_; int info_mask_; gu::datetime::Date last_stats_report_; bool collect_stats_; gu::Histogram hs_agreed_; gu::Histogram hs_safe_; gu::Histogram hs_local_causal_; gu::Stats safe_deliv_latency_; long long int send_queue_s_; long long int n_send_queue_s_; std::vector sent_msgs_; long long int retrans_msgs_; long long int recovered_msgs_; std::vector recvd_msgs_; std::vector delivered_msgs_; prof::Profile send_user_prof_; prof::Profile send_gap_prof_; prof::Profile send_join_prof_; prof::Profile send_install_prof_; prof::Profile send_leave_prof_; prof::Profile consistent_prof_; prof::Profile consensus_prof_; prof::Profile shift_to_prof_; prof::Profile input_map_prof_; prof::Profile delivery_prof_; bool delivering_; UUID my_uuid_; SegmentId segment_; // // Known instances friend class Node; friend class InspectNode; NodeMap known_; NodeMap::iterator self_i_; // gu::datetime::Period view_forget_timeout_; gu::datetime::Period inactive_timeout_; gu::datetime::Period suspect_timeout_; gu::datetime::Period inactive_check_period_; gu::datetime::Period retrans_period_; gu::datetime::Period install_timeout_; gu::datetime::Period join_retrans_period_; gu::datetime::Period stats_report_period_; gu::datetime::Period causal_keepalive_period_; gu::datetime::Period delay_margin_; gu::datetime::Period delayed_keep_period_; gu::datetime::Date last_inactive_check_; gu::datetime::Date last_causal_keepalive_; // Current view id // ViewId current_view; View current_view_; View previous_view_; typedef std::map ViewList; // List of previously seen views from which messages should not be // accepted anymore ViewList previous_views_; // Seen views in gather state, will be copied to previous views // when shifting to operational ViewList gather_views_; // Map containing received messages and aru/safe seqnos InputMap* input_map_; // Helper container for local causal messages class CausalMessage { public: CausalMessage(uint8_t user_type, seqno_t seqno, const Datagram& datagram) : user_type_(user_type), seqno_ (seqno ), datagram_ (datagram ), tstamp_ (gu::datetime::Date::now()) { } uint8_t user_type() const { return user_type_; } seqno_t seqno() const { return seqno_ ; } const Datagram& datagram() const { return datagram_ ; } const gu::datetime::Date& tstamp() const { return tstamp_ ; } private: uint8_t user_type_; seqno_t seqno_; Datagram datagram_; gu::datetime::Date tstamp_; }; // Queue containing local causal messages std::deque causal_queue_; // Consensus module Consensus consensus_; // Last received install message InstallMessage* install_message_; // Highest seen view id seqno uint32_t max_view_id_seq_; // Install attempt counter 
uint32_t attempt_seq_; // Boolean to suppress logging when new view has been // detected bool new_view_logged_; // Install timeout counting int max_install_timeouts_; int install_timeout_count_; // Sequence number to maintain membership message FIFO order int64_t fifo_seq_; // Last sent seq seqno_t last_sent_; // Protocol send window size seqno_t send_window_; // User send window size seqno_t user_send_window_; // Output message queue std::deque > output_; std::vector send_buf_; uint32_t max_output_size_; size_t mtu_; bool use_aggregate_; bool self_loopback_; State state_; int shift_to_rfcnt_; bool pending_leave_; gu::datetime::Date isolation_end_; class DelayedEntry { public: typedef enum { S_OK, S_DELAYED } State; DelayedEntry(const std::string& addr) : addr_ (addr), tstamp_(gu::datetime::Date::now()), state_(S_DELAYED), state_change_cnt_(1) { } const std::string& addr() const { return addr_; } void set_tstamp(gu::datetime::Date tstamp) { tstamp_ = tstamp; } gu::datetime::Date tstamp() const { return tstamp_; } void set_state(State state, const gu::datetime::Period decay_period, const gu::datetime::Date now) { if (state == S_DELAYED && state_ != state) { // Limit to 0xff, see DelayedList format in DelayedListMessage // restricts this value to uint8_t max. if (state_change_cnt_ < 0xff) ++state_change_cnt_; } else if (state == S_OK && tstamp_ + decay_period < now) { if (state_change_cnt_ > 0) --state_change_cnt_; } state_ = state; } State state() const {return state_; } size_t state_change_cnt() const { return state_change_cnt_; } private: const std::string addr_; gu::datetime::Date tstamp_; State state_; size_t state_change_cnt_; }; typedef std::map DelayedList; DelayedList delayed_list_; size_t auto_evict_; // non-copyable Proto(const Proto&); void operator=(const Proto&); }; #endif // EVS_PROTO_HPP galera-3-25.3.20/gcomm/src/evs_input_map2.cpp0000644000015300001660000002524513042054732020476 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy */ #include "evs_input_map2.hpp" #include "gcomm/util.hpp" #include "gu_exception.hpp" #include "gu_logger.hpp" #include "gu_buffer.hpp" #include #include ////////////////////////////////////////////////////////////////////////// // // Static operators and functions // ////////////////////////////////////////////////////////////////////////// // Compare node index LUs class NodeIndexLUCmpOp { public: bool operator()(const gcomm::evs::InputMapNodeIndex::value_type& a, const gcomm::evs::InputMapNodeIndex::value_type& b) const { return (a.range().lu() < b.range().lu()); } }; class NodeIndexHSCmpOp { public: bool operator()(const gcomm::evs::InputMapNodeIndex::value_type& a, const gcomm::evs::InputMapNodeIndex::value_type& b) const { return (a.range().hs() < b.range().hs()); } }; // Compare node index safe seqs class NodeIndexSafeSeqCmpOp { public: bool operator()(const gcomm::evs::InputMapNodeIndex::value_type& a, const gcomm::evs::InputMapNodeIndex::value_type& b) const { return a.safe_seq() < b.safe_seq(); } }; ////////////////////////////////////////////////////////////////////////// // // Ostream operators // ////////////////////////////////////////////////////////////////////////// std::ostream& gcomm::evs::operator<<(std::ostream& os, const InputMapNode& in) { return (os << "node: {" << "idx=" << in.index() << "," << "range=" << in.range() << "," << "safe_seq=" << in.safe_seq() << "}"); } std::ostream& gcomm::evs::operator<<(std::ostream& os, const InputMapNodeIndex& ni) { copy(ni.begin(), ni.end(), std::ostream_iterator(os, " ")); return 
os; } std::ostream& gcomm::operator<<(std::ostream& os, const InputMapMsgKey& mk) { return (os << "(" << mk.index() << "," << mk.seq() << ")"); } std::ostream& gcomm::evs::operator<<(std::ostream& os, const InputMapMsg& m) { return (os << m.msg()); } std::ostream& gcomm::evs::operator<<(std::ostream& os, const InputMap& im) { return (os << "evs::input_map: {" << "aru_seq=" << im.aru_seq() << "," << "safe_seq=" << im.safe_seq() << "," << "node_index=" << *im.node_index_ #ifndef NDEBUG << "," << "msg_index=" << *im.msg_index_ << "," << "recovery_index=" << *im.recovery_index_ #endif // !NDEBUG << "}"); } ////////////////////////////////////////////////////////////////////////// // // Constructors/destructors // ////////////////////////////////////////////////////////////////////////// gcomm::evs::InputMap::InputMap() : window_ (-1), safe_seq_ (-1), aru_seq_ (-1), node_index_ (new InputMapNodeIndex()), msg_index_ (new InputMapMsgIndex()), recovery_index_ (new InputMapMsgIndex()) { } gcomm::evs::InputMap::~InputMap() { clear(); delete node_index_; delete msg_index_; delete recovery_index_; } ////////////////////////////////////////////////////////////////////////// // // Public member functions // ////////////////////////////////////////////////////////////////////////// void gcomm::evs::InputMap::reset(const size_t nodes, const seqno_t window) { gcomm_assert(msg_index_->empty() == true && recovery_index_->empty() == true); node_index_->clear(); window_ = window; log_debug << " size " << node_index_->size(); gu_trace(node_index_->resize(nodes, InputMapNode())); for (size_t i = 0; i < nodes; ++i) { node_index_->at(i).set_index(i); } log_debug << *node_index_ << " size " << node_index_->size(); } gcomm::evs::seqno_t gcomm::evs::InputMap::min_hs() const { seqno_t ret; gcomm_assert(node_index_->empty() == false); ret = min_element(node_index_->begin(), node_index_->end(), NodeIndexHSCmpOp())->range().hs(); return ret; } gcomm::evs::seqno_t gcomm::evs::InputMap::max_hs() const { seqno_t ret; gcomm_assert(node_index_->empty() == false); ret = max_element(node_index_->begin(), node_index_->end(), NodeIndexHSCmpOp())->range().hs(); return ret; } void gcomm::evs::InputMap::set_safe_seq(const size_t uuid, const seqno_t seq) { gcomm_assert(seq != -1); // @note This assertion does not necessarily hold. Some other // instance may well have higher all received up to seqno // than this (due to packet loss). Commented out... and left // for future reference. // gcomm_assert(aru_seq != seqno_t::max() && seq <= aru_seq); // Update node safe seq. Must (at least should) be updated // in monotonically increasing order if node works ok. InputMapNode& node(node_index_->at(uuid)); gcomm_assert(seq >= node.safe_seq()) << "node.safe_seq=" << node.safe_seq() << " seq=" << seq; node.set_safe_seq(seq); // Update global safe seq which must be monotonically increasing. 
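    // The global safe seq is the minimum of the per-node safe seqs,
    // i.e. the highest seqno that every node in the membership is known
    // to have received. It can never exceed aru_seq_ and is used below
    // (cleanup_recovery_index()) to purge no longer needed messages
    // from the recovery index.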
InputMapNodeIndex::const_iterator min = min_element(node_index_->begin(), node_index_->end(), NodeIndexSafeSeqCmpOp()); const seqno_t minval = min->safe_seq(); gcomm_assert(minval >= safe_seq_); safe_seq_ = minval; // Global safe seq must always be smaller than equal to aru seq gcomm_assert(safe_seq_ <= aru_seq_); // Cleanup recovery index cleanup_recovery_index(); } void gcomm::evs::InputMap::clear() { if (msg_index_->empty() == false) { log_warn << "discarding " << msg_index_->size() << " messages from message index"; } msg_index_->clear(); if (recovery_index_->empty() == false) { log_debug << "discarding " << recovery_index_->size() << " messages from recovery index"; } recovery_index_->clear(); node_index_->clear(); aru_seq_ = -1; safe_seq_ = -1; } gcomm::evs::Range gcomm::evs::InputMap::insert(const size_t uuid, const UserMessage& msg, const Datagram& rb) { Range range; // Only insert messages with meaningful seqno gcomm_assert(msg.seq() > -1); // User should check aru_seq before inserting. This check is left // also in optimized builds since violating it may cause duplicate // messages. gcomm_assert(aru_seq_ < msg.seq()) << "aru seq " << aru_seq_ << " msg seq " << msg.seq() << " index size " << msg_index_->size(); gcomm_assert(uuid < node_index_->size()); InputMapNode& node((*node_index_)[uuid]); range = node.range(); // User should check LU before inserting. This check is left // also in optimized builds since violating it may cause duplicate // messages gcomm_assert(range.lu() <= msg.seq()) << "lu " << range.lu() << " > " << msg.seq(); // Check whether this message has already been seen if (msg.seq() < node.range().lu() || (msg.seq() <= node.range().hs() && recovery_index_->find(InputMapMsgKey(node.index(), msg.seq())) != recovery_index_->end())) { return node.range(); } // Loop over message seqno range and insert messages when not // already found for (seqno_t s = msg.seq(); s <= msg.seq() + msg.seq_range(); ++s) { InputMapMsgIndex::iterator msg_i; if (range.hs() < s) { msg_i = msg_index_->end(); } else { msg_i = msg_index_->find(InputMapMsgKey(node.index(), s)); } if (msg_i == msg_index_->end()) { Datagram ins_dg(s == msg.seq() ? Datagram(rb) : Datagram()); gu_trace((void)msg_index_->insert_unique( std::make_pair( InputMapMsgKey(node.index(), s), InputMapMsg( (s == msg.seq() ? 
msg : UserMessage(msg.version(), msg.source(), msg.source_view_id(), s, msg.aru_seq(), 0, O_DROP)), ins_dg)))); } // Update highest seen if (range.hs() < s) { range.set_hs(s); } // Update lowest unseen if (range.lu() == s) { seqno_t i(s); do { ++i; } while ( i <= range.hs() && (msg_index_->find(InputMapMsgKey(node.index(), i)) != msg_index_->end() || recovery_index_->find(InputMapMsgKey(node.index(), i)) != recovery_index_->end())); range.set_lu(i); } } node.set_range(range); update_aru(); return range; } void gcomm::evs::InputMap::erase(iterator i) { gu_trace(recovery_index_->insert_unique(*i)); gu_trace(msg_index_->erase(i)); } gcomm::evs::InputMap::iterator gcomm::evs::InputMap::find(const size_t uuid, const seqno_t seq) const { iterator ret; const InputMapNode& node(node_index_->at(uuid)); const InputMapMsgKey key(node.index(), seq); gu_trace(ret = msg_index_->find(key)); return ret; } gcomm::evs::InputMap::iterator gcomm::evs::InputMap::recover(const size_t uuid, const seqno_t seq) const { iterator ret; const InputMapNode& node(node_index_->at(uuid)); const InputMapMsgKey key(node.index(), seq); gu_trace(ret = recovery_index_->find_checked(key)); return ret; } ////////////////////////////////////////////////////////////////////////// // // Private member functions // ////////////////////////////////////////////////////////////////////////// inline void gcomm::evs::InputMap::update_aru() { InputMapNodeIndex::const_iterator min = min_element(node_index_->begin(), node_index_->end(), NodeIndexLUCmpOp()); const seqno_t minval = min->range().lu(); /* aru_seq must not decrease */ gcomm_assert(minval - 1 >= aru_seq_); aru_seq_ = minval - 1; } void gcomm::evs::InputMap::cleanup_recovery_index() { gcomm_assert(node_index_->size() > 0); InputMapMsgIndex::iterator i = recovery_index_->lower_bound( InputMapMsgKey(0, safe_seq_ + 1)); recovery_index_->erase(recovery_index_->begin(), i); } galera-3-25.3.20/gcomm/src/gmcast.cpp0000644000015300001660000015211513042054732017016 0ustar jenkinsjenkins/* * Copyright (C) 2009-2014 Codership Oy */ #include "gmcast.hpp" #include "gmcast_proto.hpp" #include "gcomm/common.hpp" #include "gcomm/conf.hpp" #include "gcomm/util.hpp" #include "gcomm/map.hpp" #include "defaults.hpp" #include "gu_convert.hpp" #include "gu_resolver.hpp" #include "gu_asio.hpp" // gu::conf::use_ssl using namespace std::rel_ops; using gcomm::gmcast::Proto; using gcomm::gmcast::ProtoMap; using gcomm::gmcast::Link; using gcomm::gmcast::LinkMap; using gcomm::gmcast::Message; const long gcomm::GMCast::max_retry_cnt_(std::numeric_limits::max()); static void set_tcp_defaults (gu::URI* uri) { // what happens if there is already this parameter? uri->set_option(gcomm::Conf::TcpNonBlocking, gu::to_string(1)); } static bool check_tcp_uri(const gu::URI& uri) { return (uri.get_scheme() == gu::scheme::tcp || uri.get_scheme() == gu::scheme::ssl); } static std::string get_scheme(bool use_ssl) { if (use_ssl == true) { return gu::scheme::ssl; } return gu::scheme::tcp; } gcomm::GMCast::GMCast(Protonet& net, const gu::URI& uri, const UUID* my_uuid) : Transport (net, uri), version_(check_range(Conf::GMCastVersion, param(conf_, uri, Conf::GMCastVersion, "0"), 0, max_version_ + 1)), segment_ (check_range(Conf::GMCastSegment, param(conf_, uri, Conf::GMCastSegment, "0"), 0, 255)), my_uuid_ (my_uuid ? 
*my_uuid : UUID(0, 0)), use_ssl_ (param(conf_, uri, gu::conf::use_ssl, "false")), // @todo: technically group name should be in path component group_name_ (param(conf_, uri, Conf::GMCastGroup, "")), listen_addr_ ( param( conf_, uri, Conf::GMCastListenAddr, get_scheme(use_ssl_) + "://0.0.0.0")), // how to make it IPv6 safe? initial_addrs_(), mcast_addr_ (param(conf_, uri, Conf::GMCastMCastAddr, "")), bind_ip_ (""), mcast_ttl_ (check_range( Conf::GMCastMCastTTL, param(conf_, uri, Conf::GMCastMCastTTL, "1"), 1, 256)), listener_ (0), mcast_ (), pending_addrs_(), remote_addrs_ (), addr_blacklist_(), relaying_ (false), isolate_ (false), proto_map_ (new ProtoMap()), relay_set_ (), segment_map_ (), self_index_ (std::numeric_limits::max()), time_wait_ (param( conf_, uri, Conf::GMCastTimeWait, Defaults::GMCastTimeWait)), check_period_ ("PT0.5S"), peer_timeout_ (param( conf_, uri, Conf::GMCastPeerTimeout, Defaults::GMCastPeerTimeout)), max_initial_reconnect_attempts_( param(conf_, uri, Conf::GMCastMaxInitialReconnectAttempts, gu::to_string(max_retry_cnt_))), next_check_ (gu::datetime::Date::now()) { log_info << "GMCast version " << version_; if (group_name_ == "") { gu_throw_error (EINVAL) << "Group not defined in URL: " << uri_.to_string(); } set_initial_addr(uri_); try { listen_addr_ = uri_.get_option (Conf::GMCastListenAddr); } catch (gu::NotFound&) {} try { gu::URI uri(listen_addr_); /* check validity of the address */ } catch (gu::Exception&) { /* most probably no scheme, try to append one and see if it succeeds */ listen_addr_ = uri_string(get_scheme(use_ssl_), listen_addr_); gu_trace(gu::URI uri(listen_addr_)); } gu::URI listen_uri(listen_addr_); if (check_tcp_uri(listen_uri) == false) { gu_throw_error (EINVAL) << "listen addr '" << listen_addr_ << "' does not specify supported protocol"; } if (gu::net::resolve(listen_uri).get_addr().is_anyaddr() == false) { // bind outgoing connections to the same address as listening. 
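        // bind_ip_ is later passed as the OptIfAddr option when opening
        // outgoing connections in gmcast_connect(), so that outgoing
        // connections are bound to the same address this node listens on.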
gu_trace(bind_ip_ = listen_uri.get_host()); } std::string port(Defaults::GMCastTcpPort); try { port = listen_uri.get_port(); } catch (gu::NotSet&) { // if no listen port is set for listen address in the options, // see if base port was configured try { port = conf_.get(BASE_PORT_KEY); } catch (gu::NotSet&) { // if no base port configured, try port from the connection address try { port = uri_.get_port(); } catch (gu::NotSet&) {} } listen_addr_ += ":" + port; } conf_.set(BASE_PORT_KEY, port); listen_addr_ = gu::net::resolve(listen_addr_).to_string(); // resolving sets scheme to tcp, have to rewrite for ssl if (use_ssl_ == true) { listen_addr_.replace(0, 3, gu::scheme::ssl); } std::set::iterator iaself(initial_addrs_.find(listen_addr_)); if (iaself != initial_addrs_.end()) { log_debug << "removing own listen address '" << *iaself << "' from initial address list"; initial_addrs_.erase(iaself); } if (mcast_addr_ != "") { try { port = uri_.get_option(Conf::GMCastMCastPort); } catch (gu::NotFound&) {} mcast_addr_ = gu::net::resolve( uri_string(gu::scheme::udp, mcast_addr_, port)).to_string(); } log_info << self_string() << " listening at " << listen_addr_; log_info << self_string() << " multicast: " << mcast_addr_ << ", ttl: " << mcast_ttl_; conf_.set(Conf::GMCastListenAddr, listen_addr_); conf_.set(Conf::GMCastMCastAddr, mcast_addr_); conf_.set(Conf::GMCastVersion, gu::to_string(version_)); conf_.set(Conf::GMCastTimeWait, gu::to_string(time_wait_)); conf_.set(Conf::GMCastMCastTTL, gu::to_string(mcast_ttl_)); conf_.set(Conf::GMCastPeerTimeout, gu::to_string(peer_timeout_)); conf_.set(Conf::GMCastSegment, gu::to_string(segment_)); } gcomm::GMCast::~GMCast() { if (listener_ != 0) close(); delete proto_map_; } void gcomm::GMCast::set_initial_addr(const gu::URI& uri) { const gu::URI::AuthorityList& al(uri.get_authority_list()); for (gu::URI::AuthorityList::const_iterator i(al.begin()); i != al.end(); ++i) { std::string host; try { host = i->host(); } catch (gu::NotSet& ns) { gu_throw_error(EINVAL) << "Unset host in URL " << uri; } if (host_is_any(host)) continue; std::string port; try { port = i->port(); } catch (gu::NotSet& ) { try { port = conf_.get(BASE_PORT_KEY); } catch (gu::NotFound&) { port = Defaults::GMCastTcpPort; } } std::string initial_uri = uri_string(get_scheme(use_ssl_), host, port); std::string initial_addr; try { initial_addr = gu::net::resolve(initial_uri).to_string(); } catch (gu::Exception& ) { log_warn << "Failed to resolve " << initial_uri; continue; } // resolving sets scheme to tcp, have to rewrite for ssl if (use_ssl_ == true) { initial_addr.replace(0, 3, gu::scheme::ssl); } if (check_tcp_uri(initial_addr) == false) { gu_throw_error (EINVAL) << "initial addr '" << initial_addr << "' is not valid"; } log_debug << self_string() << " initial addr: " << initial_addr; initial_addrs_.insert(initial_addr); } } void gcomm::GMCast::connect_precheck(bool start_prim) { if (!start_prim && initial_addrs_.empty()) { gu_throw_fatal << "No address to connect"; } } void gcomm::GMCast::connect() { pstack_.push_proto(this); log_debug << "gmcast " << uuid() << " connect"; gu::URI listen_uri(listen_addr_); set_tcp_defaults (&listen_uri); listener_ = pnet().acceptor(listen_uri); gu_trace (listener_->listen(listen_uri)); if (!mcast_addr_.empty()) { gu::URI mcast_uri( mcast_addr_ + '?' 
+ gcomm::Socket::OptIfAddr + '=' + gu::URI(listen_addr_).get_host()+'&' + gcomm::Socket::OptNonBlocking + "=1&" + gcomm::Socket::OptMcastTTL + '=' + gu::to_string(mcast_ttl_) ); mcast_ = pnet().socket(mcast_uri); gu_trace(mcast_->connect(mcast_uri)); } if (!initial_addrs_.empty()) { for (std::set::const_iterator i(initial_addrs_.begin()); i != initial_addrs_.end(); ++i) { insert_address(*i, UUID(), pending_addrs_); AddrList::iterator ai(pending_addrs_.find(*i)); AddrList::value(ai).set_max_retries(max_retry_cnt_); gu_trace (gmcast_connect(*i)); } } } void gcomm::GMCast::connect(const gu::URI& uri) { set_initial_addr(uri); connect(); } void gcomm::GMCast::close(bool force) { log_debug << "gmcast " << uuid() << " close"; pstack_.pop_proto(this); if (mcast_ != 0) { mcast_->close(); // delete mcast; // mcast = 0; } gcomm_assert(listener_ != 0); listener_->close(); delete listener_; listener_ = 0; segment_map_.clear(); for (ProtoMap::iterator i = proto_map_->begin(); i != proto_map_->end(); ++i) { delete ProtoMap::value(i); } proto_map_->clear(); pending_addrs_.clear(); remote_addrs_.clear(); } // Erase proto entry in safe manner // 1) Erase from relay_set_ // 2) Erase from proto_map_ // 3) Delete proto entry void gcomm::GMCast::erase_proto(gmcast::ProtoMap::iterator i) { Proto* p(ProtoMap::value(i)); std::set::iterator si(relay_set_.find(p->socket().get())); if (si != relay_set_.end()) { relay_set_.erase(si); } proto_map_->erase(i); delete p; } void gcomm::GMCast::gmcast_accept() { SocketPtr tp; try { tp = listener_->accept(); } catch (gu::Exception& e) { log_warn << e.what(); return; } if (isolate_ == true) { log_debug << "dropping accepted socket due to isolation"; tp->close(); return; } Proto* peer = new Proto ( *this, version_, tp, listener_->listen_addr(), "", mcast_addr_, segment_, group_name_); std::pair ret = proto_map_->insert(std::make_pair(tp->id(), peer)); if (ret.second == false) { delete peer; gu_throw_fatal << "Failed to add peer to map"; } if (tp->state() == Socket::S_CONNECTED) { peer->send_handshake(); } else { log_debug << "accepted socket is connecting"; } log_debug << "handshake sent"; } void gcomm::GMCast::gmcast_connect(const std::string& remote_addr) { if (remote_addr == listen_addr_) return; gu::URI connect_uri(remote_addr); set_tcp_defaults (&connect_uri); if (!bind_ip_.empty()) { connect_uri.set_option(gcomm::Socket::OptIfAddr, bind_ip_); } SocketPtr tp = pnet().socket(connect_uri); try { tp->connect(connect_uri); } catch (gu::Exception& e) { log_debug << "Connect failed: " << e.what(); // delete tp; return; } Proto* peer = new Proto ( *this, version_, tp, listener_->listen_addr(), remote_addr, mcast_addr_, segment_, group_name_); std::pair ret = proto_map_->insert(std::make_pair(tp->id(), peer)); if (ret.second == false) { delete peer; gu_throw_fatal << "Failed to add peer to map"; } ret.first->second->wait_handshake(); } void gcomm::GMCast::gmcast_forget(const UUID& uuid, const gu::datetime::Period& wait_period) { /* Close all proto entries corresponding to uuid */ ProtoMap::iterator pi, pi_next; for (pi = proto_map_->begin(); pi != proto_map_->end(); pi = pi_next) { pi_next = pi, ++pi_next; Proto* rp = ProtoMap::value(pi); if (rp->remote_uuid() == uuid) { erase_proto(pi); } } /* Set all corresponding entries in address list to have retry cnt * greater than max retries and next reconnect time after some period */ AddrList::iterator ai; for (ai = remote_addrs_.begin(); ai != remote_addrs_.end(); ++ai) { AddrEntry& ae(AddrList::value(ai)); if (ae.uuid() == uuid) { 
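            // Drop any remaining proto entries for this peer and mark
            // the address entry exhausted (retry_cnt > max_retries), so
            // reconnect() will eventually clean it up instead of dialing
            // it again; next_reconnect keeps the entry around for at
            // least wait_period before that cleanup happens.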
log_info << "forgetting " << uuid << " (" << AddrList::key(ai) << ")"; ProtoMap::iterator pi, pi_next; for (pi = proto_map_->begin(); pi != proto_map_->end(); pi = pi_next) { pi_next = pi, ++pi_next; if (ProtoMap::value(pi)->remote_addr() == AddrList::key(ai)) { log_info << "deleting entry " << AddrList::key(ai); erase_proto(pi); } } ae.set_max_retries(0); ae.set_retry_cnt(1); gu::datetime::Date now(gu::datetime::Date::now()); // Don't reduce next reconnect time if it is set greater than // requested if ((now + wait_period > ae.next_reconnect()) || (ae.next_reconnect() == gu::datetime::Date::max())) { ae.set_next_reconnect(gu::datetime::Date::now() + wait_period); } else { log_debug << "not decreasing next reconnect for " << uuid; } } } /* Update state */ update_addresses(); } void gcomm::GMCast::handle_connected(Proto* rp) { const SocketPtr tp(rp->socket()); assert(tp->state() == Socket::S_CONNECTED); log_debug << "transport " << tp << " connected"; if (rp->state() == Proto::S_INIT) { log_debug << "sending handshake"; // accepted socket was waiting for underlying transport // handshake to finish rp->send_handshake(); } } void gcomm::GMCast::handle_established(Proto* est) { log_info << self_string() << " connection established to " << est->remote_uuid() << " " << est->remote_addr(); if (is_evicted(est->remote_uuid())) { log_warn << "Closing connection to evicted node " << est->remote_uuid(); erase_proto(proto_map_->find_checked(est->socket()->id())); update_addresses(); return; } if (est->remote_uuid() == uuid()) { std::set::iterator ia_i(initial_addrs_.find(est->remote_addr())); if (ia_i != initial_addrs_.end()) { initial_addrs_.erase(ia_i); } AddrList::iterator i(pending_addrs_.find(est->remote_addr())); if (i != pending_addrs_.end()) { if (addr_blacklist_.find(est->remote_addr()) == addr_blacklist_.end()) { log_warn << self_string() << " address '" << est->remote_addr() << "' points to own listening address, blacklisting"; } pending_addrs_.erase(i); addr_blacklist_.insert(make_pair(est->remote_addr(), AddrEntry(gu::datetime::Date::now(), gu::datetime::Date::now(), est->remote_uuid()))); } erase_proto(proto_map_->find_checked(est->socket()->id())); update_addresses(); return; } // If address is found from pending_addrs_, move it to remote_addrs list // and set retry cnt to -1 const std::string& remote_addr(est->remote_addr()); AddrList::iterator i(pending_addrs_.find(remote_addr)); if (i != pending_addrs_.end()) { log_debug << "Erasing " << remote_addr << " from panding list"; pending_addrs_.erase(i); } if ((i = remote_addrs_.find(remote_addr)) == remote_addrs_.end()) { log_debug << "Inserting " << remote_addr << " to remote list"; insert_address (remote_addr, est->remote_uuid(), remote_addrs_); i = remote_addrs_.find(remote_addr); } else if (AddrList::value(i).uuid() != est->remote_uuid()) { log_info << "remote endpoint " << est->remote_addr() << " changed identity " << AddrList::value(i).uuid() << " -> " << est->remote_uuid(); remote_addrs_.erase(i); i = remote_addrs_.insert_unique( make_pair(est->remote_addr(), AddrEntry(gu::datetime::Date::now(), gu::datetime::Date::now(), est->remote_uuid()))); } if (AddrList::value(i).retry_cnt() > AddrList::value(i).max_retries()) { log_warn << "discarding established (time wait) " << est->remote_uuid() << " (" << est->remote_addr() << ") "; erase_proto(proto_map_->find(est->socket()->id())); update_addresses(); return; } // send_up(Datagram(), p->remote_uuid()); // init retry cnt to -1 to avoid unnecessary logging at first attempt // max retries 
will be readjusted in handle stable view AddrList::value(i).set_retry_cnt(-1); AddrList::value(i).set_max_retries(max_initial_reconnect_attempts_); // Cleanup all previously established entries with same // remote uuid. It is assumed that the most recent connection // is usually the healthiest one. ProtoMap::iterator j, j_next; for (j = proto_map_->begin(); j != proto_map_->end(); j = j_next) { j_next = j, ++j_next; Proto* p(ProtoMap::value(j)); if (p->remote_uuid() == est->remote_uuid()) { if (p->handshake_uuid() < est->handshake_uuid()) { log_debug << self_string() << " cleaning up duplicate " << p->socket() << " after established " << est->socket(); erase_proto(j); } else if (p->handshake_uuid() > est->handshake_uuid()) { log_debug << self_string() << " cleaning up established " << est->socket() << " which is duplicate of " << p->socket(); erase_proto(proto_map_->find_checked(est->socket()->id())); update_addresses(); return; } else { assert(p == est); } } } AddrList::iterator ali(find_if(remote_addrs_.begin(), remote_addrs_.end(), AddrListUUIDCmp(est->remote_uuid()))); if (ali != remote_addrs_.end()) { AddrList::value(ali).set_last_connect(); } else { log_warn << "peer " << est->remote_addr() << " not found from remote addresses"; } update_addresses(); } void gcomm::GMCast::handle_failed(Proto* failed) { log_debug << "handle failed: " << *failed; const std::string& remote_addr = failed->remote_addr(); bool found_ok(false); for (ProtoMap::const_iterator i = proto_map_->begin(); i != proto_map_->end(); ++i) { Proto* p(ProtoMap::value(i)); if (p != failed && p->state() <= Proto::S_OK && p->remote_addr() == failed->remote_addr()) { log_debug << "found live " << *p; found_ok = true; break; } } if (found_ok == false && remote_addr != "") { AddrList::iterator i; if ((i = pending_addrs_.find(remote_addr)) != pending_addrs_.end() || (i = remote_addrs_.find(remote_addr)) != remote_addrs_.end()) { AddrEntry& ae(AddrList::value(i)); ae.set_retry_cnt(ae.retry_cnt() + 1); gu::datetime::Date rtime = gu::datetime::Date::now() + gu::datetime::Period("PT1S"); log_debug << self_string() << " setting next reconnect time to " << rtime << " for " << remote_addr; ae.set_next_reconnect(rtime); } } erase_proto(proto_map_->find_checked(failed->socket()->id())); update_addresses(); } bool gcomm::GMCast::is_connected(const std::string& addr, const UUID& uuid) const { for (ProtoMap::const_iterator i = proto_map_->begin(); i != proto_map_->end(); ++i) { Proto* conn = ProtoMap::value(i); if (addr == conn->remote_addr() || uuid == conn->remote_uuid()) { return true; } } return false; } void gcomm::GMCast::insert_address (const std::string& addr, const UUID& uuid, AddrList& alist) { if (addr == listen_addr_) { gu_throw_fatal << "Trying to add self addr " << addr << " to addr list"; } if (alist.insert(make_pair(addr, AddrEntry(gu::datetime::Date::now(), gu::datetime::Date::now(), uuid))).second == false) { log_warn << "Duplicate entry: " << addr; } else { log_debug << self_string() << ": new address entry " << uuid << ' ' << addr; } } void gcomm::GMCast::update_addresses() { LinkMap link_map; std::set uuids; /* Add all established connections into uuid_map and update * list of remote addresses */ ProtoMap::iterator i, i_next; for (i = proto_map_->begin(); i != proto_map_->end(); i = i_next) { i_next = i, ++i_next; Proto* rp = ProtoMap::value(i); if (rp->state() == Proto::S_OK) { if (rp->remote_addr() == "" || rp->remote_uuid() == UUID::nil()) { gu_throw_fatal << "Protocol error: local: (" << my_uuid_ << ", '" << 
listen_addr_ << "'), remote: (" << rp->remote_uuid() << ", '" << rp->remote_addr() << "')"; } if (remote_addrs_.find(rp->remote_addr()) == remote_addrs_.end()) { log_warn << "Connection exists but no addr on addr list for " << rp->remote_addr(); insert_address(rp->remote_addr(), rp->remote_uuid(), remote_addrs_); } if (uuids.insert(rp->remote_uuid()).second == false) { // Duplicate entry, drop this one // @todo Deeper inspection about the connection states log_debug << self_string() << " dropping duplicate entry"; erase_proto(i); } else { link_map.insert(Link(rp->remote_uuid(), rp->remote_addr(), rp->mcast_addr())); } } } /* Send topology change message containing only established * connections */ for (ProtoMap::iterator i = proto_map_->begin(); i != proto_map_->end(); ++i) { Proto* gp = ProtoMap::value(i); // @todo: a lot of stuff here is done for each connection, including // message creation and serialization. Need a mcast_msg() call // and move this loop in there. if (gp->state() == Proto::S_OK) gp->send_topology_change(link_map); } /* Add entries reported by all other nodes to address list to * get complete view of existing uuids/addresses */ for (ProtoMap::iterator i = proto_map_->begin(); i != proto_map_->end(); ++i) { Proto* rp = ProtoMap::value(i); if (rp->state() == Proto::S_OK) { for (LinkMap::const_iterator j = rp->link_map().begin(); j != rp->link_map().end(); ++j) { const UUID& link_uuid(LinkMap::key(j)); const std::string& link_addr(LinkMap::value(j).addr()); gcomm_assert(link_uuid != UUID::nil() && link_addr != ""); if (addr_blacklist_.find(link_addr) != addr_blacklist_.end()) { log_debug << self_string() << " address '" << link_addr << "' pointing to uuid " << link_uuid << " is blacklisted, skipping"; continue; } if (link_uuid != uuid() && remote_addrs_.find(link_addr) == remote_addrs_.end() && pending_addrs_.find(link_addr) == pending_addrs_.end()) { log_debug << self_string() << " conn refers to but no addr in addr list for " << link_addr; insert_address(link_addr, link_uuid, remote_addrs_); AddrList::iterator pi(remote_addrs_.find(link_addr)); assert(pi != remote_addrs_.end()); AddrEntry& ae(AddrList::value(pi)); // init retry cnt to -1 to avoid unnecessary logging // at first attempt // max retries will be readjusted in handle stable view ae.set_retry_cnt(-1); ae.set_max_retries(max_initial_reconnect_attempts_); // Add some randomness for first reconnect to avoid // simultaneous connects gu::datetime::Date rtime(gu::datetime::Date::now()); rtime = rtime + ::rand() % (100*gu::datetime::MSec); ae.set_next_reconnect(rtime); next_check_ = std::min(next_check_, rtime); } } } } // Build multicast tree log_debug << self_string() << " --- mcast tree begin ---"; segment_map_.clear(); Segment& local_segment(segment_map_[segment_]); if (mcast_ != 0) { log_debug << mcast_addr_; local_segment.push_back(mcast_.get()); } self_index_ = 0; for (ProtoMap::const_iterator i(proto_map_->begin()); i != proto_map_->end(); ++i) { const Proto& p(*ProtoMap::value(i)); log_debug << "Proto: " << p; if (p.remote_segment() == segment_) { if (p.state() == Proto::S_OK && (p.mcast_addr() == "" || p.mcast_addr() != mcast_addr_)) { local_segment.push_back(p.socket().get()); if (p.remote_uuid() < uuid()) { ++self_index_; } } } else { if (p.state() == Proto::S_OK) { Segment& remote_segment(segment_map_[p.remote_segment()]); remote_segment.push_back(p.socket().get()); } } } log_debug << self_string() << " self index: " << self_index_; log_debug << self_string() << " --- mcast tree end ---"; } void 
gcomm::GMCast::reconnect() { if (isolate_ == true) { log_debug << "skipping reconnect due to isolation"; return; } /* Loop over known remote addresses and connect if proto entry * does not exist */ gu::datetime::Date now = gu::datetime::Date::now(); AddrList::iterator i, i_next; for (i = pending_addrs_.begin(); i != pending_addrs_.end(); i = i_next) { i_next = i, ++i_next; const std::string& pending_addr(AddrList::key(i)); const AddrEntry& ae(AddrList::value(i)); if (is_connected (pending_addr, UUID::nil()) == false && ae.next_reconnect() <= now) { if (ae.retry_cnt() > ae.max_retries()) { log_info << "cleaning up pending addr " << pending_addr; pending_addrs_.erase(i); continue; // no reference to pending_addr after this } else if (ae.next_reconnect() <= now) { log_debug << "connecting to pending " << pending_addr; gmcast_connect (pending_addr); } } } for (i = remote_addrs_.begin(); i != remote_addrs_.end(); i = i_next) { i_next = i, ++i_next; const std::string& remote_addr(AddrList::key(i)); const AddrEntry& ae(AddrList::value(i)); const UUID& remote_uuid(ae.uuid()); gcomm_assert(remote_uuid != uuid()); if (is_connected(remote_addr, remote_uuid) == false && ae.next_reconnect() <= now) { if (ae.retry_cnt() > ae.max_retries()) { log_info << " cleaning up " << remote_uuid << " (" << remote_addr << ")"; remote_addrs_.erase(i); continue;//no reference to remote_addr or remote_uuid after this } else if (ae.next_reconnect() <= now) { if (ae.retry_cnt() % 30 == 0) { log_info << self_string() << " reconnecting to " << remote_uuid << " (" << remote_addr << "), attempt " << ae.retry_cnt(); } gmcast_connect(remote_addr); } else { // } } } } namespace { class CmpUuidCounts { public: CmpUuidCounts(const std::set& uuids, gcomm::SegmentId preferred_segment) : uuids_(uuids), preferred_segment_(preferred_segment) { } size_t count(const gcomm::gmcast::Proto* p) const { size_t cnt(0); for (std::set::const_iterator i(uuids_.begin()); i != uuids_.end(); ++i) { for (gcomm::gmcast::LinkMap::const_iterator lm_i(p->link_map().begin()); lm_i != p->link_map().end(); ++lm_i) { if (lm_i->uuid() == *i) { ++cnt; break; } } } return cnt; } bool operator()(const gcomm::gmcast::Proto* a, const gcomm::gmcast::Proto* b) const { size_t ac(count(a)); size_t bc(count(b)); // if counts are equal, prefer peer from the same segment return (ac < bc || (ac == bc && a->remote_segment() != preferred_segment_)); } private: const std::set& uuids_; gcomm::SegmentId preferred_segment_; }; } void gcomm::GMCast::check_liveness() { std::set live_uuids; // iterate over proto map and mark all timed out entries as failed gu::datetime::Date now(gu::datetime::Date::now()); for (ProtoMap::iterator i(proto_map_->begin()); i != proto_map_->end(); ) { ProtoMap::iterator i_next(i); ++i_next; Proto* p(ProtoMap::value(i)); if (p->state() > Proto::S_INIT && p->state() < Proto::S_FAILED && p->tstamp() + peer_timeout_ < now) { log_info << self_string() << " connection to peer " << p->remote_uuid() << " with addr " << p->remote_addr() << " timed out, no messages seen in " << peer_timeout_; p->set_state(Proto::S_FAILED); handle_failed(p); } else if (p->state() == Proto::S_OK) { if (p->tstamp() + peer_timeout_*2/3 < now) { p->send_keepalive(); } if (p->state() == Proto::S_FAILED) { handle_failed(p); } else { live_uuids.insert(p->remote_uuid()); } } i = i_next; } bool should_relay(false); // iterate over addr list and check if there is at least one live // proto entry associated to each addr entry std::set nonlive_uuids; std::string nonlive_peers; for 
(AddrList::const_iterator i(remote_addrs_.begin()); i != remote_addrs_.end(); ++i) { const AddrEntry& ae(AddrList::value(i)); if (ae.retry_cnt() <= ae.max_retries() && live_uuids.find(ae.uuid()) == live_uuids.end()) { // log_info << self_string() // << " missing live proto entry for " << ae.uuid(); nonlive_uuids.insert(ae.uuid()); nonlive_peers += AddrList::key(i) + " "; should_relay = true; } else if (ae.last_connect() + peer_timeout_ > now) { log_debug << "continuing relaying for " << (ae.last_connect() + peer_timeout_ - now); should_relay = true; } } if (should_relay == true) { if (relaying_ == false) { log_info << self_string() << " turning message relay requesting on, nonlive peers: " << nonlive_peers; relaying_ = true; } relay_set_.clear(); // build set of protos having OK status std::set proto_set; for (ProtoMap::iterator i(proto_map_->begin()); i != proto_map_->end(); ++i) { Proto* p(ProtoMap::value(i)); if (p->state() == Proto::S_OK) { proto_set.insert(p); } } // find minimal set of proto entries required to reach maximum set // of nonlive peers while (nonlive_uuids.empty() == false && proto_set.empty() == false) { std::set::iterator maxel( std::max_element(proto_set.begin(), proto_set.end(), CmpUuidCounts(nonlive_uuids, segment_))); Proto* p(*maxel); log_debug << "relay set maxel :" << *p << " count: " << CmpUuidCounts(nonlive_uuids, segment_).count(p); relay_set_.insert(p->socket().get()); const LinkMap& lm(p->link_map()); for (LinkMap::const_iterator lm_i(lm.begin()); lm_i != lm.end(); ++lm_i) { nonlive_uuids.erase((*lm_i).uuid()); } proto_set.erase(maxel); } } else if (relaying_ == true && should_relay == false) { log_info << self_string() << " turning message relay requesting off"; relay_set_.clear(); relaying_ = false; } } gu::datetime::Date gcomm::GMCast::handle_timers() { const gu::datetime::Date now(gu::datetime::Date::now()); if (now >= next_check_) { check_liveness(); reconnect(); next_check_ = now + check_period_; } return next_check_; } void send(gcomm::Socket* s, gcomm::Datagram& dg) { int err; if ((err = s->send(dg)) != 0) { log_debug << "failed to send to " << s->remote_addr() << ": (" << err << ") " << strerror(err); } } void gcomm::GMCast::relay(const Message& msg, const Datagram& dg, const void* exclude_id) { Datagram relay_dg(dg); relay_dg.normalize(); Message relay_msg(msg); // reset all relay flags from message to be relayed relay_msg.set_flags(relay_msg.flags() & ~(Message::F_RELAY | Message::F_SEGMENT_RELAY)); // if F_RELAY is set in received message, relay to all peers except // the originator if (msg.flags() & Message::F_RELAY) { gu_trace(push_header(relay_msg, relay_dg)); for (SegmentMap::iterator i(segment_map_.begin()); i != segment_map_.end(); ++i) { Segment& segment(i->second); for (Segment::iterator j(segment.begin()); j != segment.end(); ++j) { if ((*j)->id() != exclude_id) { send(*j, relay_dg); } } } } else if (msg.flags() & Message::F_SEGMENT_RELAY) { if (relay_set_.empty() == false) { // send message to all nodes in relay set to reach // nodes in local segment that are not directly reachable relay_msg.set_flags(relay_msg.flags() | Message::F_RELAY); gu_trace(push_header(relay_msg, relay_dg)); for (std::set::iterator ri(relay_set_.begin()); ri != relay_set_.end(); ++ri) { send(*ri, relay_dg); } gu_trace(pop_header(relay_msg, relay_dg)); relay_msg.set_flags(relay_msg.flags() & ~Message::F_RELAY); } if (msg.segment_id() == segment_) { log_warn << "message with F_SEGMENT_RELAY from own segment, " << "source " << msg.source_uuid(); } // Relay to 
local segment gu_trace(push_header(relay_msg, relay_dg)); Segment& segment(segment_map_[segment_]); for (Segment::iterator i(segment.begin()); i != segment.end(); ++i) { send(*i, relay_dg); } } else { log_warn << "GMCast::relay() called without relay flags set"; } } void gcomm::GMCast::handle_up(const void* id, const Datagram& dg, const ProtoUpMeta& um) { ProtoMap::iterator i; if (listener_ == 0) { return; } if (id == listener_->id()) { gmcast_accept(); } else if (mcast_.get() != 0 && id == mcast_->id()) { Message msg; try { if (dg.offset() < dg.header_len()) { gu_trace(msg.unserialize(dg.header(), dg.header_size(), dg.header_offset() + dg.offset())); } else { gu_trace(msg.unserialize(&dg.payload()[0], dg.len(), dg.offset())); } } catch (gu::Exception& e) { GU_TRACE(e); log_warn << e.what(); return; } if (msg.type() >= Message::T_USER_BASE) { gu_trace(send_up(Datagram(dg, dg.offset() + msg.serial_size()), ProtoUpMeta(msg.source_uuid()))); } else { log_warn << "non-user message " << msg.type() << " from multicast socket"; } } else if ((i = proto_map_->find(id)) != proto_map_->end()) { Proto* p(ProtoMap::value(i)); if (dg.len() > 0) { const Proto::State prev_state(p->state()); if (prev_state == Proto::S_FAILED) { log_warn << "unhandled failed proto"; handle_failed(p); return; } Message msg; try { msg.unserialize(&dg.payload()[0], dg.len(), dg.offset()); } catch (gu::Exception& e) { GU_TRACE(e); log_warn << e.what(); p->set_state(Proto::S_FAILED); handle_failed(p); return; } if (msg.type() >= Message::T_USER_BASE) { if (evict_list().empty() == false && evict_list().find(msg.source_uuid()) != evict_list().end()) { return; } if (msg.flags() & (Message::F_RELAY | Message::F_SEGMENT_RELAY)) { relay(msg, Datagram(dg, dg.offset() + msg.serial_size()), id); } p->set_tstamp(gu::datetime::Date::now()); send_up(Datagram(dg, dg.offset() + msg.serial_size()), ProtoUpMeta(msg.source_uuid())); return; } else { try { p->set_tstamp(gu::datetime::Date::now()); gu_trace(p->handle_message(msg)); } catch (gu::Exception& e) { log_warn << "handling gmcast protocol message failed: " << e.what(); handle_failed(p); if (e.get_errno() == ENOTRECOVERABLE) { throw; } return; } if (p->state() == Proto::S_FAILED) { handle_failed(p); return; } else if (p->changed() == true) { update_addresses(); check_liveness(); reconnect(); } } if (prev_state != Proto::S_OK && p->state() == Proto::S_OK) { handle_established(p); } } else if (p->socket()->state() == Socket::S_CONNECTED && (p->state() == Proto::S_HANDSHAKE_WAIT || p->state() == Proto::S_INIT)) { handle_connected(p); } else if (p->socket()->state() == Socket::S_CONNECTED) { log_warn << "connection " << p->socket()->id() << " closed by peer"; p->set_state(Proto::S_FAILED); handle_failed(p); } else { log_debug << "socket in state " << p->socket()->state(); p->set_state(Proto::S_FAILED); handle_failed(p); } } else { // log_info << "proto entry " << id << " not found"; } } int gcomm::GMCast::handle_down(Datagram& dg, const ProtoDownMeta& dm) { Message msg(version_, Message::T_USER_BASE, uuid(), 1, segment_); // handle relay set first, skip these peers below if (relay_set_.empty() == false) { msg.set_flags(msg.flags() | Message::F_RELAY); gu_trace(push_header(msg, dg)); for (std::set::iterator ri(relay_set_.begin()); ri != relay_set_.end(); ++ri) { send(*ri, dg); } gu_trace(pop_header(msg, dg)); msg.set_flags(msg.flags() & ~Message::F_RELAY); } for (SegmentMap::iterator si(segment_map_.begin()); si != segment_map_.end(); ++si) { uint8_t segment_id(si->first); Segment& 
segment(si->second); if (segment_id != segment_) { size_t target_idx((self_index_ + segment_id) % segment.size()); msg.set_flags(msg.flags() | Message::F_SEGMENT_RELAY); // skip peers that are in relay set if (relay_set_.empty() == true || relay_set_.find(segment[target_idx]) == relay_set_.end()) { gu_trace(push_header(msg, dg)); send(segment[target_idx], dg); gu_trace(pop_header(msg, dg)); } } else { msg.set_flags(msg.flags() & ~Message::F_SEGMENT_RELAY); gu_trace(push_header(msg, dg)); for (Segment::iterator i(segment.begin()); i != segment.end(); ++i) { // skip peers that are in relay set if (relay_set_.empty() == true || relay_set_.find(*i) == relay_set_.end()) { send(*i, dg); } } gu_trace(pop_header(msg, dg)); } } return 0; } void gcomm::GMCast::handle_stable_view(const View& view) { log_debug << "GMCast::handle_stable_view: " << view; if (view.type() == V_PRIM) { // discard addr list entries not in view std::set gmcast_lst; for (AddrList::const_iterator i(remote_addrs_.begin()); i != remote_addrs_.end(); ++i) { gmcast_lst.insert(i->second.uuid()); } std::set view_lst; for (NodeList::const_iterator i(view.members().begin()); i != view.members().end(); ++i) { view_lst.insert(i->first); } std::list diff; std::set_difference(gmcast_lst.begin(), gmcast_lst.end(), view_lst.begin(), view_lst.end(), std::back_inserter(diff)); // Forget partitioned entries, allow them to reconnect // in time_wait_/2. Left nodes are given time_wait_ ban for // reconnecting when handling V_REG below. for (std::list::const_iterator i(diff.begin()); i != diff.end(); ++i) { gmcast_forget(*i, time_wait_/2); } // mark nodes in view as stable for (std::set::const_iterator i(view_lst.begin()); i != view_lst.end(); ++i) { AddrList::iterator ai; if ((ai = find_if(remote_addrs_.begin(), remote_addrs_.end(), AddrListUUIDCmp(*i))) != remote_addrs_.end()) { ai->second.set_retry_cnt(-1); ai->second.set_max_retries(max_retry_cnt_); } } // iterate over pending address list and discard entries without UUID for (AddrList::iterator i(pending_addrs_.begin()); i != pending_addrs_.end(); ) { AddrList::iterator i_next(i); ++i_next; const AddrEntry& ae(AddrList::value(i)); if (ae.uuid() == UUID()) { const std::string addr(AddrList::key(i)); log_info << "discarding pending addr without UUID: " << addr; for (ProtoMap::iterator pi(proto_map_->begin()); pi != proto_map_->end();) { ProtoMap::iterator pi_next(pi); ++pi_next; Proto* p(ProtoMap::value(pi)); if (p->remote_addr() == addr) { log_info << "discarding pending addr proto entry " << p; erase_proto(pi); } pi = pi_next; } pending_addrs_.erase(i); } i = i_next; } } else if (view.type() == V_REG) { for (NodeList::const_iterator i(view.members().begin()); i != view.members().end(); ++i) { AddrList::iterator ai; if ((ai = find_if(remote_addrs_.begin(), remote_addrs_.end(), AddrListUUIDCmp(NodeList::key(i)))) != remote_addrs_.end()) { log_info << "declaring " << NodeList::key(i) << " at " << handle_get_address(NodeList::key(i)) << " stable"; ai->second.set_retry_cnt(-1); ai->second.set_max_retries(max_retry_cnt_); } } // Forget left nodes for (NodeList::const_iterator i(view.left().begin()); i != view.left().end(); ++i) { gmcast_forget(NodeList::key(i), time_wait_); } } check_liveness(); for (ProtoMap::const_iterator i(proto_map_->begin()); i != proto_map_->end(); ++i) { log_debug << "proto: " << *ProtoMap::value(i); } } void gcomm::GMCast::handle_evict(const UUID& uuid) { if (is_evicted(uuid) == true) { return; } gmcast_forget(uuid, time_wait_); } std::string 
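// (The lookup below scans remote_addrs_ for an entry whose UUID matches and
// returns its address, or an empty string when the UUID is not known.)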
gcomm::GMCast::handle_get_address(const UUID& uuid) const { AddrList::const_iterator ali( find_if(remote_addrs_.begin(), remote_addrs_.end(), AddrListUUIDCmp(uuid))); return (ali == remote_addrs_.end() ? "" : AddrList::key(ali)); } void gcomm::GMCast::add_or_del_addr(const std::string& val) { if (val.compare(0, 4, "add:") == 0) { gu::URI uri(val.substr(4)); std::string addr(gu::net::resolve(uri_string(get_scheme(use_ssl_), uri.get_host(), uri.get_port())).to_string()); log_info << "inserting address '" << addr << "'"; insert_address(addr, UUID(), remote_addrs_); AddrList::iterator ai(remote_addrs_.find(addr)); AddrList::value(ai).set_max_retries( max_initial_reconnect_attempts_); AddrList::value(ai).set_retry_cnt(-1); } else if (val.compare(0, 4, "del:") == 0) { std::string addr(val.substr(4)); AddrList::iterator ai(remote_addrs_.find(addr)); if (ai != remote_addrs_.end()) { ProtoMap::iterator pi, pi_next; for (pi = proto_map_->begin(); pi != proto_map_->end(); pi = pi_next) { pi_next = pi, ++pi_next; Proto* rp = ProtoMap::value(pi); if (rp->remote_addr() == AddrList::key(ai)) { log_info << "deleting entry " << AddrList::key(ai); erase_proto(pi); } } AddrEntry& ae(AddrList::value(ai)); ae.set_max_retries(0); ae.set_retry_cnt(1); ae.set_next_reconnect(gu::datetime::Date::now() + time_wait_); update_addresses(); } else { log_info << "address '" << addr << "' not found from remote addrs list"; } } else { gu_throw_error(EINVAL) << "invalid addr spec '" << val << "'"; } } bool gcomm::GMCast::set_param(const std::string& key, const std::string& val) { try { if (key == Conf::GMCastMaxInitialReconnectAttempts) { max_initial_reconnect_attempts_ = gu::from_string(val); return true; } else if (key == Conf::GMCastPeerAddr) { try { add_or_del_addr(val); } catch (gu::NotFound& nf) { gu_throw_error(EINVAL) << "invalid addr spec '" << val << "'"; } catch (gu::NotSet& ns) { gu_throw_error(EINVAL) << "invalid addr spec '" << val << "'"; } return true; } else if (key == Conf::GMCastIsolate) { isolate_ = gu::from_string(val); log_info << "turning isolation " << (isolate_ == true ? "on" : "off"); if (isolate_ == true) { // delete all entries in proto map ProtoMap::iterator pi, pi_next; for (pi = proto_map_->begin(); pi != proto_map_->end(); pi = pi_next) { pi_next = pi, ++pi_next; erase_proto(pi); } segment_map_.clear(); } return true; } else if (key == Conf::SocketRecvBufSize) { gu_trace(Conf::check_recv_buf_size(val)); conf_.set(key, val); for (ProtoMap::iterator pi(proto_map_->begin()); pi != proto_map_->end(); ++pi) { gu_trace(pi->second->socket()->set_option(key, val)); // erase_proto(pi++); } // segment_map_.clear(); // reconnect(); return true; } else if (key == Conf::GMCastGroup || key == Conf::GMCastListenAddr || key == Conf::GMCastMCastAddr || key == Conf::GMCastMCastPort || key == Conf::GMCastMCastTTL || key == Conf::GMCastTimeWait || key == Conf::GMCastPeerTimeout || key == Conf::GMCastSegment) { gu_throw_error(EPERM) << "can't change value during runtime"; } } catch (gu::Exception& e) { GU_TRACE(e); throw; } catch (std::exception& e) { gu_throw_error(EINVAL) << e.what(); } catch (...) 
{ gu_throw_error(EINVAL) << "exception"; } return false; } galera-3-25.3.20/gcomm/src/evs_node.hpp0000644000015300001660000001040713042054732017344 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy */ #ifndef EVS_NODE_HPP #define EVS_NODE_HPP #include "evs_message2.hpp" #include "gcomm/map.hpp" #include "gcomm/uuid.hpp" #include "gu_datetime.hpp" #include "gu_logger.hpp" #include #include namespace gcomm { namespace evs { class Node; class NodeMap; std::ostream& operator<<(std::ostream&, const Node&); class InspectNode; class OperationalSelect; class Proto; } } class gcomm::evs::Node { public: Node(const Proto& proto) : proto_ (proto), index_ (std::numeric_limits::max()), operational_ (true), suspected_ (false), inactive_ (false), committed_ (false), installed_ (false), join_message_ (0), leave_message_ (0), delayed_list_message_(0), tstamp_ (gu::datetime::Date::now()), seen_tstamp_ (tstamp_), fifo_seq_ (-1), segment_ (0) {} Node(const Node& n); ~Node(); void set_index(const size_t idx) { index_ = idx; } size_t index() const { return index_; } void set_operational(const bool op) { gcomm_assert(op == false); operational_ = op; } bool operational() const { return operational_; } void set_suspected(const bool s) { suspected_ = s; } bool suspected() const { return suspected_; } void set_committed(const bool comm) { committed_ = comm; } bool committed() const { return committed_; } void set_installed(const bool inst) { installed_ = inst; } bool installed() const { return installed_; } void set_join_message(const JoinMessage* msg); const JoinMessage* join_message() const { return join_message_; } void set_leave_message(const LeaveMessage* msg); const LeaveMessage* leave_message() const { return leave_message_; } void set_delayed_list_message(const DelayedListMessage* msg); const DelayedListMessage *delayed_list_message() const { return delayed_list_message_; } void set_tstamp(const gu::datetime::Date& t) { tstamp_ = t; } const gu::datetime::Date& tstamp() const { return tstamp_; } void set_seen_tstamp(const gu::datetime::Date& t) { seen_tstamp_ = t; } const gu::datetime::Date& seen_tstamp() const { return seen_tstamp_; } void set_fifo_seq(const int64_t seq) { fifo_seq_ = seq; } int64_t fifo_seq() const { return fifo_seq_; } SegmentId segment() const { return segment_; } bool is_inactive() const; bool is_suspected() const; private: void operator=(const Node&); friend class InspectNode; const Proto& proto_; // Index for input map size_t index_; // True if instance is considered to be operational (has produced messages) bool operational_; bool suspected_; bool inactive_; // True if it is known that the instance has committed to install message bool committed_; // True if it is known that the instance has installed current view bool installed_; // Last received JOIN message JoinMessage* join_message_; // Leave message LeaveMessage* leave_message_; // Delayed list message DelayedListMessage* delayed_list_message_; // Timestamp denoting the last time a message from node // advanced input map state or membership protocol. This is used // for determining if the node should become suspected/inactive. gu::datetime::Date tstamp_; // Timestamp denoting the time when the node was seen last time. // This is used to decide if the node should be considered delayed. 
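    // (The out-of-line is_suspected()/is_inactive() helpers declared above
    // are driven by tstamp_, while delayed-node bookkeeping is based on
    // seen_tstamp_. As a rough, illustrative sketch only -- the period
    // names here are hypothetical, the real ones live with the owning
    // Proto referenced via proto_:
    //   suspected ~ now() > tstamp_ + suspect_period
    //   inactive  ~ now() > tstamp_ + inactive_period )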
gu::datetime::Date seen_tstamp_; int64_t fifo_seq_; SegmentId segment_; }; class gcomm::evs::NodeMap : public Map { }; class gcomm::evs::OperationalSelect { public: OperationalSelect(NodeMap& nm_) : nm(nm_) { } void operator()(const NodeMap::value_type& vt) const { if (NodeMap::value(vt).operational() == true) { nm.insert_unique(vt); } } private: NodeMap& nm; }; class gcomm::evs::InspectNode { public: void operator()(std::pair& p) const; }; #endif // EVS_NODE_HPP galera-3-25.3.20/gcomm/src/evs_message2.hpp0000644000015300001660000005524213042054732020133 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy * * $Id$ */ #ifndef EVS_MESSAGE2_HPP #define EVS_MESSAGE2_HPP #include "gcomm/order.hpp" #include "gcomm/view.hpp" #include "gcomm/map.hpp" #include "evs_seqno.hpp" #include "protocol_version.hpp" #include "gu_datetime.hpp" #include "gu_convert.hpp" namespace gcomm { namespace evs { class MessageNode; std::ostream& operator<<(std::ostream&, const MessageNode&); class MessageNodeList; class Message; std::ostream& operator<<(std::ostream&, const Message&); class UserMessage; class AggregateMessage; std::ostream& operator<<(std::ostream&, const AggregateMessage&); class DelegateMessage; class GapMessage; class JoinMessage; class LeaveMessage; class InstallMessage; class DelayedListMessage; class SelectNodesOp; class RangeLuCmp; class RangeHsCmp; } } class gcomm::evs::MessageNode { public: MessageNode(const bool operational = false, const bool suspected = false, const SegmentId segment = 0, const bool evicted = false, const seqno_t leave_seq = -1, const ViewId& view_id = ViewId(V_REG), const seqno_t safe_seq = -1, const Range im_range = Range()) : operational_(operational), suspected_ (suspected ), segment_ (segment ), evicted_ (evicted ), leave_seq_ (leave_seq ), view_id_ (view_id ), safe_seq_ (safe_seq ), im_range_ (im_range ) { } MessageNode(const MessageNode& mn) : operational_ (mn.operational_), suspected_ (mn.suspected_ ), segment_ (mn.segment_ ), evicted_ (mn.evicted_ ), leave_seq_ (mn.leave_seq_ ), view_id_ (mn.view_id_ ), safe_seq_ (mn.safe_seq_ ), im_range_ (mn.im_range_ ) { } bool operational() const { return operational_ ; } bool suspected() const { return suspected_ ; } bool evicted() const { return evicted_ ; } bool leaving() const { return (leave_seq_ != -1) ; } seqno_t leave_seq() const { return leave_seq_ ; } const ViewId& view_id() const { return view_id_ ; } seqno_t safe_seq() const { return safe_seq_ ; } Range im_range() const { return im_range_ ; } SegmentId segment() const { return segment_ ; } bool operator==(const MessageNode& cmp) const { return (operational_ == cmp.operational_ && suspected_ == cmp.suspected_ && leave_seq_ == cmp.leave_seq_ && view_id_ == cmp.view_id_ && safe_seq_ == cmp.safe_seq_ && im_range_ == cmp.im_range_); } size_t serialize(gu::byte_t* buf, size_t buflen, size_t offset) const; size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset); static size_t serial_size(); private: enum { F_OPERATIONAL = 1 << 0, F_SUSPECTED = 1 << 1, F_EVICTED = 1 << 2 }; bool operational_; // Is operational bool suspected_; SegmentId segment_; bool evicted_; // Evicted out of the cluster seqno_t leave_seq_; ViewId view_id_; // Current view as seen by source of this message seqno_t safe_seq_; // Safe seq as seen... Range im_range_; // Input map range as seen... }; class gcomm::evs::MessageNodeList : public gcomm::Map { }; /*! 
* EVS message class */ class gcomm::evs::Message { public: enum Type { T_NONE = 0, T_USER = 1, /*!< User generated message */ T_DELEGATE = 2, /*!< Delegate message */ T_GAP = 3, /*!< Gap message */ T_JOIN = 4, /*!< Join message */ T_INSTALL = 5, /*!< Install message */ T_LEAVE = 6, /*!< Leave message */ T_DELAYED_LIST = 7 /*!< Evict list message */ }; typedef std::map DelayedList; static const uint8_t F_MSG_MORE = 0x1; /*!< Sender has more messages to send */ static const uint8_t F_RETRANS = 0x2; /*!< Message is resent upon request */ /*! * @brief Message source has been set explicitly via set_source() */ static const uint8_t F_SOURCE = 0x4; static const uint8_t F_AGGREGATE= 0x8; /*!< Message contains aggregated payload */ static const uint8_t F_COMMIT = 0x10; static const uint8_t F_BC = 0x20;/*!< Message was sent in backward compatibility mode */ /*! * Get version of the message * * @return Version number */ uint8_t version() const { return version_; } /*! * Get type of the message * * @return Message type */ Type type() const { return type_; } /*! * Check wheter message is of membership type * * @return True if message is of membership type, otherwise false */ bool is_membership() const { return (type_ == T_JOIN || type_ == T_INSTALL || type_ == T_LEAVE || type_ == T_DELAYED_LIST); } /*! * Get user type of the message. This is applicable only for * messages of type T_USER. * * @return User type of the message. */ uint8_t user_type() const { return user_type_; } /*! * Get message order type. * * @return Order type of the message. */ Order order() const { return order_; } /*! * Get sequence number associated to the message. * * @return Const reference to sequence number associated to the message. */ seqno_t seq() const { return seq_; } /*! * Get sequence numer range associated to the message. * * @return Sequence number range associated to the message. */ seqno_t seq_range() const { return seq_range_; } /*! * Get all-received-upto sequence number associated the the message. * * @return All-received-upto sequence number associated to the message. */ seqno_t aru_seq() const { return aru_seq_; } void set_flags(uint8_t flags) { flags_ = flags; } /*! * Get message flags. * * @return Message flags. */ uint8_t flags() const { return flags_; } /*! * Set message source * * @param uuid Source node uuid */ void set_source(const UUID& uuid) { source_ = uuid; flags_ |= F_SOURCE; } /*! * Get message source UUID. * * @return Message source UUID. */ const UUID& source() const { return source_; } /*! * Get message source view id, view where the message was originated * from. * * @return Message source view id. */ const gcomm::ViewId& source_view_id() const { return source_view_id_; } const gcomm::ViewId& install_view_id() const { return install_view_id_; } /*! * Get range UUID associated to the message. * * @return Range UUID associated to the message. */ const UUID& range_uuid() const { return range_uuid_; } /*! * Get range associated to the message. * * @return Range associated to the message. */ Range range() const { return range_; } /*! * Get fifo sequence number associated to the message. This is * applicable only for messages of membership type. * * @return Fifo sequence number associated to the message. */ int64_t fifo_seq() const { return fifo_seq_; } /*! * Get message node list. * * @return Const reference to message node list. */ const MessageNodeList& node_list() const { return node_list_; } /*! * Get timestamp associated to the message. 
*/ gu::datetime::Date tstamp() const { return tstamp_; } size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset); bool operator==(const Message& cmp) const; /*! * Copy constructor. */ Message(const Message& msg) : version_ (msg.version_), type_ (msg.type_), user_type_ (msg.user_type_), order_ (msg.order_), seq_ (msg.seq_), seq_range_ (msg.seq_range_), aru_seq_ (msg.aru_seq_), fifo_seq_ (msg.fifo_seq_), flags_ (msg.flags_), source_ (msg.source_), source_view_id_ (msg.source_view_id_), install_view_id_ (msg.install_view_id_), range_uuid_ (msg.range_uuid_), range_ (msg.range_), tstamp_ (msg.tstamp_), node_list_ (msg.node_list_), delayed_list_ (msg.delayed_list_) { } Message& operator=(const Message& msg) { version_ = msg.version_; type_ = msg.type_; user_type_ = msg.user_type_; order_ = msg.order_; seq_ = msg.seq_; seq_range_ = msg.seq_range_; aru_seq_ = msg.aru_seq_; fifo_seq_ = msg.fifo_seq_; flags_ = msg.flags_; source_ = msg.source_; source_view_id_ = msg.source_view_id_; install_view_id_ = msg.install_view_id_; range_uuid_ = msg.range_uuid_; range_ = msg.range_; tstamp_ = msg.tstamp_; node_list_ = msg.node_list_; delayed_list_ = msg.delayed_list_; return *this; } virtual ~Message() { } /*! Default constructor */ Message(const uint8_t version = 0, const Type type = T_NONE, const UUID& source = UUID::nil(), const ViewId& source_view_id = ViewId(), const ViewId& install_view_id = ViewId(), const uint8_t user_type = 0xff, const Order order = O_DROP, const int64_t fifo_seq = -1, const seqno_t seq = -1, const seqno_t seq_range = -1, const seqno_t aru_seq = -1, const uint8_t flags = 0, const UUID& range_uuid = UUID(), const Range range = Range(), const MessageNodeList& node_list = MessageNodeList()) : version_ (version), type_ (type), user_type_ (user_type), order_ (order), seq_ (seq), seq_range_ (seq_range), aru_seq_ (aru_seq), fifo_seq_ (fifo_seq), flags_ (flags), source_ (source), source_view_id_ (source_view_id), install_view_id_ (install_view_id), range_uuid_ (range_uuid), range_ (range), tstamp_ (gu::datetime::Date::now()), node_list_ (node_list), delayed_list_ () { } protected: size_t serialize(gu::byte_t* buf, size_t buflen, size_t offset) const; size_t serial_size() const; // Version number: // For User, Gap, Leave messages that are exchanged only within a group // the version is minimum commonly supported version among the group, // computed during GATHER phase. // For Join, Install messages version is maximum supported protocol // version by the joiner. uint8_t version_; Type type_; uint8_t user_type_; Order order_; seqno_t seq_; seqno_t seq_range_; seqno_t aru_seq_; int64_t fifo_seq_; uint8_t flags_; UUID source_; ViewId source_view_id_; ViewId install_view_id_; UUID range_uuid_; Range range_; gu::datetime::Date tstamp_; MessageNodeList node_list_; DelayedList delayed_list_; }; /*! * User message class. 
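 * By default a user message is sent with O_SAFE ordering and a zero
 * seq_range (see the constructor defaults below); the aru_seq field can
 * be updated after construction via set_aru_seq().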
*/ class gcomm::evs::UserMessage : public Message { public: UserMessage(const int version = -1, const UUID& source = UUID::nil(), const ViewId& source_view_id = ViewId(), const seqno_t seq = -1, const seqno_t aru_seq = -1, const seqno_t seq_range = 0, const Order order = O_SAFE, const int64_t fifo_seq = -1, const uint8_t user_type = 0xff, const uint8_t flags = 0) : Message(version, Message::T_USER, source, source_view_id, ViewId(), user_type, order, fifo_seq, seq, seq_range, aru_seq, flags, UUID(), Range()) { } void set_aru_seq(const seqno_t as) { aru_seq_ = as; } size_t serialize(gu::byte_t* buf, size_t buflen, size_t offset) const; size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset, bool skip_header = false); size_t serial_size() const; }; class gcomm::evs::AggregateMessage { public: AggregateMessage(const int flags = 0, const size_t len = 0, const uint8_t user_type = 0xff) : flags_ (gu::convert(flags, uint8_t(0))), user_type_(user_type), len_ (gu::convert(len, uint16_t(0))) { } int flags() const { return flags_; } size_t len() const { return len_; } uint8_t user_type() const { return user_type_; } size_t serialize(gu::byte_t* buf, size_t buflen, size_t offset) const; size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset); size_t serial_size() const; bool operator==(const AggregateMessage& cmp) const { return (flags_ == cmp.flags_ && len_ == cmp.len_ && user_type_ == cmp.user_type_); } private: uint8_t flags_; uint8_t user_type_; uint16_t len_; }; inline std::ostream& gcomm::evs::operator<<(std::ostream& os, const AggregateMessage& am) { return (os << "{flags=" << am.flags() << ",len=" << am.len() << "}"); } class gcomm::evs::DelegateMessage : public Message { public: DelegateMessage(const int version = -1, const UUID& source = UUID::nil(), const ViewId& source_view_id = ViewId(), const int64_t fifo_seq = -1) : Message(version, T_DELEGATE, source, source_view_id, ViewId(), 0xff, O_UNRELIABLE, fifo_seq) { } size_t serialize(gu::byte_t* buf, size_t buflen, size_t offset) const; size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset, bool skip_header = false); size_t serial_size() const; }; class gcomm::evs::GapMessage : public Message { public: GapMessage(const int version = -1, const UUID& source = UUID::nil(), const ViewId& source_view_id = ViewId(), const seqno_t seq = -1, const seqno_t aru_seq = -1, const int64_t fifo_seq = -1, const UUID& range_uuid = UUID::nil(), const Range range = Range(), const uint8_t flags = 0) : Message(version, T_GAP, source, source_view_id, ViewId(), 0xff, O_UNRELIABLE, fifo_seq, seq, -1, aru_seq, flags, range_uuid, range) { } size_t serialize(gu::byte_t* buf, size_t buflen, size_t offset) const; size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset, bool skip_header = false); size_t serial_size() const; }; class gcomm::evs::JoinMessage : public Message { public: JoinMessage(const int max_version = 0, const UUID& source = UUID::nil(), const ViewId& source_view_id = ViewId(), const seqno_t seq = -1, const seqno_t aru_seq = -1, const int64_t fifo_seq = -1, const MessageNodeList& node_list = MessageNodeList()) : Message(max_version, Message::T_JOIN, source, source_view_id, ViewId(), 0xff, O_UNRELIABLE, fifo_seq, seq, -1, aru_seq, 0, UUID(), Range(), node_list) { } size_t serialize(gu::byte_t* buf, size_t buflen, size_t offset) const; size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset, bool skip_header = false); size_t serial_size() const; }; class gcomm::evs::InstallMessage 
: public Message { public: InstallMessage(const int max_version = 0, const UUID& source = UUID::nil(), const ViewId& source_view_id = ViewId(), const ViewId& install_view_id = ViewId(), const seqno_t seq = -1, const seqno_t aru_seq = -1, const int64_t fifo_seq = -1, const MessageNodeList& node_list = MessageNodeList()) : Message(max_version, Message::T_INSTALL, source, source_view_id, install_view_id, 0xff, O_UNRELIABLE, fifo_seq, seq, -1, aru_seq, F_SOURCE, UUID(), Range(), node_list) { } size_t serialize(gu::byte_t* buf, size_t buflen, size_t offset) const; size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset, bool skip_header = false); size_t serial_size() const; }; class gcomm::evs::LeaveMessage : public Message { public: LeaveMessage(const int version = -1, const UUID& source = UUID::nil(), const ViewId& source_view_id = ViewId(), const seqno_t seq = -1, const seqno_t aru_seq = -1, const int64_t fifo_seq = -1, const uint8_t flags = 0) : Message(version, T_LEAVE, source, source_view_id, ViewId(), 0xff, O_UNRELIABLE, fifo_seq, seq, -1, aru_seq, flags) { } size_t serialize(gu::byte_t* buf, size_t buflen, size_t offset) const; size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset, bool skip_header = false); size_t serial_size() const; }; class gcomm::evs::DelayedListMessage : public Message { public: DelayedListMessage(const int version = -1, const UUID& source = UUID::nil(), const ViewId& source_view_id = ViewId(), const seqno_t fifo_seq = -1) : Message(version, T_DELAYED_LIST, source, source_view_id, ViewId(), 0xff, O_DROP, fifo_seq) { } void add(const UUID& uuid, uint16_t cnt) { delayed_list_.insert(std::make_pair(uuid, cnt)); } const DelayedList& delayed_list() const { return delayed_list_; } size_t serialize(gu::byte_t* buf, size_t buflen, size_t offset) const; size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset, bool skip_header = false); size_t serial_size() const; bool operator==(const DelayedListMessage& cmp) const { return (delayed_list_ == cmp.delayed_list_); } private: }; class gcomm::evs::SelectNodesOp { public: SelectNodesOp(MessageNodeList& nl, const gcomm::ViewId& view_id, const bool operational, const bool leaving) : nl_ (nl), view_id_ (view_id), operational_ (operational), leaving_ (leaving) { } void operator()(const MessageNodeList::value_type& vt) const { const MessageNode& node(MessageNodeList::value(vt)); if ((view_id_ == ViewId() || node.view_id() == view_id_ ) && ((operational_ == true && leaving_ == true ) || (node.operational() == operational_ && node.leaving() == leaving_ ) ) ) { nl_.insert_unique(vt); } } private: MessageNodeList& nl_; ViewId const view_id_; bool const operational_; bool const leaving_; }; class gcomm::evs::RangeLuCmp { public: bool operator()(const MessageNodeList::value_type& a, const MessageNodeList::value_type& b) const { gcomm_assert(MessageNodeList::value(a).view_id() == MessageNodeList::value(b).view_id()); return (MessageNodeList::value(a).im_range().lu() < MessageNodeList::value(b).im_range().lu()); } }; class gcomm::evs::RangeHsCmp { public: bool operator()(const MessageNodeList::value_type& a, const MessageNodeList::value_type& b) const { gcomm_assert(MessageNodeList::value(a).view_id() == MessageNodeList::value(b).view_id()); return (MessageNodeList::value(a).im_range().hs() < MessageNodeList::value(b).im_range().hs()); } }; #endif // EVS_MESSAGE2_HPP galera-3-25.3.20/gcomm/src/asio_protonet.cpp0000644000015300001660000001002713042054732020420 0ustar jenkinsjenkins/* * Copyright 
(C) 2010 Codership Oy */ #include "asio_tcp.hpp" #include "asio_udp.hpp" #include "asio_protonet.hpp" #include "socket.hpp" #include "gcomm/util.hpp" #include "gcomm/conf.hpp" #include "gu_logger.hpp" #include #include #include #include gcomm::AsioProtonet::AsioProtonet(gu::Config& conf, int version) : gcomm::Protonet(conf, "asio", version), mutex_(), poll_until_(gu::datetime::Date::max()), io_service_(), timer_(io_service_), #ifdef HAVE_ASIO_SSL_HPP ssl_context_(io_service_, asio::ssl::context::sslv23), #endif // HAVE_ASIO_SSL_HPP mtu_(1 << 15), checksum_(NetHeader::checksum_type( conf.get(gcomm::Conf::SocketChecksum, NetHeader::CS_CRC32C))) { conf.set(gcomm::Conf::SocketChecksum, checksum_); #ifdef HAVE_ASIO_SSL_HPP // use ssl if either private key or cert file is specified bool use_ssl(conf_.is_set(gu::conf::ssl_key) == true || conf_.is_set(gu::conf::ssl_cert) == true); try { // overrides use_ssl if set explicitly use_ssl = conf_.get(gu::conf::use_ssl); } catch (gu::NotSet& nf) {} if (use_ssl == true) { conf_.set(gu::conf::use_ssl, true); log_info << "initializing ssl context"; gu::ssl_prepare_context(conf_, ssl_context_); } #endif // HAVE_ASIO_SSL_HPP } gcomm::AsioProtonet::~AsioProtonet() { } void gcomm::AsioProtonet::enter() { mutex_.lock(); } void gcomm::AsioProtonet::leave() { mutex_.unlock(); } gcomm::SocketPtr gcomm::AsioProtonet::socket(const gu::URI& uri) { if (uri.get_scheme() == "tcp" || uri.get_scheme() == "ssl") { return boost::shared_ptr(new AsioTcpSocket(*this, uri)); } else if (uri.get_scheme() == "udp") { return boost::shared_ptr(new AsioUdpSocket(*this, uri)); } else { gu_throw_fatal << "scheme '" << uri.get_scheme() << "' not implemented"; } } gcomm::Acceptor* gcomm::AsioProtonet::acceptor(const gu::URI& uri) { return new AsioTcpAcceptor(*this, uri); } gu::datetime::Period handle_timers_helper(gcomm::Protonet& pnet, const gu::datetime::Period& period) { const gu::datetime::Date now(gu::datetime::Date::now()); const gu::datetime::Date stop(now + period); const gu::datetime::Date next_time(pnet.handle_timers()); const gu::datetime::Period sleep_p(std::min(stop - now, next_time - now)); return (sleep_p < 0 ? 
0 : sleep_p); } void gcomm::AsioProtonet::event_loop(const gu::datetime::Period& period) { io_service_.reset(); poll_until_ = gu::datetime::Date::now() + period; const gu::datetime::Period p(handle_timers_helper(*this, period)); timer_.expires_from_now(boost::posix_time::nanosec(p.get_nsecs())); timer_.async_wait(boost::bind(&AsioProtonet::handle_wait, this, asio::placeholders::error)); io_service_.run(); } void gcomm::AsioProtonet::dispatch(const SocketId& id, const Datagram& dg, const ProtoUpMeta& um) { for (std::deque::iterator i = protos_.begin(); i != protos_.end(); ++i) { (*i)->dispatch(id, dg, um); } } void gcomm::AsioProtonet::interrupt() { io_service_.stop(); } void gcomm::AsioProtonet::handle_wait(const asio::error_code& ec) { gu::datetime::Date now(gu::datetime::Date::now()); const gu::datetime::Period p(handle_timers_helper(*this, poll_until_ - now)); using std::rel_ops::operator>=; if (ec == asio::error_code() && poll_until_ >= now) { timer_.expires_from_now(boost::posix_time::nanosec(p.get_nsecs())); timer_.async_wait(boost::bind(&AsioProtonet::handle_wait, this, asio::placeholders::error)); } else { io_service_.stop(); } } galera-3-25.3.20/gcomm/src/gmcast_link.hpp0000644000015300001660000000474513042054732020045 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy */ #ifndef GCOMM_GMCAST_LINK_HPP #define GCOMM_GMCAST_LINK_HPP #include "gcomm/uuid.hpp" #include #include namespace gcomm { namespace gmcast { class Link; class LinkMapCmp; class LinkMap; std::ostream& operator<<(std::ostream& os, const LinkMap&); } } class gcomm::gmcast::Link { public: Link(const gcomm::UUID& uuid, const std::string& addr, const std::string& mcast_addr) : uuid_ (uuid), addr_ (addr), mcast_addr_(mcast_addr) { } bool operator==(const Link& cmp) const { return (uuid_ == cmp.uuid_ && addr_ == cmp.addr_); } bool operator<(const Link& cmp) const { return (uuid_ < cmp.uuid_ || (uuid_ == cmp.uuid_ && addr_ < cmp.addr_)); } const gcomm::UUID& uuid() const { return uuid_; } const std::string& addr() const { return addr_; } const std::string& mcast_addr() const { return mcast_addr_; } private: UUID uuid_; std::string addr_; std::string mcast_addr_; }; class gcomm::gmcast::LinkMap { typedef std::set MType; public: LinkMap() : link_map_() { } typedef MType::iterator iterator; typedef MType::const_iterator const_iterator; typedef MType::value_type value_type; std::pair insert(const Link& i) { return link_map_.insert(i); } iterator begin() { return link_map_.begin(); } const_iterator begin() const { return link_map_.begin(); } iterator end() { return link_map_.end(); } const_iterator end() const { return link_map_.end(); } const_iterator find(const value_type& vt) const { return link_map_.find(vt); } size_t size() const { return link_map_.size(); } static const UUID& key(const_iterator i) { return i->uuid(); } static const Link& value(const_iterator i) { return *i; } static const UUID& key(const value_type& vt) { return vt.uuid(); } static const Link& value(const value_type& vt) { return vt; } bool operator==(const LinkMap& cmp) const { return (link_map_ == cmp.link_map_); } private: MType link_map_; }; inline std::ostream& gcomm::gmcast::operator<<(std::ostream& os, const LinkMap& lm) { for (LinkMap::const_iterator i = lm.begin(); i != lm.end(); ++i) { os << "\n(" << LinkMap::key(i) << "," << LinkMap::value(i).addr() << ")"; } return (os << "\n"); } #endif // GCOMM_GMCAST_LINK_HPP galera-3-25.3.20/gcomm/src/protostack.cpp0000644000015300001660000000344513042054732017732 0ustar jenkinsjenkins/* * 
Copyright (C) 2009 Codership Oy */ #include "gcomm/protostack.hpp" #include "socket.hpp" #include "gcomm/util.hpp" void gcomm::Protostack::push_proto(Protolay* p) { Critical crit(*this); protos_.push_front(p); // connect the pushed Protolay that's now on top // with the one that was previously on top, // if we had one, of course. if (protos_.size() > 1) { gcomm::connect(protos_[1], p); } } void gcomm::Protostack::pop_proto(Protolay* p) { Critical crit(*this); assert(protos_.front() == p); if (protos_.front() != p) { log_warn << "Protolay " << p << " is not protostack front"; return; } protos_.pop_front(); if (protos_.begin() != protos_.end()) { gcomm::disconnect(*protos_.begin(), p); } } gu::datetime::Date gcomm::Protostack::handle_timers() { gu::datetime::Date ret(gu::datetime::Date::max()); Critical crit(*this); for (std::deque::reverse_iterator i = protos_.rbegin(); i != protos_.rend(); ++i) { gu::datetime::Date t((*i)->handle_timers()); if (t < ret) ret = t; } return ret; } void gcomm::Protostack::dispatch(const void* id, const Datagram& dg, const ProtoUpMeta& um) { Critical crit(*this); if (protos_.empty() == false) { protos_.back()->handle_up(id, dg, um); } } bool gcomm::Protostack::set_param(const std::string& key, const std::string& val) { bool ret(false); for (std::deque::iterator i(protos_.begin()); i != protos_.end(); ++i) { ret |= (*i)->set_param(key, val); } return ret; } galera-3-25.3.20/gcomm/src/pc_message.hpp0000644000015300001660000002640313042054732017653 0ustar jenkinsjenkins/* * Copyright (C) 2009-2012 Codership Oy */ #ifndef PC_MESSAGE_HPP #define PC_MESSAGE_HPP #include "gcomm/view.hpp" #include "gcomm/types.hpp" #include "gcomm/uuid.hpp" #include "gcomm/map.hpp" #include "gu_serialize.hpp" #include "protocol_version.hpp" #include namespace gcomm { namespace pc { class Node; class NodeMap; class Message; class UserMessage; class StateMessage; class InstallMessage; std::ostream& operator<<(std::ostream&, const Node&); std::ostream& operator<<(std::ostream&, const Message&); bool operator==(const Message&, const Message&); } } class gcomm::pc::Node { public: enum Flags { F_PRIM = 0x1, F_WEIGHT = 0x2, F_UN = 0x4, F_EVICTED = 0x8 }; Node(const bool prim = false, const bool un = false, const bool evicted = false, const uint32_t last_seq = std::numeric_limits::max(), const ViewId& last_prim = ViewId(V_NON_PRIM), const int64_t to_seq = -1, const int weight = -1, const SegmentId segment = 0) : prim_ (prim ), un_ (un ), evicted_ (evicted ), last_seq_ (last_seq ), last_prim_ (last_prim), to_seq_ (to_seq ), weight_ (weight), segment_ (segment) { } void set_prim (const bool val) { prim_ = val ; } void set_un (const bool un) { un_ = un ; } void set_evicted (const bool evicted) { evicted_ = evicted ; } void set_last_seq (const uint32_t seq) { last_seq_ = seq ; } void set_last_prim (const ViewId& last_prim) { last_prim_ = last_prim; } void set_to_seq (const uint64_t seq) { to_seq_ = seq ; } void set_weight (const int weight) { weight_ = weight ; } void set_segment (const SegmentId segment) { segment_ = segment ; } bool prim() const { return prim_ ; } bool un() const { return un_ ; } bool evicted() const { return evicted_ ; } uint32_t last_seq() const { return last_seq_ ; } const ViewId& last_prim() const { return last_prim_; } int64_t to_seq() const { return to_seq_ ; } int weight() const { return weight_ ; } SegmentId segment() const { return segment_ ; } // // Serialized header // 0 1 2 3 // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 // | flags | segment id | 
weight ¡ // size_t unserialize(const gu::byte_t* buf, const size_t buflen, const size_t offset) { size_t off = offset; uint32_t header; gu_trace (off = gu::unserialize4(buf, buflen, off, header)); prim_ = header & F_PRIM; un_ = header & F_UN; if (header & F_WEIGHT) { weight_ = header >> 24; } else { weight_ = -1; } evicted_ = header & F_EVICTED; segment_ = (header >> 16) & 0xff; gu_trace (off = gu::unserialize4(buf, buflen, off, last_seq_)); gu_trace (off = last_prim_.unserialize(buf, buflen, off)); gu_trace (off = gu::unserialize8(buf, buflen, off, to_seq_)); return off; } size_t serialize(gu::byte_t* buf, const size_t buflen, const size_t offset) const { size_t off = offset; uint32_t header = 0; header |= prim_ ? F_PRIM : 0; header |= un_ ? F_UN : 0; if (weight_ >= 0) { header |= F_WEIGHT; header |= weight_ << 24; } header |= evicted_ ? F_EVICTED : 0; header |= static_cast(segment_) << 16; gu_trace (off = gu::serialize4(header, buf, buflen, off)); gu_trace (off = gu::serialize4(last_seq_, buf, buflen, off)); gu_trace (off = last_prim_.serialize(buf, buflen, off)); gu_trace (off = gu::serialize8(to_seq_, buf, buflen, off)); assert (serial_size() == (off - offset)); return off; } static size_t serial_size() { Node* node(reinterpret_cast(0)); // header return (sizeof(uint32_t) + sizeof(node->last_seq_) + ViewId::serial_size() + sizeof(node->to_seq_)); } bool operator==(const Node& cmp) const { return (prim() == cmp.prim() && un() == cmp.un() && last_seq() == cmp.last_seq() && last_prim() == cmp.last_prim() && to_seq() == cmp.to_seq() && weight() == cmp.weight() && segment() == cmp.segment() ); } std::string to_string() const { std::ostringstream ret; ret << "prim=" << prim_ << ",un=" << un_ << ",last_seq=" << last_seq_ << ",last_prim=" << last_prim_ << ",to_seq=" << to_seq_ << ",weight=" << weight_ << ",segment=" << static_cast(segment_); return ret.str(); } private: bool prim_; // Is node in prim comp bool un_; // The prim status of the node is unknown bool evicted_; // Node has been evicted permanently from the group uint32_t last_seq_; // Last seen message seq from the node ViewId last_prim_; // Last known prim comp view id for the node int64_t to_seq_; // Last known TO seq for the node int weight_; // Node weight SegmentId segment_; }; inline std::ostream& gcomm::pc::operator<<(std::ostream& os, const Node& n) { return (os << n.to_string()); } class gcomm::pc::NodeMap : public Map { }; class gcomm::pc::Message { public: enum Type {T_NONE, T_STATE, T_INSTALL, T_USER, T_MAX}; enum { F_CRC16 = 0x1, F_BOOTSTRAP = 0x2, F_WEIGHT_CHANGE = 0x4 }; static const char* to_string(Type t) { static const char* str[T_MAX] = { "NONE", "STATE", "INSTALL", "USER" }; if (t < T_MAX) return str[t]; return "unknown"; } Message(const int version = -1, const Type type = T_NONE, const uint32_t seq = 0, const NodeMap& node_map = NodeMap()) : version_ (version ), flags_ (0 ), type_ (type ), seq_ (seq ), crc16_ (0 ), node_map_(node_map) { // Note: // PC message wire format has room only for version numbers up to 15. // At version 15 (latest) the wire format must change to match // 8 bit version width of EVS. 
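        // For reference, serialize()/unserialize() below pack the leading
        // 32-bit word as (least to most significant bits):
        //   bits  0..3   version   (hence the < 15 limit)
        //   bits  4..7   flags
        //   bits  8..15  type
        //   bits 16..31  crc16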
assert(version < 15); } Message(const Message& msg) : version_ (msg.version_ ), flags_ (msg.flags_ ), type_ (msg.type_ ), seq_ (msg.seq_ ), crc16_ (msg.crc16_ ), node_map_(msg.node_map_) { } virtual ~Message() { } int version() const { return version_; } Type type() const { return type_; } uint32_t seq() const { return seq_; } void flags(int flags) { flags_ = flags; } int flags() const { return flags_; } void checksum(uint16_t crc16, bool flag) { crc16_ = crc16; if (flag == true) { flags_ |= F_CRC16; } else { flags_ &= ~F_CRC16; } } uint16_t checksum() const { return crc16_; } const NodeMap& node_map() const { return node_map_; } NodeMap& node_map() { return node_map_; } const Node& node(const UUID& uuid) const { return NodeMap::value(node_map_.find_checked(uuid)); } Node& node(const UUID& uuid) { return NodeMap::value(node_map_.find_checked(uuid)); } size_t unserialize(const gu::byte_t* buf, const size_t buflen, const size_t offset) { size_t off; uint32_t b; node_map_.clear(); gu_trace (off = gu::unserialize4(buf, buflen, offset, b)); version_ = b & 0x0f; if (version_ > GCOMM_PROTOCOL_MAX_VERSION) gu_throw_error (EPROTONOSUPPORT) << "Unsupported protocol varsion: " << version_; flags_ = (b & 0xf0) >> 4; type_ = static_cast((b >> 8) & 0xff); if (type_ <= T_NONE || type_ >= T_MAX) gu_throw_error (EINVAL) << "Bad type value: " << type_; crc16_ = ((b >> 16) & 0xffff); gu_trace (off = gu::unserialize4(buf, buflen, off, seq_)); if (type_ == T_STATE || type_ == T_INSTALL) { gu_trace (off = node_map_.unserialize(buf, buflen, off)); } return off; } size_t serialize(gu::byte_t* buf, const size_t buflen, const size_t offset) const { size_t off; uint32_t b; b = crc16_; b <<= 8; b |= type_ & 0xff; b <<= 8; b |= version_ & 0x0f; b |= (flags_ << 4) & 0xf0; gu_trace (off = gu::serialize4(b, buf, buflen, offset)); gu_trace (off = gu::serialize4(seq_, buf, buflen, off)); if (type_ == T_STATE || type_ == T_INSTALL) { gu_trace (off = node_map_.serialize(buf, buflen, off)); } assert (serial_size() == (off - offset)); return off; } size_t serial_size() const { // header return (sizeof(uint32_t) + sizeof(seq_) + (type_ == T_STATE || type_ == T_INSTALL ? 
node_map_.serial_size() : 0)); } std::string to_string() const { std::ostringstream ret; ret << "pcmsg{ type=" << to_string(type_) << ", seq=" << seq_; ret << ", flags=" << std::setw(2) << std::hex << flags_; ret << ", node_map {" << node_map() << "}"; ret << '}'; return ret.str(); } private: Message& operator=(const Message&); int version_; // Message version int flags_; // Flags Type type_; // Message type uint32_t seq_; // Message seqno uint16_t crc16_; // 16-bit crc NodeMap node_map_; // Message node map }; inline std::ostream& gcomm::pc::operator<<(std::ostream& os, const Message& m) { return (os << m.to_string()); } class gcomm::pc::StateMessage : public Message { public: StateMessage(int version) : Message(version, Message::T_STATE, 0) {} }; class gcomm::pc::InstallMessage : public Message { public: InstallMessage(int version) : Message(version, Message::T_INSTALL, 0) {} }; class gcomm::pc::UserMessage : public Message { public: UserMessage(int version, uint32_t seq) : Message(version, Message::T_USER, seq) {} }; inline bool gcomm::pc::operator==(const Message& a, const Message& b) { return (a.version() == b.version() && a.checksum() == b.checksum() && a.type() == b.type() && a.seq() == b.seq() && a.node_map() == b.node_map()); } #endif // PC_MESSAGE_HPP galera-3-25.3.20/gcomm/src/pc.cpp0000644000015300001660000001747213042054732016150 0ustar jenkinsjenkins/* * Copyright (C) 2009-2012 Codership Oy */ #include "pc.hpp" #include "pc_proto.hpp" #include "evs_proto.hpp" #include "evs_message2.hpp" #include "gmcast.hpp" #include "defaults.hpp" #include "gcomm/conf.hpp" #include "gcomm/util.hpp" #include "gu_datetime.hpp" void gcomm::PC::handle_up(const void* cid, const Datagram& rb, const ProtoUpMeta& um) { if (pc_recovery_ && um.err_no() == 0 && um.has_view() && um.view().id().type() == V_PRIM) { ViewState vst(const_cast(uuid()), const_cast(um.view()), conf_); log_info << "save pc into disk"; vst.write_file(); } send_up(rb, um); } int gcomm::PC::handle_down(Datagram& wb, const ProtoDownMeta& dm) { if (wb.len() == 0) { gu_throw_error(EMSGSIZE); } return send_down(wb, dm); } size_t gcomm::PC::mtu() const { // TODO: if (gmcast_ == 0) gu_throw_fatal << "not open"; evs::UserMessage evsm; pc::UserMessage pcm(0, 0); if (gmcast_->mtu() < 2*evsm.serial_size() + pcm.serial_size()) { gu_throw_fatal << "transport max msg size too small: " << gmcast_->mtu(); } return gmcast_->mtu() - 2*evsm.serial_size() - pcm.serial_size(); } const gcomm::UUID& gcomm::PC::uuid() const { return gmcast_->uuid(); } std::string gcomm::PC::listen_addr() const { return gmcast_->listen_addr(); } void gcomm::PC::connect(bool start_prim) { try { // for backward compatibility with old approach: gcomm://0.0.0.0 start_prim = (start_prim || host_is_any (uri_.get_host())); } catch (gu::NotSet& ns) { start_prim = true; } bool wait_prim(param(conf_, uri_, Conf::PcWaitPrim, Defaults::PcWaitPrim)); const gu::datetime::Period wait_prim_timeout( param(conf_, uri_, Conf::PcWaitPrimTimeout, Defaults::PcWaitPrimTimeout)); // --wsrep-new-cluster specified in command line // or cluster address as gcomm://0.0.0.0 or gcomm:// // should take precedence. otherwise it's not able to bootstrap. 
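    // That is: with an explicit bootstrap, pc recovery is effectively
    // disabled (as the log message below states), while a recovered
    // primary view clears wait_prim so connect() does not block waiting
    // for an existing primary component.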
if (start_prim) { log_info << "start_prim is enabled, turn off pc_recovery"; } else if (rst_view_.type() == V_PRIM) { wait_prim = false; } pstack_.push_proto(gmcast_); pstack_.push_proto(evs_); pstack_.push_proto(pc_); pstack_.push_proto(this); pnet().insert(&pstack_); gmcast_->connect_precheck(start_prim); gmcast_->connect(); closed_ = false; evs_->shift_to(evs::Proto::S_JOINING); pc_->connect(start_prim); // Due to #658 there is limited announce period after which // node is allowed to proceed to non-prim if other nodes // are not detected. gu::datetime::Date try_until(gu::datetime::Date::now() + announce_timeout_); while (start_prim == false && evs_->known_size() <= 1) { // Send join messages without handling them evs_->send_join(false); pnet().event_loop(gu::datetime::Sec/2); if (try_until < gu::datetime::Date::now()) { break; } } log_debug << "PC/EVS Proto initial state: " << *evs_; if (evs_->state() != evs::Proto::S_OPERATIONAL) { log_debug << "PC/EVS Proto sending join request"; evs_->send_join(); } gcomm_assert(evs_->state() == evs::Proto::S_GATHER || evs_->state() == evs::Proto::S_INSTALL || evs_->state() == evs::Proto::S_OPERATIONAL); // - Due to #658 we loop here only if node is told to start in prim. // - Fix for #680, bypass waiting prim only if explicitly required try_until = gu::datetime::Date::now() + wait_prim_timeout; while ((wait_prim == true || start_prim == true) && pc_->state() != pc::Proto::S_PRIM) { pnet().event_loop(gu::datetime::Sec/2); if (try_until < gu::datetime::Date::now()) { pc_->close(); evs_->close(); gmcast_->close(); pnet().erase(&pstack_); pstack_.pop_proto(this); pstack_.pop_proto(pc_); pstack_.pop_proto(evs_); pstack_.pop_proto(gmcast_); gu_throw_error(ETIMEDOUT) << "failed to reach primary view"; } } pc_->set_mtu(mtu()); } void gcomm::PC::connect(const gu::URI& uri) { uri_ = uri; connect(); } void gcomm::PC::close(bool force) { if (force == true) { log_info << "Forced PC close"; gmcast_->close(); // Don't bother closing PC and EVS at this point. Currently // there is no way of knowing why forced close was issued, // so graceful close of PC and/or EVS may not be safe. 
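        // Only the gmcast_ transport is torn down on a forced close; the
        // calls below are intentionally left commented out.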
// pc_->close(); // evs_->close(); } else { log_debug << "PC/EVS Proto leaving"; pc_->close(); evs_->close(); gu::datetime::Date wait_until(gu::datetime::Date::now() + linger_); do { pnet().event_loop(gu::datetime::Sec/2); } while (evs_->state() != evs::Proto::S_CLOSED && gu::datetime::Date::now() < wait_until); if (evs_->state() != evs::Proto::S_CLOSED) { evs_->shift_to(evs::Proto::S_CLOSED); } if (pc_->state() != pc::Proto::S_CLOSED) { log_warn << "PCProto didn't reach closed state"; } gmcast_->close(); } pnet().erase(&pstack_); pstack_.pop_proto(this); pstack_.pop_proto(pc_); pstack_.pop_proto(evs_); pstack_.pop_proto(gmcast_); ViewState::remove_file(conf_); closed_ = true; } void gcomm::PC::handle_get_status(gu::Status& status) const { status.insert("gcomm_uuid", uuid().full_str()); } gcomm::PC::PC(Protonet& net, const gu::URI& uri) : Transport (net, uri), gmcast_ (0), evs_ (0), pc_ (0), closed_ (true), linger_ (param( conf_, uri, Conf::PcLinger, "PT20S")), announce_timeout_(param( conf_, uri, Conf::PcAnnounceTimeout, Defaults::PcAnnounceTimeout)), pc_recovery_ (param(conf_, uri, Conf::PcRecovery, Defaults::PcRecovery)), rst_uuid_(), rst_view_() { if (uri_.get_scheme() != Conf::PcScheme) { log_fatal << "invalid uri: " << uri_.to_string(); } conf_.set(Conf::PcRecovery, gu::to_string(pc_recovery_)); bool restored = false; ViewState vst(rst_uuid_, rst_view_, conf_); if (pc_recovery_) { if (vst.read_file()) { log_info << "restore pc from disk successfully"; restored = true; } else { log_info << "restore pc from disk failed"; } } else { log_info << "skip pc recovery and remove state file"; ViewState::remove_file(conf_); } gmcast_ = new GMCast(pnet(), uri_, restored ? &rst_uuid_ : NULL); const UUID& uuid(gmcast_->uuid()); if (uuid == UUID::nil()) { gu_throw_fatal << "invalid UUID: " << uuid; } evs::UserMessage evsum; evs_ = new evs::Proto(pnet().conf(), uuid, gmcast_->segment(), uri_, gmcast_->mtu() - 2*evsum.serial_size(), restored ? &rst_view_ : NULL); pc_ = new pc::Proto (pnet().conf(), uuid, gmcast_->segment(), uri_, restored ? &rst_view_ : NULL); conf_.set(Conf::PcLinger, gu::to_string(linger_)); } gcomm::PC::~PC() { if (!closed_) { try { close(); } catch (...) { } sleep(1); // half-hearted attempt to avoid race with client threads } delete gmcast_; delete evs_; delete pc_; } galera-3-25.3.20/gcomm/src/gmcast.hpp0000644000015300001660000001733713042054732017031 0ustar jenkinsjenkins/* * Copyright (C) 2009-2014 Codership Oy */ /* * Generic multicast transport. Uses tcp connections if real multicast * is not available. 
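 *
 * A rough usage sketch, modelled on how gcomm::PC drives this transport
 * (pnet, uri, pstack and start_prim stand for a configured Protonet,
 * cluster URI, protocol stack and bootstrap flag; illustration only, not
 * a complete program):
 *
 *   GMCast gmcast(pnet, uri);          // bottom layer of the gcomm stack
 *   pstack.push_proto(&gmcast);        // evs::Proto and pc::Proto go on top
 *   gmcast.connect_precheck(start_prim);
 *   gmcast.connect();
 *   size_t payload_mtu = gmcast.mtu(); // room left for upper layer payload
 *   // ...
 *   gmcast.close();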
*/ #ifndef GCOMM_GMCAST_HPP #define GCOMM_GMCAST_HPP #include "gmcast_proto.hpp" #include "gcomm/uuid.hpp" #include "gcomm/exception.hpp" #include "gcomm/transport.hpp" #include "gcomm/types.hpp" #include #ifndef GCOMM_GMCAST_MAX_VERSION #define GCOMM_GMCAST_MAX_VERSION 0 #endif // GCOMM_GMCAST_MAX_VERSION namespace gcomm { namespace gmcast { class Proto; class Node; class Message; } class GMCast : public Transport { public: GMCast (Protonet&, const gu::URI&, const UUID* my_uuid = NULL); ~GMCast(); // Protolay interface void handle_up(const void*, const Datagram&, const ProtoUpMeta&); int handle_down(Datagram&, const ProtoDownMeta&); void handle_stable_view(const View& view); void handle_evict(const UUID& uuid); std::string handle_get_address(const UUID& uuid) const; bool set_param(const std::string& key, const std::string& val); // Transport interface const UUID& uuid() const { return my_uuid_; } SegmentId segment() const { return segment_; } void connect_precheck(bool start_prim); void connect(); void connect(const gu::URI&); void close(bool force = false); void close(const UUID& uuid) { gmcast_forget(uuid, time_wait_); } void listen() { gu_throw_fatal << "gmcast transport listen not implemented"; } std::string listen_addr() const { if (listener_ == 0) { gu_throw_error(ENOTCONN) << "not connected"; } return listener_->listen_addr(); } Transport* accept() { gu_throw_fatal << "gmcast transport accept not implemented"; } size_t mtu() const { return pnet_.mtu() - (4 + UUID::serial_size()); } void remove_viewstate_file() const { ViewState::remove_file(conf_); } private: GMCast (const GMCast&); GMCast& operator=(const GMCast&); static const long max_retry_cnt_; class AddrEntry { public: AddrEntry(const gu::datetime::Date& last_seen, const gu::datetime::Date& next_reconnect, const UUID& uuid) : uuid_ (uuid), last_seen_ (last_seen), next_reconnect_ (next_reconnect), last_connect_ (0), retry_cnt_ (0), max_retries_ (0) { } const UUID& uuid() const { return uuid_; } void set_last_seen(const gu::datetime::Date& d) { last_seen_ = d; } const gu::datetime::Date& last_seen() const { return last_seen_; } void set_next_reconnect(const gu::datetime::Date& d) { next_reconnect_ = d; } const gu::datetime::Date& next_reconnect() const { return next_reconnect_; } void set_last_connect() { last_connect_ = gu::datetime::Date::now(); } const gu::datetime::Date& last_connect() const { return last_connect_; } void set_retry_cnt(const int r) { retry_cnt_ = r; } int retry_cnt() const { return retry_cnt_; } void set_max_retries(int mr) { max_retries_ = mr; } int max_retries() const { return max_retries_; } private: friend std::ostream& operator<<(std::ostream&, const AddrEntry&); void operator=(const AddrEntry&); UUID uuid_; gu::datetime::Date last_seen_; gu::datetime::Date next_reconnect_; gu::datetime::Date last_connect_; int retry_cnt_; int max_retries_; }; typedef Map AddrList; class AddrListUUIDCmp { public: AddrListUUIDCmp(const UUID& uuid) : uuid_(uuid) { } bool operator()(const AddrList::value_type& cmp) const { return (cmp.second.uuid() == uuid_); } private: UUID uuid_; }; int version_; static const int max_version_ = GCOMM_GMCAST_MAX_VERSION; uint8_t segment_; UUID my_uuid_; bool use_ssl_; std::string group_name_; std::string listen_addr_; std::set initial_addrs_; std::string mcast_addr_; std::string bind_ip_; int mcast_ttl_; Acceptor* listener_; SocketPtr mcast_; AddrList pending_addrs_; AddrList remote_addrs_; AddrList addr_blacklist_; bool relaying_; bool isolate_; gmcast::ProtoMap* proto_map_; std::set 
relay_set_; typedef std::vector Segment; typedef std::map SegmentMap; SegmentMap segment_map_; // self index in local segment when ordered by UUID size_t self_index_; gu::datetime::Period time_wait_; gu::datetime::Period check_period_; gu::datetime::Period peer_timeout_; int max_initial_reconnect_attempts_; gu::datetime::Date next_check_; gu::datetime::Date handle_timers(); // Erase ProtoMap entry in a safe way so that all lookup lists // become properly updated. void erase_proto(gmcast::ProtoMap::iterator); // Accept new connection void gmcast_accept(); // Initialize connecting to remote host void gmcast_connect(const std::string&); // Forget node void gmcast_forget(const gcomm::UUID&, const gu::datetime::Period&); // Handle proto entry that has established connection to remote host void handle_connected(gmcast::Proto*); // Handle proto entry that has succesfully finished handshake // sequence void handle_established(gmcast::Proto*); // Handle proto entry that has failed void handle_failed(gmcast::Proto*); // Check if there exists connection that matches to either // remote addr or uuid bool is_connected(const std::string& addr, const UUID& uuid) const; // Inset address to address list void insert_address(const std::string& addr, const UUID& uuid, AddrList&); // Scan through proto entries and update address lists void update_addresses(); // void check_liveness(); void relay(const gmcast::Message& msg, const Datagram& dg, const void* exclude_id); // Reconnecting void reconnect(); void set_initial_addr(const gu::URI&); void add_or_del_addr(const std::string&); std::string self_string() const { std::ostringstream os; os << '(' << my_uuid_ << ", '" << listen_addr_ << "')"; return os.str(); } friend std::ostream& operator<<(std::ostream&, const AddrEntry&); }; inline std::ostream& operator<<(std::ostream& os, const GMCast::AddrEntry& ae) { return (os << ae.uuid_ << " last_seen=" << ae.last_seen_ << " next_reconnect=" << ae.next_reconnect_ << " retry_cnt=" << ae.retry_cnt_); } } #endif // GCOMM_GMCAST_HPP galera-3-25.3.20/gcomm/src/evs_input_map2.hpp0000644000015300001660000002175413042054732020504 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy * * $Id$ */ /*! * @file Input map for EVS messaging. Provides simple interface for * handling messages with different safety guarantees. * * @note When operating with iterators, note that evs::Message * accessed through iterator may have different sequence * number as it position dictates. Use sequence number * found from key part. * * @todo Fix issue in above note if feasible. 
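 *
 * A minimal usage sketch (assumptions: a group with a single node at
 * index 0, and msg/dg being a received UserMessage and its Datagram;
 * error handling omitted):
 *
 * @code
 *   InputMap im;
 *   im.reset(1);                        // one node in the group
 *   Range r(im.insert(0, msg, dg));     // returns the node's updated range
 *   im.set_safe_seq(0, im.aru_seq());   // node 0 acknowledges up to aru
 *   for (InputMap::iterator i = im.begin(); i != im.end(); )
 *   {
 *       InputMap::iterator next(i); ++next;
 *       if (im.is_safe(i)) im.erase(i); // O_SAFE messages can be dropped
 *       i = next;
 *   }
 * @endcode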
*/ #ifndef EVS_INPUT_MAP2_HPP #define EVS_INPUT_MAP2_HPP #include "evs_message2.hpp" #include "gcomm/map.hpp" #include "gcomm/datagram.hpp" #include namespace gcomm { /* Forward declarations */ class InputMapMsgKey; std::ostream& operator<<(std::ostream&, const InputMapMsgKey&); namespace evs { class InputMapMsg; std::ostream& operator<<(std::ostream&, const InputMapMsg&); class InputMapMsgIndex; class InputMapNode; std::ostream& operator<<(std::ostream&, const InputMapNode&); typedef std::vector InputMapNodeIndex; std::ostream& operator<<(std::ostream&, const InputMapNodeIndex&); class InputMap; std::ostream& operator<<(std::ostream&, const InputMap&); } } /* Internal msg representation */ class gcomm::InputMapMsgKey { public: InputMapMsgKey(const size_t index, const evs::seqno_t seq) : index_ (index), seq_ (seq) { } size_t index() const { return index_; } evs::seqno_t seq () const { return seq_; } bool operator<(const InputMapMsgKey& cmp) const { return (seq_ < cmp.seq_ || (seq_ == cmp.seq_ && index_ < cmp.index_)); } private: size_t const index_; evs::seqno_t const seq_; }; /* Internal message representation */ class gcomm::evs::InputMapMsg { public: InputMapMsg(const UserMessage& msg, const Datagram& rb) : msg_(msg), rb_ (rb) { } InputMapMsg(const InputMapMsg& m) : msg_(m.msg_), rb_ (m.rb_) { } ~InputMapMsg() { } const UserMessage& msg () const { return msg_; } const Datagram& rb () const { return rb_; } private: void operator=(const InputMapMsg&); UserMessage const msg_; Datagram rb_; }; #if defined(GALERA_USE_BOOST_POOL_ALLOC) #include class gcomm::evs::InputMapMsgIndex : public Map, boost::fast_pool_allocator< std::pair, boost::default_user_allocator_new_delete, boost::details::pool::null_mutex > > > {}; #else /* GALERA_USE_BOOST_POOL_ALLOC */ class gcomm::evs::InputMapMsgIndex : public Map {}; #endif /* GALERA_USE_BOOST_POOL_ALLOC */ /* Internal node representation */ class gcomm::evs::InputMapNode { public: InputMapNode() : idx_(), range_(0, -1), safe_seq_(-1) { } void set_range (const Range r) { range_ = r; } void set_safe_seq (const seqno_t s) { safe_seq_ = s; } void set_index (const size_t i) { idx_ = i; } Range range () const { return range_; } seqno_t safe_seq () const { return safe_seq_; } size_t index () const { return idx_; } private: size_t idx_; Range range_; seqno_t safe_seq_; }; /*! * Input map for messages. * */ class gcomm::evs::InputMap { public: /* Iterators exposed to user */ typedef InputMapMsgIndex::iterator iterator; typedef InputMapMsgIndex::const_iterator const_iterator; /*! * Default constructor. */ InputMap(); /*! * Default destructor. */ ~InputMap(); /*! * Get current value of aru_seq. * * @return Current value of aru_seq */ seqno_t aru_seq () const { return aru_seq_; } /*! * Get current value of safe_seq. * * @return Current value of safe_seq */ seqno_t safe_seq() const { return safe_seq_; } /*! * Set sequence number safe for node. * * @param uuid Node uuid * @param seq Sequence number to be set safe * * @throws FatalException if node was not found or sequence number * was not in the allowed range */ void set_safe_seq(const size_t uuid, const seqno_t seq); /*! * Get current value of safe_seq for node. * * @param uuid Node uuid * * @return Safe sequence number for node * * @throws FatalException if node was not found */ seqno_t safe_seq(const size_t uuid) const { return node_index_->at(uuid).safe_seq(); } /*! 
* Get current range parameter for node * * @param uuid Node uuid * * @return Range parameter for node * * @throws FatalException if node was not found */ Range range (const size_t uuid) const { return node_index_->at(uuid).range(); } seqno_t min_hs() const; seqno_t max_hs() const; /*! * Get iterator to the beginning of the input map * * @return Iterator pointing to the first element */ iterator begin() const { return msg_index_->begin(); } /*! * Get iterator next to the last element of the input map * * @return Iterator pointing past the last element */ iterator end () const { return msg_index_->end(); } /*! * Check if message pointed to by iterator fulfills O_SAFE condition. * * @return True or false */ bool is_safe (iterator i) const { const seqno_t seq(InputMapMsgIndex::key(i).seq()); return (seq <= safe_seq_); } /*! * Check if message pointed to by iterator fulfills O_AGREED condition. * * @return True or false */ bool is_agreed(iterator i) const { const seqno_t seq(InputMapMsgIndex::key(i).seq()); return (seq <= aru_seq_); } /*! * Check if message pointed to by iterator fulfills O_FIFO condition. * * @return True or false */ bool is_fifo (iterator i) const { const seqno_t seq(InputMapMsgIndex::key(i).seq()); const InputMapNode& node((*node_index_)[ InputMapMsgIndex::key(i).index()]); return (node.range().lu() > seq); } /*! * Insert new message into input map. * * @param uuid Node uuid of the message source * @param msg EVS message * @param dg Datagram associated with the message * * @return Range parameter of the node * * @throws FatalException if node not found or message sequence * number is out of allowed range */ Range insert(const size_t uuid, const UserMessage& msg, const Datagram& dg = Datagram()); /*! * Erase message pointed to by iterator. Note that message may still * be recovered through recover() method as long as it does not * fulfill O_SAFE constraint. * * @param i Iterator * * @throws FatalException if iterator is not valid */ void erase(iterator i); /*! * Find message. * * @param uuid Message source node uuid * @param seq Message sequence number * * @return Iterator pointing to message or at end() if message was not found * * @throws FatalException if node was not found */ iterator find(const size_t uuid, const seqno_t seq) const; /*! * Recover message. * * @param uuid Message source node uuid * @param seq Message sequence number * * @return Iterator pointing to the message * * @throws FatalException if node or message was not found */ iterator recover(const size_t uuid, const seqno_t seq) const; /*! * Reset the input map to the given number of nodes and window size. */ void reset(const size_t, const seqno_t = 256); /*! * Clear input map state. */ void clear(); private: friend std::ostream& operator<<(std::ostream&, const InputMap&); /* Non-copyable */ InputMap(const InputMap&); void operator=(const InputMap&); /*! * Update aru_seq value to represent current state. */ void update_aru(); /*! * Clean up recovery index. All messages up to safe_seq are removed.
*/ void cleanup_recovery_index(); seqno_t window_; seqno_t safe_seq_; /*!< Safe seqno */ seqno_t aru_seq_; /*!< All received upto seqno */ InputMapNodeIndex* node_index_; /*!< Index of nodes */ InputMapMsgIndex* msg_index_; /*!< Index of messages */ InputMapMsgIndex* recovery_index_; /*!< Recovery index */ }; #endif // EVS_INPUT_MAP2_HPP galera-3-25.3.20/gcomm/src/asio_udp.cpp0000644000015300001660000001612213042054732017340 0ustar jenkinsjenkins/* * Copyright (C) 2010-2012 Codership Oy */ #include "asio_udp.hpp" #include "gcomm/util.hpp" #include "gcomm/common.hpp" #include #include static bool is_multicast(const asio::ip::udp::endpoint& ep) { if (ep.address().is_v4() == true) { return ep.address().to_v4().is_multicast(); } else if (ep.address().is_v6() == true) { return ep.address().to_v6().is_multicast(); } gu_throw_fatal; } static void join_group(asio::ip::udp::socket& socket, const asio::ip::udp::endpoint& ep, const asio::ip::address& local_if) { gcomm_assert(is_multicast(ep) == true); if (ep.address().is_v4() == true) { socket.set_option(asio::ip::multicast::join_group(ep.address().to_v4(), local_if.to_v4())); socket.set_option(asio::ip::multicast::outbound_interface(local_if.to_v4())); } else { gu_throw_fatal << "mcast interface not implemented"; socket.set_option(asio::ip::multicast::join_group(ep.address().to_v6())); } } static void leave_group(asio::ip::udp::socket& socket, asio::ip::udp::endpoint& ep) { // gcomm_assert(is_multicast(ep) == true); // socket.set_option(asio::ip::multicast::leave_group(ep.address().to_v4())); } gcomm::AsioUdpSocket::AsioUdpSocket(AsioProtonet& net, const gu::URI& uri) : Socket(uri), net_(net), state_(S_CLOSED), socket_(net_.io_service_), target_ep_(), source_ep_(), recv_buf_((1 << 15) + NetHeader::serial_size_) { } gcomm::AsioUdpSocket::~AsioUdpSocket() { close(); } void gcomm::AsioUdpSocket::connect(const gu::URI& uri) { gcomm_assert(state() == S_CLOSED); Critical crit(net_); asio::ip::udp::resolver resolver(net_.io_service_); asio::ip::udp::resolver::query query(gu::unescape_addr(uri.get_host()), uri.get_port()); asio::ip::udp::resolver::iterator conn_i(resolver.resolve(query)); target_ep_ = conn_i->endpoint(); socket_.open(conn_i->endpoint().protocol()); socket_.set_option(asio::ip::udp::socket::reuse_address(true)); socket_.set_option(asio::ip::udp::socket::linger(true, 1)); gu::set_fd_options(socket_); asio::ip::udp::socket::non_blocking_io cmd(true); socket_.io_control(cmd); const std::string if_addr( gu::unescape_addr( uri.get_option("socket.if_addr", gu::any_addr(conn_i->endpoint().address())))); asio::ip::address local_if(asio::ip::address::from_string(if_addr)); if (is_multicast(conn_i->endpoint()) == true) { join_group(socket_, conn_i->endpoint(), local_if); socket_.set_option( asio::ip::multicast::enable_loopback( gu::from_string(uri.get_option("socket.if_loop", "false")))); socket_.set_option( asio::ip::multicast::hops( gu::from_string(uri.get_option("socket.mcast_ttl", "1")))); socket_.bind(*conn_i); } else { socket_.bind( asio::ip::udp::endpoint( local_if, gu::from_string(uri.get_port()))); } async_receive(); state_ = S_CONNECTED; } void gcomm::AsioUdpSocket::close() { Critical crit(net_); if (state() != S_CLOSED) { if (is_multicast(target_ep_) == true) { leave_group(socket_, target_ep_); } socket_.close(); } state_ = S_CLOSED; } int gcomm::AsioUdpSocket::send(const Datagram& dg) { Critical crit(net_); boost::array cbs; NetHeader hdr(dg.len(), net_.version_); if (net_.checksum_ != NetHeader::CS_NONE) { 
hdr.set_crc32(crc32(net_.checksum_, dg), net_.checksum_); } gu::byte_t buf[NetHeader::serial_size_]; serialize(hdr, buf, sizeof(buf), 0); cbs[0] = asio::const_buffer(buf, sizeof(buf)); cbs[1] = asio::const_buffer(dg.header() + dg.header_offset(), dg.header_len()); cbs[2] = asio::const_buffer(&dg.payload()[0], dg.payload().size()); try { socket_.send_to(cbs, target_ep_); } catch (asio::system_error& err) { log_warn << "Error: " << err.what(); return err.code().value(); } return 0; } void gcomm::AsioUdpSocket::read_handler(const asio::error_code& ec, size_t bytes_transferred) { if (ec) { // return; } if (bytes_transferred >= NetHeader::serial_size_) { Critical crit(net_); NetHeader hdr; try { unserialize(&recv_buf_[0], NetHeader::serial_size_, 0, hdr); } catch (gu::Exception& e) { log_warn << "hdr unserialize failed: " << e.get_errno(); return; } if (NetHeader::serial_size_ + hdr.len() != bytes_transferred) { log_warn << "len " << hdr.len() << " does not match to bytes transferred" << bytes_transferred; } else { Datagram dg( gu::SharedBuffer( new gu::Buffer(&recv_buf_[0] + NetHeader::serial_size_, &recv_buf_[0] + NetHeader::serial_size_ + hdr.len()))); if (net_.checksum_ == true && check_cs(hdr, dg)) { log_warn << "checksum failed, hdr: len=" << hdr.len() << " has_crc32=" << hdr.has_crc32() << " has_crc32c=" << hdr.has_crc32c() << " crc32=" << hdr.crc32(); } else { net_.dispatch(id(), dg, ProtoUpMeta()); } } } else { log_warn << "short read of " << bytes_transferred; } async_receive(); } void gcomm::AsioUdpSocket::async_receive() { Critical crit(net_); boost::array mbs; mbs[0] = asio::mutable_buffer(&recv_buf_[0], recv_buf_.size()); socket_.async_receive_from(mbs, source_ep_, boost::bind(&AsioUdpSocket::read_handler, shared_from_this(), asio::placeholders::error, asio::placeholders::bytes_transferred)); } size_t gcomm::AsioUdpSocket::mtu() const { return (1 << 15); } std::string gcomm::AsioUdpSocket::local_addr() const { return uri_string(gu::scheme::udp, gu::escape_addr(socket_.local_endpoint().address()), gu::to_string(socket_.local_endpoint().port())); } std::string gcomm::AsioUdpSocket::remote_addr() const { return uri_string(gu::scheme::udp, gu::escape_addr(socket_.remote_endpoint().address()), gu::to_string(socket_.remote_endpoint().port())); } galera-3-25.3.20/gcomm/src/socket.hpp0000644000015300001660000000437713042054732017043 0ustar jenkinsjenkins// // Copyright (C) 2009 Codership Oy // //! // @file socket.hpp Socket interface. // // This file defines socket interface used by gcomm. Currently socket interface // provides synchronous send() but only async_recv(). // #ifndef GCOMM_SOCKET_HPP #define GCOMM_SOCKET_HPP #include "gcomm/datagram.hpp" #include "gu_uri.hpp" namespace gcomm { typedef const void* SocketId; //!< Socket Identifier class Socket; //!< Socket interface typedef boost::shared_ptr SocketPtr; class Acceptor; //!< Acceptor interfacemat } class gcomm::Socket { public: typedef enum { S_CLOSED, S_CONNECTING, S_CONNECTED, S_FAILED, S_CLOSING } State; /*! * Symbolic option names (to specify in URI) */ static const std::string OptNonBlocking; /*! socket.non_blocking */ static const std::string OptIfAddr; /*! socket.if_addr */ static const std::string OptIfLoop; /*! socket.if_loop */ static const std::string OptCRC32; /*! socket.crc32 */ static const std::string OptMcastTTL; /*! 
socket.mcast_ttl */ Socket(const gu::URI& uri) : uri_(uri) { } virtual ~Socket() { } virtual void connect(const gu::URI& uri) = 0; virtual void close() = 0; virtual void set_option(const std::string& key, const std::string& val) = 0; virtual int send(const Datagram& dg) = 0; virtual void async_receive() = 0; virtual size_t mtu() const = 0; virtual std::string local_addr() const = 0; virtual std::string remote_addr() const = 0; virtual State state() const = 0; virtual SocketId id() const = 0; protected: const gu::URI uri_; }; class gcomm::Acceptor { public: typedef enum { S_CLOSED, S_LISTENING, S_FAILED } State; Acceptor(const gu::URI& uri) : uri_(uri) { } virtual ~Acceptor() { } virtual void listen(const gu::URI& uri) = 0; virtual std::string listen_addr() const = 0; virtual void close() = 0; virtual State state() const = 0; virtual SocketPtr accept() = 0; virtual SocketId id() const = 0; protected: const gu::URI uri_; }; #endif // GCOMM_SOCKET_HPP galera-3-25.3.20/gcomm/src/pc.hpp0000644000015300001660000000254713042054732016152 0ustar jenkinsjenkins/* * Copyright (C) 2009-2014 Codership Oy */ #include "gcomm/transport.hpp" namespace gcomm { class GMCast; namespace evs { class Proto; } namespace pc { class Proto; } class PC : public Transport { public: PC (Protonet&, const gu::URI&); ~PC(); void connect(bool start_prim = false); void connect(const gu::URI&); std::string listen_addr() const; void close(bool force = false); void handle_up(const void*, const Datagram&, const ProtoUpMeta&); int handle_down(Datagram&, const ProtoDownMeta&); const UUID& uuid() const; size_t mtu() const; void handle_get_status(gu::Status& status) const; private: GMCast* gmcast_; // GMCast transport evs::Proto* evs_; // EVS protocol layer pc::Proto* pc_; // PC protocol layer bool closed_; // flag for destructor // Period to wait graceful leave gu::datetime::Period linger_; gu::datetime::Period announce_timeout_; bool pc_recovery_; UUID rst_uuid_; View rst_view_; PC(const PC&); void operator=(const PC&); }; } // namespace gcomm galera-3-25.3.20/gcomm/src/asio_udp.hpp0000644000015300001660000000333613042054732017350 0ustar jenkinsjenkins/* * Copyright (C) 2010 Codership Oy */ #ifndef GCOMM_ASIO_UDP_HPP #define GCOMM_ASIO_UDP_HPP #include "socket.hpp" #include "asio_protonet.hpp" #include #include // // Boost enable_shared_from_this<> does not have virtual destructor, // therefore need to ignore -Weffc++ // #if defined(__GNUG__) # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic push # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic ignored "-Weffc++" #endif namespace gcomm { class AsioUdpSocket; class AsioProtonet; } class gcomm::AsioUdpSocket : public gcomm::Socket, public boost::enable_shared_from_this { public: AsioUdpSocket(AsioProtonet& net, const gu::URI& uri); ~AsioUdpSocket(); void connect(const gu::URI& uri); void close(); void set_option(const std::string&, const std::string&) { /* not implemented */ } int send(const Datagram& dg); void read_handler(const asio::error_code&, size_t); void async_receive(); size_t mtu() const; std::string local_addr() const; std::string remote_addr() const; State state() const { return state_; } SocketId id() const { return &socket_; } private: AsioProtonet& net_; State state_; asio::ip::udp::socket socket_; asio::ip::udp::endpoint target_ep_; asio::ip::udp::endpoint source_ep_; std::vector recv_buf_; }; #if defined(__GNUG__) # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC 
diagnostic pop # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) #endif #endif // GCOMM_ASIO_UDP_HPP galera-3-25.3.20/gcomm/src/evs_node.cpp0000644000015300001660000000643613042054732017346 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy */ #include "evs_node.hpp" #include "evs_proto.hpp" #include "evs_message2.hpp" #include std::ostream& gcomm::evs::operator<<(std::ostream& os, const gcomm::evs::Node& n) { os << "{"; os << "o=" << n.operational() << ","; os << "s=" << n.suspected() << ","; os << "i=" << n.installed() << ","; os << "fs=" << n.fifo_seq() << ","; if (n.join_message() != 0) { os << "jm=\n" << *n.join_message() << ",\n"; } if (n.leave_message() != 0) { os << "lm=\n" << *n.leave_message() << ",\n"; } os << "}"; return os; } gcomm::evs::Node::Node(const Node& n) : proto_ (n.proto_), index_ (n.index_), operational_ (n.operational_), suspected_ (n.suspected_), inactive_ (n.inactive_), committed_ (n.committed_), installed_ (n.installed_), join_message_ (n.join_message_ != 0 ? new JoinMessage(*n.join_message_) : 0), leave_message_ (n.leave_message_ != 0 ? new LeaveMessage(*n.leave_message_) : 0), delayed_list_message_ (n.delayed_list_message_ != 0 ? new DelayedListMessage(*n.delayed_list_message_) : 0), tstamp_ (n.tstamp_), seen_tstamp_ (n.seen_tstamp_), fifo_seq_ (n.fifo_seq_), segment_ (n.segment_) { } gcomm::evs::Node::~Node() { delete join_message_; delete leave_message_; } void gcomm::evs::Node::set_join_message(const JoinMessage* jm) { if (join_message_ != 0) { delete join_message_; } if (jm != 0) { join_message_ = new JoinMessage(*jm); } else { join_message_ = 0; } } void gcomm::evs::Node::set_leave_message(const LeaveMessage* lm) { if (leave_message_ != 0) { delete leave_message_; } if (lm != 0) { leave_message_ = new LeaveMessage(*lm); } else { leave_message_ = 0; } } void gcomm::evs::Node::set_delayed_list_message(const DelayedListMessage* elm) { if (delayed_list_message_ != 0) { delete delayed_list_message_; } delayed_list_message_ = (elm == 0 ? 
0 : new DelayedListMessage(*elm)); } bool gcomm::evs::Node::is_suspected() const { return suspected_; } bool gcomm::evs::Node::is_inactive() const { return inactive_; } void gcomm::evs::InspectNode::operator()(std::pair& p) const { Node& node(p.second); gu::datetime::Date now(gu::datetime::Date::now()); if (node.tstamp() + node.proto_.suspect_timeout_ < now) { if (node.suspected_ == false) { log_debug << "declaring node with index " << node.index_ << " suspected, timeout " << node.proto_.suspect_timeout_; } node.suspected_ = true; } else { node.suspected_ = false; } if (node.tstamp() + node.proto_.inactive_timeout_ < now) { if (node.inactive_ == false) { log_debug << "declaring node with index " << node.index_ << " inactive "; } node.inactive_ = true; } else { node.inactive_ = false; } } galera-3-25.3.20/gcomm/src/evs_proto.cpp0000644000015300001660000047500413042054732017565 0ustar jenkinsjenkins/* * Copyright (C) 2009-2014 Codership Oy */ #ifdef PROFILE_EVS_PROTO #define GCOMM_PROFILE 1 #else #undef GCOMM_PROFILE #endif // PROFILE_EVS_PROTO #include "evs_proto.hpp" #include "evs_message2.hpp" #include "evs_input_map2.hpp" #include "gcomm/transport.hpp" #include "gcomm/conf.hpp" #include "gcomm/util.hpp" #include "defaults.hpp" #include #include #include #include #include #include using namespace std::rel_ops; // Convenience macros for debug and info logging #define evs_log_debug(__mask__) \ if ((debug_mask_ & (__mask__)) == 0) { } \ else log_debug << self_string() << ": " #define evs_log_info(__mask__) \ if ((info_mask_ & (__mask__)) == 0) { } \ else log_info << self_string() << ": " gcomm::evs::Proto::Proto(gu::Config& conf, const UUID& my_uuid, SegmentId segment, const gu::URI& uri, const size_t mtu, const View* rst_view) : Protolay(conf), timers_(), version_(check_range(Conf::EvsVersion, param(conf, uri, Conf::EvsVersion, "0"), 0, GCOMM_PROTOCOL_MAX_VERSION + 1)), debug_mask_(param(conf, uri, Conf::EvsDebugLogMask, "0x1", std::hex)), info_mask_(param(conf, uri, Conf::EvsInfoLogMask, "0x0", std::hex)), last_stats_report_(gu::datetime::Date::now()), collect_stats_(true), hs_agreed_("0.0,0.0001,0.00031623,0.001,0.0031623,0.01,0.031623,0.1,0.31623,1.,3.1623,10.,31.623"), hs_safe_("0.0,0.0001,0.00031623,0.001,0.0031623,0.01,0.031623,0.1,0.31623,1.,3.1623,10.,31.623"), hs_local_causal_("0.0,0.0001,0.00031623,0.001,0.0031623,0.01,0.031623,0.1,0.31623,1.,3.1623,10.,31.623"), safe_deliv_latency_(), send_queue_s_(0), n_send_queue_s_(0), sent_msgs_(7, 0), retrans_msgs_(0), recovered_msgs_(0), recvd_msgs_(7, 0), delivered_msgs_(O_LOCAL_CAUSAL + 1), send_user_prof_ ("send_user"), send_gap_prof_ ("send_gap"), send_join_prof_ ("send_join"), send_install_prof_ ("send_install"), send_leave_prof_ ("send_leave"), consistent_prof_ ("consistent"), consensus_prof_ ("consensus"), shift_to_prof_ ("shift_to"), input_map_prof_ ("input_map"), delivery_prof_ ("delivery"), delivering_(false), my_uuid_(my_uuid), segment_(segment), known_(), self_i_(), view_forget_timeout_( check_range(Conf::EvsViewForgetTimeout, param( conf, uri, Conf::EvsViewForgetTimeout, Defaults::EvsViewForgetTimeout), gu::from_string( Defaults::EvsViewForgetTimeoutMin), gu::datetime::Period::max())), inactive_timeout_( check_range(Conf::EvsInactiveTimeout, param( conf, uri, Conf::EvsInactiveTimeout, Defaults::EvsInactiveTimeout), gu::from_string( Defaults::EvsInactiveTimeoutMin), gu::datetime::Period::max())), suspect_timeout_( check_range(Conf::EvsSuspectTimeout, param( conf, uri, Conf::EvsSuspectTimeout, Defaults::EvsSuspectTimeout), 
gu::from_string( Defaults::EvsSuspectTimeoutMin), gu::datetime::Period::max())), inactive_check_period_( check_range(Conf::EvsInactiveCheckPeriod, param( conf, uri, Conf::EvsInactiveCheckPeriod, Defaults::EvsInactiveCheckPeriod), gu::datetime::Period::min(), suspect_timeout_/2 + 1)), retrans_period_( check_range(Conf::EvsKeepalivePeriod, param( conf, uri, Conf::EvsKeepalivePeriod, Defaults::EvsRetransPeriod), gu::from_string( Defaults::EvsRetransPeriodMin), suspect_timeout_/3 + 1)), install_timeout_( check_range(Conf::EvsInstallTimeout, param( conf, uri, Conf::EvsInstallTimeout, gu::to_string(inactive_timeout_/2)), retrans_period_, inactive_timeout_ + 1)), join_retrans_period_( check_range(Conf::EvsJoinRetransPeriod, param( conf, uri, Conf::EvsJoinRetransPeriod, Defaults::EvsJoinRetransPeriod), gu::from_string( Defaults::EvsRetransPeriodMin), gu::datetime::Period::max())), stats_report_period_( check_range(Conf::EvsStatsReportPeriod, param( conf, uri, Conf::EvsStatsReportPeriod, Defaults::EvsStatsReportPeriod), gu::from_string( Defaults::EvsStatsReportPeriodMin), gu::datetime::Period::max())), causal_keepalive_period_(retrans_period_), delay_margin_(param( conf, uri, Conf::EvsDelayMargin, Defaults::EvsDelayMargin)), delayed_keep_period_(param( conf, uri, Conf::EvsDelayedKeepPeriod, Defaults::EvsDelayedKeepPeriod)), last_inactive_check_ (gu::datetime::Date::now()), last_causal_keepalive_ (gu::datetime::Date::now()), current_view_(0, ViewId(V_TRANS, my_uuid, rst_view ? rst_view -> id().seq() + 1 : 0)), previous_view_(), previous_views_(), gather_views_(), input_map_(new InputMap()), causal_queue_(), consensus_(*this, known_, *input_map_, current_view_), install_message_(0), max_view_id_seq_(0), attempt_seq_(1), new_view_logged_(false), max_install_timeouts_( check_range(Conf::EvsMaxInstallTimeouts, param(conf, uri, Conf::EvsMaxInstallTimeouts, Defaults::EvsMaxInstallTimeouts), 0, std::numeric_limits::max())), install_timeout_count_(0), fifo_seq_(-1), last_sent_(-1), send_window_( check_range(Conf::EvsSendWindow, param(conf, uri, Conf::EvsSendWindow, Defaults::EvsSendWindow), gu::from_string(Defaults::EvsSendWindowMin), std::numeric_limits::max())), user_send_window_( check_range(Conf::EvsUserSendWindow, param(conf, uri, Conf::EvsUserSendWindow, Defaults::EvsUserSendWindow), gu::from_string(Defaults::EvsUserSendWindowMin), send_window_ + 1)), output_(), send_buf_(), max_output_size_(128), mtu_(mtu), use_aggregate_(param(conf, uri, Conf::EvsUseAggregate, "true")), self_loopback_(false), state_(S_CLOSED), shift_to_rfcnt_(0), pending_leave_(false), isolation_end_(gu::datetime::Date::zero()), delayed_list_(), auto_evict_(param(conf, uri, Conf::EvsAutoEvict, Defaults::EvsAutoEvict)) { log_info << "EVS version " << version_; conf.set(Conf::EvsVersion, gu::to_string(version_)); conf.set(Conf::EvsViewForgetTimeout, gu::to_string(view_forget_timeout_)); conf.set(Conf::EvsSuspectTimeout, gu::to_string(suspect_timeout_)); conf.set(Conf::EvsInactiveTimeout, gu::to_string(inactive_timeout_)); conf.set(Conf::EvsKeepalivePeriod, gu::to_string(retrans_period_)); conf.set(Conf::EvsInactiveCheckPeriod, gu::to_string(inactive_check_period_)); conf.set(Conf::EvsJoinRetransPeriod, gu::to_string(join_retrans_period_)); conf.set(Conf::EvsInstallTimeout, gu::to_string(install_timeout_)); conf.set(Conf::EvsStatsReportPeriod, gu::to_string(stats_report_period_)); conf.set(Conf::EvsCausalKeepalivePeriod, gu::to_string(causal_keepalive_period_)); conf.set(Conf::EvsSendWindow, gu::to_string(send_window_)); 
conf.set(Conf::EvsUserSendWindow, gu::to_string(user_send_window_)); conf.set(Conf::EvsUseAggregate, gu::to_string(use_aggregate_)); conf.set(Conf::EvsDebugLogMask, gu::to_string(debug_mask_, std::hex)); conf.set(Conf::EvsInfoLogMask, gu::to_string(info_mask_, std::hex)); conf.set(Conf::EvsMaxInstallTimeouts, gu::to_string(max_install_timeouts_)); conf.set(Conf::EvsDelayMargin, gu::to_string(delay_margin_)); conf.set(Conf::EvsDelayedKeepPeriod, gu::to_string(delayed_keep_period_)); conf.set(Conf::EvsAutoEvict, gu::to_string(auto_evict_)); // known_.insert_unique( std::make_pair(my_uuid_, Node(*this))); self_i_ = known_.begin(); assert(NodeMap::value(self_i_).operational() == true); NodeMap::value(self_i_).set_index(0); input_map_->reset(1); current_view_.add_member(my_uuid_, segment_); // we don't need to store previous views, do we ? if (rst_view) { previous_view_ = *rst_view; previous_views_.insert( std::make_pair(rst_view -> id(), gu::datetime::Date::now())); } if (mtu_ != std::numeric_limits::max()) { send_buf_.reserve(mtu_); } } gcomm::evs::Proto::~Proto() { output_.clear(); delete install_message_; delete input_map_; } bool gcomm::evs::Proto::set_param(const std::string& key, const std::string& val) { if (key == gcomm::Conf::EvsVersion) { version_ = check_range(Conf::EvsVersion, gu::from_string(val), 0, GCOMM_PROTOCOL_MAX_VERSION + 1); conf_.set(Conf::EvsVersion, gu::to_string(version_)); // trigger configuration change to propagate version shift_to(S_GATHER, true); return true; } else if (key == gcomm::Conf::EvsSendWindow) { send_window_ = check_range(Conf::EvsSendWindow, gu::from_string(val), user_send_window_, std::numeric_limits::max()); conf_.set(Conf::EvsSendWindow, gu::to_string(send_window_)); return true; } else if (key == gcomm::Conf::EvsUserSendWindow) { user_send_window_ = check_range( Conf::EvsUserSendWindow, gu::from_string(val), gu::from_string(Defaults::EvsUserSendWindowMin), send_window_ + 1); conf_.set(Conf::EvsUserSendWindow, gu::to_string(user_send_window_)); return true; } else if (key == gcomm::Conf::EvsMaxInstallTimeouts) { max_install_timeouts_ = check_range( Conf::EvsMaxInstallTimeouts, gu::from_string(val), 0, std::numeric_limits::max()); conf_.set(Conf::EvsMaxInstallTimeouts, gu::to_string(max_install_timeouts_)); return true; } else if (key == Conf::EvsStatsReportPeriod) { stats_report_period_ = check_range( Conf::EvsStatsReportPeriod, gu::from_string(val), gu::from_string(Defaults::EvsStatsReportPeriodMin), gu::datetime::Period::max()); conf_.set(Conf::EvsStatsReportPeriod, gu::to_string(stats_report_period_)); reset_timer(T_STATS); return true; } else if (key == Conf::EvsInfoLogMask) { info_mask_ = gu::from_string(val, std::hex); conf_.set(Conf::EvsInfoLogMask, gu::to_string(info_mask_, std::hex)); return true; } else if (key == Conf::EvsDebugLogMask) { debug_mask_ = gu::from_string(val, std::hex); conf_.set(Conf::EvsDebugLogMask, gu::to_string(debug_mask_, std::hex)); return true; } else if (key == Conf::EvsSuspectTimeout) { suspect_timeout_ = check_range( Conf::EvsSuspectTimeout, gu::from_string(val), gu::from_string(Defaults::EvsSuspectTimeoutMin), gu::datetime::Period::max()); conf_.set(Conf::EvsSuspectTimeout, gu::to_string(suspect_timeout_)); reset_timer(T_INACTIVITY); return true; } else if (key == Conf::EvsInactiveTimeout) { inactive_timeout_ = check_range( Conf::EvsInactiveTimeout, gu::from_string(val), gu::from_string(Defaults::EvsInactiveTimeoutMin), gu::datetime::Period::max()); conf_.set(Conf::EvsInactiveTimeout, 
gu::to_string(inactive_timeout_)); reset_timer(T_INACTIVITY); return true; } else if (key == Conf::EvsKeepalivePeriod) { retrans_period_ = check_range( Conf::EvsKeepalivePeriod, gu::from_string(val), gu::from_string(Defaults::EvsRetransPeriodMin), gu::datetime::Period::max()); conf_.set(Conf::EvsKeepalivePeriod, gu::to_string(retrans_period_)); reset_timer(T_RETRANS); return true; } else if (key == Conf::EvsCausalKeepalivePeriod) { causal_keepalive_period_ = check_range( Conf::EvsCausalKeepalivePeriod, gu::from_string(val), gu::datetime::Period(0), gu::datetime::Period::max()); conf_.set(Conf::EvsCausalKeepalivePeriod, gu::to_string(causal_keepalive_period_)); // no timer reset here, causal keepalives don't rely on timer return true; } else if (key == Conf::EvsJoinRetransPeriod) { join_retrans_period_ = check_range( Conf::EvsJoinRetransPeriod, gu::from_string(val), gu::from_string(Defaults::EvsRetransPeriodMin), gu::datetime::Period::max()); conf_.set(Conf::EvsJoinRetransPeriod, gu::to_string(join_retrans_period_)); reset_timer(T_RETRANS); return true; } else if (key == Conf::EvsInstallTimeout) { install_timeout_ = check_range( Conf::EvsInstallTimeout, gu::from_string(val), retrans_period_*2, inactive_timeout_ + 1); conf_.set(Conf::EvsInstallTimeout, gu::to_string(install_timeout_)); reset_timer(T_INSTALL); return true; } else if (key == Conf::EvsUseAggregate) { use_aggregate_ = gu::from_string(val); conf_.set(Conf::EvsUseAggregate, gu::to_string(use_aggregate_)); return true; } else if (key == Conf::EvsDelayMargin) { delay_margin_ = gu::from_string(val); conf_.set(Conf::EvsDelayMargin, gu::to_string(delay_margin_)); return true; } else if (key == Conf::EvsDelayedKeepPeriod) { delayed_keep_period_ = gu::from_string(val); conf_.set(Conf::EvsDelayedKeepPeriod, gu::to_string(delayed_keep_period_)); return true; } else if (key == Conf::EvsEvict) { if (val.size()) { UUID uuid; std::istringstream is(val); uuid.read_stream(is); log_info << "Evicting node " << uuid << " permanently from cluster"; evict(uuid); if (state() == S_OPERATIONAL && current_view_.is_member(uuid) == true) { shift_to(S_GATHER, true); } } else { Protolay::EvictList::const_iterator i, i_next; for (i = evict_list().begin(); i != evict_list().end(); i = i_next) { i_next = i, ++i_next; log_info << "unevicting " << Protolay::EvictList::key(i); unevict(Protolay::EvictList::key(i)); } } return true; } else if (key == Conf::EvsAutoEvict) { auto_evict_ = gu::from_string(val); conf_.set(Conf::EvsAutoEvict, gu::to_string(auto_evict_)); return true; } else if (key == Conf::EvsViewForgetTimeout || key == Conf::EvsInactiveCheckPeriod) { gu_throw_error(EPERM) << "can't change value for '" << key << "' during runtime"; } return false; } void gcomm::evs::Proto::handle_get_status(gu::Status& status) const { status.insert("evs_state", to_string(state_)); status.insert("evs_repl_latency", safe_deliv_latency_.to_string()); std::string delayed_list_str; for (DelayedList::const_iterator i(delayed_list_.begin()); i != delayed_list_.end(); ++i) { if (is_evicted(i->first) == false || current_view_.is_member(i->first) == true) { delayed_list_str += i->first.full_str() + ":" + i->second.addr() + ":" + gu::to_string(i->second.state_change_cnt()); delayed_list_str += ","; } } // Strip trailing comma if (delayed_list_str.empty() == false) { delayed_list_str.resize(delayed_list_str.size() - 1); } status.insert("evs_delayed", delayed_list_str); std::string evict_list_str; for (Protolay::EvictList::const_iterator i(evict_list().begin()); i != 
evict_list().end(); ) { evict_list_str += EvictList::key(i).full_str(); if (++i != evict_list().end()) evict_list_str += ","; } status.insert("evs_evict_list", evict_list_str); if (info_mask_ & I_STATISTICS) { status.insert("evs_safe_hs", hs_safe_.to_string()); status.insert("evs_causal_hs", hs_local_causal_.to_string()); status.insert("evs_outq_avg", gu::to_string(std::fabs(double(send_queue_s_)/ double(n_send_queue_s_)))); status.insert("evs_sent_user", gu::to_string(sent_msgs_[Message::T_USER])); status.insert("evs_sent_delegate", gu::to_string(sent_msgs_[Message::T_DELEGATE])); status.insert("evs_sent_gap", gu::to_string(sent_msgs_[Message::T_GAP])); status.insert("evs_sent_join", gu::to_string(sent_msgs_[Message::T_JOIN])); status.insert("evs_sent_install", gu::to_string(sent_msgs_[Message::T_INSTALL])); status.insert("evs_sent_leave", gu::to_string(sent_msgs_[Message::T_LEAVE])); status.insert("evs_retransmitted", gu::to_string(retrans_msgs_)); status.insert("evs_recovered", gu::to_string(recovered_msgs_)); status.insert("evs_deliv_safe", gu::to_string(delivered_msgs_[O_SAFE])); } } std::ostream& gcomm::evs::operator<<(std::ostream& os, const Proto& p) { os << "evs::proto(" << p.self_string() << ", " << p.to_string(p.state()) << ") {\n"; os << "current_view=" << p.current_view_ << ",\n"; os << "input_map=" << *p.input_map_ << ",\n"; os << "fifo_seq=" << p.fifo_seq_ << ",\n"; os << "last_sent=" << p.last_sent_ << ",\n"; os << "known:\n"; for (NodeMap::const_iterator i(p.known_.begin()); i != p.known_.end(); ++i) { os << NodeMap::key(i) << " at " << p.get_address(NodeMap::key(i)) << "\n"; os << NodeMap::value(i) << "\n"; } if (p.install_message_ != 0) os << "install msg=" << *p.install_message_ << "\n"; os << " }"; return os; } std::string gcomm::evs::Proto::stats() const { std::ostringstream os; os << "\n\tnodes " << current_view_.members().size(); os << "\n\tagreed deliv hist {" << hs_agreed_ << "} "; os << "\n\tsafe deliv hist {" << hs_safe_ << "} "; os << "\n\tcaus deliv hist {" << hs_local_causal_ << "} "; os << "\n\toutq avg " << double(send_queue_s_)/double(n_send_queue_s_); os << "\n\tsent {"; std::copy(sent_msgs_.begin(), sent_msgs_.end(), std::ostream_iterator(os, ",")); os << "}\n\tsent per sec {"; const double norm(double(gu::datetime::Date::now().get_utc() - last_stats_report_.get_utc())/gu::datetime::Sec); std::vector result(7, norm); std::transform(sent_msgs_.begin(), sent_msgs_.end(), result.begin(), result.begin(), std::divides()); std::copy(result.begin(), result.end(), std::ostream_iterator(os, ",")); os << "}\n\trecvd { "; std::copy(recvd_msgs_.begin(), recvd_msgs_.end(), std::ostream_iterator(os, ",")); os << "}\n\trecvd per sec {"; std::fill(result.begin(), result.end(), norm); std::transform(recvd_msgs_.begin(), recvd_msgs_.end(), result.begin(), result.begin(), std::divides()); std::copy(result.begin(), result.end(), std::ostream_iterator(os, ",")); os << "}\n\tretransmitted " << retrans_msgs_ << " "; os << "\n\trecovered " << recovered_msgs_; os << "\n\tdelivered {"; std::copy(delivered_msgs_.begin(), delivered_msgs_.end(), std::ostream_iterator(os, ", ")); os << "}\n\teff(delivered/sent) " << double(accumulate(delivered_msgs_.begin() + 1, delivered_msgs_.begin() + O_SAFE + 1, 0)) /double(accumulate(sent_msgs_.begin(), sent_msgs_.end(), 0)); return os.str(); } void gcomm::evs::Proto::reset_stats() { hs_agreed_.clear(); hs_safe_.clear(); hs_local_causal_.clear(); safe_deliv_latency_.clear(); send_queue_s_ = 0; n_send_queue_s_ = 0; last_stats_report_ = 
gu::datetime::Date::now(); } bool gcomm::evs::Proto::is_msg_from_previous_view(const Message& msg) { ViewList::const_iterator i; if ((i = previous_views_.find(msg.source_view_id())) != previous_views_.end()) { evs_log_debug(D_FOREIGN_MSGS) << " message " << msg << " from previous view " << i->first; return true; } // If node is in current view, check message source view seq, if it is // smaller than current view seq then the message is also from some // previous (but unknown to us) view NodeList::const_iterator ni(current_view_.members().find(msg.source())); if (ni != current_view_.members().end()) { if (msg.source_view_id().seq() < current_view_.id().seq()) { log_warn << "stale message from unknown origin " << msg; return true; } } return false; } void gcomm::evs::Proto::handle_inactivity_timer() { gu_trace(check_inactive()); gu_trace(cleanup_views()); gu_trace(cleanup_evicted()); } void gcomm::evs::Proto::handle_retrans_timer() { evs_log_debug(D_TIMERS) << "retrans timer"; if (state() == S_GATHER) { if (install_message_ != 0) { // Retransmit install message if representative and all commit // gaps have not been received yet. if (is_all_committed() == false && install_message_->source() == uuid()) { evs_log_debug(D_INSTALL_MSGS) << "retrans install"; gu::Buffer buf; install_message_->set_flags( install_message_->flags() | Message::F_RETRANS); (void)serialize(*install_message_, buf); Datagram dg(buf); // Must not be sent as delegate, newly joining node // will filter them out in handle_msg(). gu_trace(send_down(dg, ProtoDownMeta())); } evs_log_debug(D_GAP_MSGS) << "resend commit gap"; // Resend commit gap gu_trace(send_gap(EVS_CALLER, UUID::nil(), install_message_->install_view_id(), Range(), true)); } else { evs_log_debug(D_JOIN_MSGS) << "retrans join"; gu_trace(send_join(true)); } } else if (state() == S_INSTALL) { gcomm_assert(install_message_ != 0); gu_trace(send_gap(EVS_CALLER, UUID::nil(), install_message_->install_view_id(), Range(), true)); gu_trace(send_gap(EVS_CALLER, UUID::nil(), install_message_->install_view_id(), Range())); } else if (state() == S_OPERATIONAL) { const seqno_t prev_last_sent(last_sent_); evs_log_debug(D_TIMERS) << "send user timer, last_sent=" << last_sent_; Datagram dg; gu_trace((void)send_user(dg, 0xff, O_DROP, -1, -1)); if (prev_last_sent == last_sent_) { log_warn << "could not send keepalive"; } } else if (state() == S_LEAVING) { evs_log_debug(D_TIMERS) << "send leave timer"; profile_enter(send_leave_prof_); send_leave(false); profile_leave(send_leave_prof_); } } void gcomm::evs::Proto::isolate(gu::datetime::Period period) { isolation_end_ = gu::datetime::Date::now() + period; } void gcomm::evs::Proto::handle_install_timer() { gcomm_assert(state() == S_GATHER || state() == S_INSTALL); log_warn << self_string() << " install timer expired"; bool is_cons(consensus_.is_consensus()); bool is_repr(is_representative(uuid())); evs_log_info(I_STATE) << "before inspection:"; evs_log_info(I_STATE) << "consensus: " << is_cons; evs_log_info(I_STATE) << "repr : " << is_repr; evs_log_info(I_STATE) << "state dump for diagnosis:"; std::cerr << *this << std::endl; if (install_timeout_count_ < max_install_timeouts_ ) { // before reaching max_install_timeouts, declare only inconsistent // nodes as inactive for (NodeMap::iterator i = known_.begin(); i != known_.end(); ++i) { const UUID& node_uuid(NodeMap::key(i)); const Node& node(NodeMap::value(i)); if (node_uuid != uuid() && (node.join_message() == 0 || consensus_.is_consistent(*node.join_message()) == false)) { 
evs_log_info(I_STATE) << " setting source " << NodeMap::key(i) << " as inactive due to expired install timer"; set_inactive(NodeMap::key(i)); } } } else if (install_timeout_count_ == max_install_timeouts_) { // max install timeouts reached, declare all other nodes // as inactive for (NodeMap::iterator i = known_.begin(); i != known_.end(); ++i) { if (NodeMap::key(i) != uuid()) { evs_log_info(I_STATE) << " setting source " << NodeMap::key(i) << " as inactive due to expired install timer"; set_inactive(NodeMap::key(i)); } } log_info << "max install timeouts reached, will isolate node " << "for " << suspect_timeout_ + inactive_timeout_; isolate(suspect_timeout_ + inactive_timeout_); } else if (install_timeout_count_ > max_install_timeouts_) { log_info << "going to give up, state dump for diagnosis:"; std::cerr << *this << std::endl; gu_throw_fatal << self_string() << " failed to form singleton view after exceeding " << "max_install_timeouts " << max_install_timeouts_ << ", giving up"; } if (install_message_ != 0) { for (NodeMap::iterator i = known_.begin(); i != known_.end(); ++i) { if (NodeMap::value(i).committed() == false) { log_info << self_string() << " node " << NodeMap::key(i) << " failed to commit for install message, " << "declaring inactive"; if (NodeMap::key(i) != uuid()) { set_inactive(NodeMap::key(i)); } } } } else { log_info << "no install message received"; } shift_to(S_GATHER, true); is_cons = consensus_.is_consensus(); is_repr = is_representative(uuid()); evs_log_info(I_STATE) << "after inspection:"; evs_log_info(I_STATE) << "consensus: " << is_cons; evs_log_info(I_STATE) << "repr : " << is_repr; if (is_cons == true && is_repr == true) { send_install(EVS_CALLER); } install_timeout_count_++; } void gcomm::evs::Proto::handle_stats_timer() { reset_stats(); #ifdef GCOMM_PROFILE evs_log_info(I_PROFILING) << "\nprofiles:\n"; evs_log_info(I_PROFILING) << send_user_prof_ << "\n"; evs_log_info(I_PROFILING) << send_gap_prof_ << "\n"; evs_log_info(I_PROFILING) << send_join_prof_ << "\n"; evs_log_info(I_PROFILING) << send_install_prof_ << "\n"; evs_log_info(I_PROFILING) << send_leave_prof_ << "\n"; evs_log_info(I_PROFILING) << consistent_prof_ << "\n"; evs_log_info(I_PROFILING) << consensus_prof_ << "\n"; evs_log_info(I_PROFILING) << shift_to_prof_ << "\n"; evs_log_info(I_PROFILING) << input_map_prof_ << "\n"; evs_log_info(I_PROFILING) << delivery_prof_ << "\n"; #endif // GCOMM_PROFILE } class TimerSelectOp { public: TimerSelectOp(const gcomm::evs::Proto::Timer t_) : t(t_) { } bool operator()(const gcomm::evs::Proto::TimerList::value_type& vt) const { return (gcomm::evs::Proto::TimerList::value(vt) == t); } private: gcomm::evs::Proto::Timer const t; }; gu::datetime::Date gcomm::evs::Proto::next_expiration(const Timer t) const { gcomm_assert(state() != S_CLOSED); gu::datetime::Date now(gu::datetime::Date::now()); switch (t) { case T_INACTIVITY: return (now + inactive_check_period_); case T_RETRANS: switch (state()) { case S_OPERATIONAL: case S_LEAVING: return (now + retrans_period_); case S_GATHER: case S_INSTALL: return (now + join_retrans_period_); default: gu_throw_fatal; } case T_INSTALL: switch (state()) { case S_GATHER: case S_INSTALL: return (now + install_timeout_); default: return gu::datetime::Date::max(); } case T_STATS: return (now + stats_report_period_); } gu_throw_fatal; } void timer_list_erase_by_type(gcomm::evs::Proto::TimerList& timer_list, gcomm::evs::Proto::Timer timer) { gcomm::evs::Proto::TimerList::iterator i, i_next; for (i = timer_list.begin(); i != 
timer_list.end(); i = i_next) { i_next = i, ++i_next; if (gcomm::evs::Proto::TimerList::value(i) == timer) { timer_list.erase(i); } } } void gcomm::evs::Proto::reset_timer(Timer t) { timer_list_erase_by_type(timers_, t); timers_.insert(std::make_pair(next_expiration(t), t)); } void gcomm::evs::Proto::cancel_timer(Timer t) { timer_list_erase_by_type(timers_, t); } gu::datetime::Date gcomm::evs::Proto::handle_timers() { gu::datetime::Date now(gu::datetime::Date::now()); while (timers_.empty() == false && TimerList::key(timers_.begin()) <= now) { Timer t(TimerList::value(timers_.begin())); timers_.erase(timers_.begin()); switch (t) { case T_INACTIVITY: handle_inactivity_timer(); break; case T_RETRANS: handle_retrans_timer(); break; case T_INSTALL: handle_install_timer(); break; case T_STATS: handle_stats_timer(); break; } if (state() == S_CLOSED) { return gu::datetime::Date::max(); } reset_timer(t); } if (timers_.empty() == true) { evs_log_debug(D_TIMERS) << "no timers set"; return gu::datetime::Date::max(); } return TimerList::key(timers_.begin()); } void gcomm::evs::Proto::check_inactive() { const gu::datetime::Date now(gu::datetime::Date::now()); if (last_inactive_check_ + inactive_check_period_*3 < now) { log_warn << "last inactive check more than " << inactive_check_period_*3 << " ago (" << (now - last_inactive_check_) << "), skipping check"; last_inactive_check_ = now; return; } NodeMap::value(self_i_).set_tstamp(gu::datetime::Date::now()); std::for_each(known_.begin(), known_.end(), InspectNode()); bool has_inactive(false); size_t n_suspected(0); bool do_send_delayed_list(false); // Iterate over known nodes and check inactive/suspected/delayed status for (NodeMap::iterator i(known_.begin()); i != known_.end(); ++i) { if (i == self_i_) continue; // No need to check self const UUID& node_uuid(NodeMap::key(i)); Node& node(NodeMap::value(i)); if (node_uuid != uuid() && (node.is_inactive() == true || node.is_suspected() == true )) { if (node.operational() == true && node.is_inactive() == true) { log_info << self_string() << " detected inactive node: " << node_uuid; } else if (node.is_suspected() == true && node.is_inactive() == false) { log_info << self_string() << " suspecting node: " << node_uuid; } if (node.is_inactive() == true) { set_inactive(node_uuid); } if (node.is_suspected() == true && node.operational() == true) { ++n_suspected; if (node.join_message() == 0) { log_info << self_string() << " suspected node without join message, declaring inactive"; set_inactive(node_uuid); } } has_inactive = true; } DelayedList::iterator dli(delayed_list_.find(node_uuid)); if (node.seen_tstamp() + retrans_period_ + delay_margin_ <= now) { if (node.index() != std::numeric_limits::max()) { // Delayed node in group, check input map state and request // message recovery if necessary Range range(input_map_->range(node.index())); evs_log_info(I_STATE) << "delayed " << node_uuid << " requesting range " << Range(range.lu(), last_sent_); if (last_sent_ >= range.lu()) { // Request recovering message from all nodes (indicated // by last arg) to increase probablity of receiving the // message. 
gu_trace(send_gap(EVS_CALLER, node_uuid, current_view_.id(), Range(range.lu(), last_sent_), false, true)); } } if (dli == delayed_list_.end()) { delayed_list_.insert( std::make_pair(node_uuid, DelayedEntry(get_address(node_uuid)))); } else { dli->second.set_tstamp(now); dli->second.set_state(DelayedEntry::S_DELAYED, delayed_keep_period_, now); evs_log_debug(D_STATE) << "set '" << dli->first << "' delayed state to S_DELAYED , cnt = " << dli->second.state_change_cnt(); // todo(dirlt): make threshold as a configurable variable ? if (dli->second.state_change_cnt() > 0) { do_send_delayed_list = true; } } } else if (dli != delayed_list_.end()) { const size_t prev_cnt(dli->second.state_change_cnt()); dli->second.set_state(DelayedEntry::S_OK, delayed_keep_period_, now); if (prev_cnt != dli->second.state_change_cnt()) { dli->second.set_tstamp(now); } evs_log_debug(D_STATE) << "set '" << dli->first << "' delayed state to S_OK. prev_cnt = " << prev_cnt << ", cur_cnt = " << dli->second.state_change_cnt(); if (dli->second.state_change_cnt() > 0) { do_send_delayed_list = true; } } } // Clean up delayed list and evict list messages { DelayedList::iterator i, i_next; for (i = delayed_list_.begin(); i != delayed_list_.end(); i = i_next) { i_next = i, ++i_next; // State change count has decayed back to zero // or node is already evicted and not in the current view // anymore. if ((i->second.state_change_cnt() == 0 && i->second.state() == DelayedEntry::S_OK) || (is_evicted(i->first) == true && current_view_.is_member(i->first) == false)) { log_debug << "remove '" << i->first << "' from delayed_list"; delayed_list_.erase(i); } } for (NodeMap::iterator i(known_.begin()); i != known_.end(); ++i) { Node& node(NodeMap::value(i)); const DelayedListMessage* const elm(node.delayed_list_message()); if (elm != 0 && elm->tstamp() + delayed_keep_period_ < now) { log_debug << "discarding expired elm from " << elm->source(); node.set_delayed_list_message(0); } } } if (current_view_.version() > 0 && do_send_delayed_list == true && auto_evict_ > 0) { send_delayed_list(); } // All other nodes are under suspicion, set all others as inactive. // This will speed up recovery when this node has been isolated from // other group. Note that this should be done only if known size is // greater than 2 in order to avoid immediate split brain. 
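    // For example, a node cut off from a five node group will see the four
    // remaining nodes as suspected (n_suspected + 1 == known_.size()) and can
    // give up on them right away instead of waiting for the full inactive
    // timeout. In a two node group the same shortcut would make both sides
    // drop each other immediately, hence the known_.size() > 2 guard.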
if (known_.size() > 2 && n_suspected + 1 == known_.size()) { for (NodeMap::iterator i = known_.begin(); i != known_.end(); ++i) { if (NodeMap::key(i) != uuid()) { evs_log_info(I_STATE) << " setting source " << NodeMap::key(i) << " inactive (other nodes under suspicion)"; set_inactive(NodeMap::key(i)); } } } if (has_inactive == true && state() == S_OPERATIONAL) { profile_enter(shift_to_prof_); gu_trace(shift_to(S_GATHER, true)); profile_leave(shift_to_prof_); } else if (has_inactive == true && state() == S_LEAVING && n_operational() == 1) { profile_enter(shift_to_prof_); gu_trace(shift_to(S_CLOSED)); profile_leave(shift_to_prof_); } last_inactive_check_ = now; // Check if isolation period has ended if (isolation_end_ != gu::datetime::Date::zero() && isolation_end_ <= now) { log_info << "ending isolation"; isolation_end_ = gu::datetime::Date::zero(); } } void gcomm::evs::Proto::set_inactive(const UUID& node_uuid) { NodeMap::iterator i; gcomm_assert(node_uuid != uuid()); gu_trace(i = known_.find_checked(node_uuid)); evs_log_debug(D_STATE) << "setting " << node_uuid << " inactive"; Node& node(NodeMap::value(i)); node.set_tstamp(gu::datetime::Date::zero()); node.set_join_message(0); // node.set_leave_message(0); node.set_operational(false); } bool gcomm::evs::Proto::is_inactive(const UUID& uuid) const { NodeMap::const_iterator i; gu_trace(i = known_.find_checked(uuid)); const Node& node(NodeMap::value(i)); return (node.operational() == false); } void gcomm::evs::Proto::cleanup_foreign(const InstallMessage& im) { NodeMap::iterator i, i_next; for (i = known_.begin(); i != known_.end(); i = i_next) { const UUID& uuid(NodeMap::key(i)); i_next = i, ++i_next; const MessageNodeList::const_iterator mni(im.node_list().find(uuid)); if (mni == im.node_list().end() || MessageNodeList::value(mni).operational() == false) { known_.erase(i); } } } void gcomm::evs::Proto::cleanup_views() { gu::datetime::Date now(gu::datetime::Date::now()); ViewList::iterator i, i_next; for (i = previous_views_.begin(); i != previous_views_.end(); i = i_next) { i_next = i, ++i_next; if (i->second + view_forget_timeout_ <= now) { evs_log_debug(D_STATE) << " erasing view: " << i->first; previous_views_.erase(i); } } } void gcomm::evs::Proto::cleanup_evicted() { gu::datetime::Date now(gu::datetime::Date::now()); Protolay::EvictList::const_iterator i, i_next; for (i = evict_list().begin(); i != evict_list().end(); i = i_next) { i_next = i, ++i_next; if (Protolay::EvictList::value(i) + view_forget_timeout_ <= now) { log_info << "unevicting " << Protolay::EvictList::key(i); unevict(Protolay::EvictList::key(i)); } } } size_t gcomm::evs::Proto::n_operational() const { NodeMap::const_iterator i; size_t ret = 0; for (i = known_.begin(); i != known_.end(); ++i) { if (i->second.operational() == true) ret++; } return ret; } void gcomm::evs::Proto::deliver_reg_view(const InstallMessage& im, const View& prev_view) { View view(im.version(), im.install_view_id()); for (MessageNodeList::const_iterator i(im.node_list().begin()); i != im.node_list().end(); ++i) { const UUID& uuid(MessageNodeList::key(i)); const MessageNode& mn(MessageNodeList::value(i)); // 1) Operational nodes will be members of new view // 2) Operational nodes that were not present in previous // view are going also to joined set // 3) Leaving nodes go to left set // 4) All other nodes present in previous view but not in // member of left set are considered partitioned if (mn.operational() == true) { view.add_member(uuid, mn.segment()); if (prev_view.is_member(uuid) == false) { 
view.add_joined(uuid, mn.segment()); } } else if (mn.leaving() == true) { view.add_left(uuid, mn.segment()); } else { // Partitioned set is constructed after this loop } // If node has been evicted, it should have been added to // evicted list via JOIN messages. assert(mn.evicted() == false || is_evicted(uuid) == true); } // Loop over previous view and add each node not in new view // member of left set as partitioned. for (NodeList::const_iterator i(prev_view.members().begin()); i != prev_view.members().end(); ++i) { const UUID& uuid(NodeList::key(i)); const gcomm::Node& mn(NodeList::value(i)); if (view.is_member(uuid) == false && view.is_leaving(uuid) == false) { view.add_partitioned(uuid, mn.segment()); } } evs_log_info(I_VIEWS) << "delivering view " << view; // This node must be a member of the view it delivers and // view id UUID must be of one of the members. gcomm_assert(view.is_member(uuid()) == true); gcomm_assert(view.is_member(view.id().uuid()) == true) << "view id UUID " << view.id().uuid() << " not found from reg view members " << view.members() << " must abort to avoid possibility of two groups " << "with the same view id"; set_stable_view(view); ProtoUpMeta up_meta(UUID::nil(), ViewId(), &view); send_up(Datagram(), up_meta); } void gcomm::evs::Proto::deliver_trans_view(const InstallMessage& im, const View& curr_view) { // Trans view is intersection of members in curr_view // and members going to be in the next view that come from // curr_view according to install message View view(current_view_.version(), ViewId(V_TRANS, curr_view.id().uuid(), curr_view.id().seq())); for (MessageNodeList::const_iterator i(im.node_list().begin()); i != im.node_list().end(); ++i) { const UUID& uuid(MessageNodeList::key(i)); const MessageNode& mn(MessageNodeList::value(i)); if (curr_view.id() == mn.view_id() && curr_view.is_member(uuid) == true) { // 1) Operational nodes go to next view // 2) Leaving nodes go to left set // 3) All other nodes present in previous view but not in // member of left set are considered partitioned if (mn.operational() == true) { view.add_member(uuid, mn.segment()); } else if (mn.leaving() == true) { view.add_left(uuid, mn.segment()); } else { // Partitioned set is constructed after this loop } } } // Loop over current view and add each node not in new view // member of left set as partitioned. for (NodeList::const_iterator i(curr_view.members().begin()); i != curr_view.members().end(); ++i) { const UUID& uuid(NodeList::key(i)); const gcomm::Node& mn(NodeList::value(i)); if (view.is_member(uuid) == false && view.is_leaving(uuid) == false) { view.add_partitioned(uuid, mn.segment()); } } // This node must be a member of the view it delivers and // if the view is the last transitional, view must have // exactly one member and no-one in left set. gcomm_assert(view.is_member(uuid()) == true); evs_log_info(I_VIEWS) << " delivering view " << view; ProtoUpMeta up_meta(UUID::nil(), ViewId(), &view); gu_trace(send_up(Datagram(), up_meta)); } void gcomm::evs::Proto::deliver_empty_view() { View view(0, V_REG); evs_log_info(I_VIEWS) << "delivering view " << view; ProtoUpMeta up_meta(UUID::nil(), ViewId(), &view); send_up(Datagram(), up_meta); } void gcomm::evs::Proto::setall_committed(bool val) { for (NodeMap::iterator i = known_.begin(); i != known_.end(); ++i) { NodeMap::value(i).set_committed(val); } } // Check if commit gaps from all known nodes found from install message have // been seen. 
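// The expected sequence is: the representative sends an install message,
// each node listed in it replies with a commit gap (F_COMMIT); once
// is_all_committed() holds, the node shifts to S_INSTALL and sends an
// install gap, and once is_all_installed() holds it shifts to
// S_OPERATIONAL (see handle_gap() and shift_to()).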
bool gcomm::evs::Proto::is_all_committed() const { gcomm_assert(install_message_ != 0); for (NodeMap::const_iterator i(known_.begin()); i != known_.end(); ++i) { const UUID& uuid(NodeMap::key(i)); const Node& inst(NodeMap::value(i)); if (install_message_->node_list().find(uuid) != install_message_->node_list().end() && inst.operational() == true && inst.committed() == false) { return false; } } return true; } void gcomm::evs::Proto::setall_installed(bool val) { for (NodeMap::iterator i = known_.begin(); i != known_.end(); ++i) { NodeMap::value(i).set_installed(val); } } // Check if gaps from new view from all known nodes found from install // message have been seen. bool gcomm::evs::Proto::is_all_installed() const { gcomm_assert(install_message_ != 0); for (NodeMap::const_iterator i = known_.begin(); i != known_.end(); ++i) { const UUID& uuid(NodeMap::key(i)); const Node& inst(NodeMap::value(i)); if (install_message_->node_list().find(uuid) != install_message_->node_list().end() && inst.operational() == true && inst.installed() == false) { return false; } } return true; } void gcomm::evs::Proto::cleanup_joins() { for (NodeMap::iterator i = known_.begin(); i != known_.end(); ++i) { NodeMap::value(i).set_join_message(0); } } bool gcomm::evs::Proto::is_representative(const UUID& uuid) const { for (NodeMap::const_iterator i = known_.begin(); i != known_.end(); ++i) { if (NodeMap::value(i).operational() == true && NodeMap::value(i).is_inactive() == false) { assert(NodeMap::value(i).leave_message() == 0); if (NodeMap::value(i).leave_message() != 0) { log_warn << "operational node " << NodeMap::key(i) << " with leave message: " << NodeMap::value(i); continue; } return (uuid == NodeMap::key(i)); } } return false; } bool gcomm::evs::Proto::is_all_suspected(const UUID& uuid) const { for (NodeMap::const_iterator i = known_.begin(); i != known_.end(); ++i) { const Node& node(NodeMap::value(i)); if (node.operational() == true) { const JoinMessage* jm(node.join_message()); if (!jm) return false; const MessageNodeList::const_iterator j(jm->node_list().find(uuid)); if (!(j != jm->node_list().end() && MessageNodeList::value(j).suspected())) return false; } } return true; } ///////////////////////////////////////////////////////////////////////////// // Message sending ///////////////////////////////////////////////////////////////////////////// bool gcomm::evs::Proto::is_flow_control(const seqno_t seq, const seqno_t win) const { gcomm_assert(seq != -1 && win != -1); const seqno_t base(input_map_->safe_seq()); if (seq > base + win) { return true; } return false; } int gcomm::evs::Proto::send_user(Datagram& dg, uint8_t const user_type, Order const order, seqno_t const win, seqno_t const up_to_seqno, size_t const n_aggregated) { assert(state() == S_LEAVING || state() == S_GATHER || state() == S_OPERATIONAL); assert(dg.offset() == 0); assert(n_aggregated == 1 || output_.size() >= n_aggregated); gcomm_assert(up_to_seqno == -1 || up_to_seqno >= last_sent_); gcomm_assert(up_to_seqno == -1 || win == -1); int ret; const seqno_t seq(last_sent_ + 1); if (win != -1 && is_flow_control(seq, win) == true) { return EAGAIN; } // seq_range max 0xff because of Message seq_range_ field limitation seqno_t seq_range( std::min(up_to_seqno == -1 ? 0 : up_to_seqno - seq, evs::seqno_t(0xff))); seqno_t last_msg_seq(seq + seq_range); uint8_t flags; // If output queue wont contain messages after this patch, // up_to_seqno is given (msg completion) or flow contol would kick in // at next batch, don't set F_MSG_MORE. 
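    // Flow control is window based: is_flow_control(seq, win) blocks the
    // send when seq > input_map_->safe_seq() + win. E.g. with safe_seq 10
    // and win 4, seqnos up to 14 may still be sent while 15 returns EAGAIN.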
if (output_.size() <= n_aggregated || up_to_seqno != -1 || (win != -1 && is_flow_control(last_msg_seq + 1, win) == true)) { flags = 0; } else { flags = Message::F_MSG_MORE; } if (n_aggregated > 1) { flags |= Message::F_AGGREGATE; } // Maximize seq range in the case next message batch won't be sent // immediately. if ((flags & Message::F_MSG_MORE) == 0 && up_to_seqno == -1) { seq_range = input_map_->max_hs() - seq; seq_range = std::max(static_cast(0), seq_range); seq_range = std::min(static_cast(0xff), seq_range); if (seq_range != 0) { log_debug << "adjusted seq range to: " << seq_range; last_msg_seq = seq + seq_range; } } gcomm_assert(last_msg_seq >= seq && last_msg_seq - seq <= 0xff); gcomm_assert(seq_range >= 0 && seq_range <= 0xff); UserMessage msg(version_, uuid(), current_view_.id(), seq, input_map_->aru_seq(), seq_range, order, ++fifo_seq_, user_type, flags); // Insert first to input map to determine correct aru seq Range range; gu_trace(range = input_map_->insert(NodeMap::value(self_i_).index(), msg, dg)); gcomm_assert(range.hs() == last_msg_seq) << msg << " " << *input_map_ << " " << *this; last_sent_ = last_msg_seq; assert(range.hs() == last_sent_); update_im_safe_seq(NodeMap::value(self_i_).index(), input_map_->aru_seq()); msg.set_aru_seq(input_map_->aru_seq()); evs_log_debug(D_USER_MSGS) << " sending " << msg; gu_trace(push_header(msg, dg)); if ((ret = send_down(dg, ProtoDownMeta())) != 0) { log_debug << "send failed: " << strerror(ret); } gu_trace(pop_header(msg, dg)); sent_msgs_[Message::T_USER]++; if (delivering_ == false) { gu_trace(deliver()); gu_trace(deliver_local()); } return 0; } size_t gcomm::evs::Proto::aggregate_len() const { bool is_aggregate(false); size_t ret(0); AggregateMessage am; std::deque >::const_iterator i(output_.begin()); const Order ord(i->second.order()); ret += i->first.len() + am.serial_size(); for (++i; i != output_.end() && i->second.order() == ord; ++i) { if (ret + i->first.len() + am.serial_size() <= mtu()) { ret += i->first.len() + am.serial_size(); is_aggregate = true; } else { break; } } evs_log_debug(D_USER_MSGS) << "is aggregate " << is_aggregate << " ret " << ret; return (is_aggregate == true ? 
ret : 0); } int gcomm::evs::Proto::send_user(const seqno_t win) { gcomm_assert(output_.empty() == false); gcomm_assert(state() == S_OPERATIONAL); gcomm_assert(win <= send_window_); int ret; size_t alen; if (use_aggregate_ == true && (alen = aggregate_len()) > 0) { // Messages can be aggregated into single message send_buf_.resize(alen); size_t offset(0); size_t n(0); std::deque >::iterator i(output_.begin()); Order ord(i->second.order()); while ((alen > 0 && i != output_.end())) { const Datagram& dg(i->first); const ProtoDownMeta dm(i->second); AggregateMessage am(0, dg.len(), dm.user_type()); gcomm_assert(alen >= dg.len() + am.serial_size()); gu_trace(offset = am.serialize(&send_buf_[0], send_buf_.size(), offset)); std::copy(dg.header() + dg.header_offset(), dg.header() + dg.header_size(), &send_buf_[0] + offset); offset += (dg.header_len()); std::copy(dg.payload().begin(), dg.payload().end(), &send_buf_[0] + offset); offset += dg.payload().size(); alen -= dg.len() + am.serial_size(); ++n; ++i; } Datagram dg(gu::SharedBuffer(new gu::Buffer(send_buf_.begin(), send_buf_.end()))); if ((ret = send_user(dg, 0xff, ord, win, -1, n)) == 0) { while (n-- > 0) { output_.pop_front(); } } } else { std::pair wb(output_.front()); if ((ret = send_user(wb.first, wb.second.user_type(), wb.second.order(), win, -1)) == 0) { output_.pop_front(); } } return ret; } void gcomm::evs::Proto::complete_user(const seqno_t high_seq) { gcomm_assert(state() == S_OPERATIONAL || state() == S_GATHER); evs_log_debug(D_USER_MSGS) << "completing seqno to " << high_seq;; Datagram wb; int err; profile_enter(send_user_prof_); err = send_user(wb, 0xff, O_DROP, -1, high_seq); profile_leave(send_user_prof_); if (err != 0) { log_debug << "failed to send completing msg " << strerror(err) << " seq=" << high_seq << " send_window=" << send_window_ << " last_sent=" << last_sent_; } } int gcomm::evs::Proto::send_delegate(Datagram& wb) { DelegateMessage dm(version_, uuid(), current_view_.id(), ++fifo_seq_); push_header(dm, wb); int ret = send_down(wb, ProtoDownMeta()); pop_header(dm, wb); sent_msgs_[Message::T_DELEGATE]++; return ret; } void gcomm::evs::Proto::send_gap(EVS_CALLER_ARG, const UUID& range_uuid, const ViewId& source_view_id, const Range range, const bool commit, const bool req_all) { gcomm_assert((commit == false && source_view_id == current_view_.id()) || install_message_ != 0); // TODO: Investigate if gap sending can be somehow limited, // message loss happen most probably during congestion and // flooding network with gap messages won't probably make // conditions better uint8_t flags(0); if (commit == true) flags |= Message::F_COMMIT; if (req_all) flags |= Message::F_RETRANS; GapMessage gm(version_, uuid(), source_view_id, (source_view_id == current_view_.id() ? last_sent_ : (commit == true ? install_message_->fifo_seq() : -1)), (source_view_id == current_view_.id() ? 
input_map_->aru_seq() : -1), ++fifo_seq_, range_uuid, range, flags); evs_log_debug(D_GAP_MSGS) << EVS_LOG_METHOD << gm; gu::Buffer buf; serialize(gm, buf); Datagram dg(buf); int err = send_down(dg, ProtoDownMeta()); if (err != 0) { log_debug << "send failed: " << strerror(err); } sent_msgs_[Message::T_GAP]++; gu_trace(handle_gap(gm, self_i_)); } void gcomm::evs::Proto::populate_node_list(MessageNodeList* node_list) const { for (NodeMap::const_iterator i = known_.begin(); i != known_.end(); ++i) { const UUID& node_uuid(NodeMap::key(i)); const Node& node(NodeMap::value(i)); MessageNode mnode(node.operational(), node.suspected(), is_evicted(node_uuid)); if (node_uuid != uuid()) { const JoinMessage* jm(node.join_message()); const LeaveMessage* lm(node.leave_message()); // if (jm != 0) { const ViewId& nsv(jm->source_view_id()); const MessageNode& mn(MessageNodeList::value(jm->node_list().find_checked(node_uuid))); mnode = MessageNode(node.operational(), node.is_suspected(), node.segment(), is_evicted(node_uuid), -1, jm->source_view_id(), (nsv == current_view_.id() ? input_map_->safe_seq(node.index()) : mn.safe_seq()), (nsv == current_view_.id() ? input_map_->range(node.index()) : mn.im_range())); } else if (lm != 0) { const ViewId& nsv(lm->source_view_id()); mnode = MessageNode(node.operational(), node.is_suspected(), node.segment(), is_evicted(node_uuid), lm->seq(), nsv, (nsv == current_view_.id() ? input_map_->safe_seq(node.index()) : -1), (nsv == current_view_.id() ? input_map_->range(node.index()) : Range())); } else if (current_view_.is_member(node_uuid) == true) { mnode = MessageNode(node.operational(), node.is_suspected(), node.segment(), is_evicted(node_uuid), -1, current_view_.id(), input_map_->safe_seq(node.index()), input_map_->range(node.index())); } } else { mnode = MessageNode(true, false, node.segment(), is_evicted(node_uuid), -1, current_view_.id(), input_map_->safe_seq(node.index()), input_map_->range(node.index())); } gu_trace((void)node_list->insert_unique(std::make_pair(node_uuid, mnode))); } // Iterate over evicted_list and add evicted nodes not yet in node list. for (Protolay::EvictList::const_iterator i(evict_list().begin()); i != evict_list().end(); ++i) { if (node_list->find(Protolay::EvictList::key(i)) == node_list->end()) { // default arguments are evil. 
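            // Evicted nodes no longer present in known_ still get an entry
            // so that peers learn about the eviction from this node list.
            // The explicit arguments presumably map to (operational,
            // suspected, segment, evicted), spelled out here instead of
            // relying on defaults (hence the note above).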
MessageNode mnode(false, false, 0, true); gu_trace((void)node_list->insert_unique( std::make_pair(Protolay::EvictList::key(i), mnode))); } } evs_log_debug(D_CONSENSUS) << "populate node list:\n" << *node_list; } const gcomm::evs::JoinMessage& gcomm::evs::Proto::create_join() { MessageNodeList node_list; gu_trace(populate_node_list(&node_list)); JoinMessage jm(version_, uuid(), current_view_.id(), input_map_->safe_seq(), input_map_->aru_seq(), ++fifo_seq_, node_list); NodeMap::value(self_i_).set_join_message(&jm); evs_log_debug(D_JOIN_MSGS) << " created join message " << jm; return *NodeMap::value(self_i_).join_message(); } void gcomm::evs::Proto::set_join(const JoinMessage& jm, const UUID& source) { NodeMap::iterator i; gu_trace(i = known_.find_checked(source)); NodeMap::value(i).set_join_message(&jm);; } void gcomm::evs::Proto::set_leave(const LeaveMessage& lm, const UUID& source) { NodeMap::iterator i; gu_trace(i = known_.find_checked(source)); Node& inst(NodeMap::value(i)); if (inst.leave_message()) { evs_log_debug(D_LEAVE_MSGS) << "Duplicate leave:\told: " << *inst.leave_message() << "\tnew: " << lm; } else { inst.set_leave_message(&lm); } } void gcomm::evs::Proto::send_join(bool handle) { assert(output_.empty() == true); JoinMessage jm(create_join()); gu::Buffer buf; serialize(jm, buf); Datagram dg(buf); int err = send_down(dg, ProtoDownMeta()); if (err != 0) { log_debug << "send failed: " << strerror(err); } sent_msgs_[Message::T_JOIN]++; if (handle == true) { handle_join(jm, self_i_); } } void gcomm::evs::Proto::send_leave(bool handle) { gcomm_assert(state() == S_LEAVING); // If no messages have been sent, generate one dummy to // trigger message acknowledgement mechanism if (last_sent_ == -1 && output_.empty() == true) { Datagram wb; gu_trace(send_user(wb, 0xff, O_DROP, -1, -1)); } /* Move all pending messages from output to input map */ while (output_.empty() == false) { std::pair wb = output_.front(); if (send_user(wb.first, wb.second.user_type(), wb.second.order(), -1, -1) != 0) { gu_throw_fatal << "send_user() failed"; } output_.pop_front(); } LeaveMessage lm(version_, uuid(), current_view_.id(), last_sent_, input_map_->aru_seq(), ++fifo_seq_); evs_log_debug(D_LEAVE_MSGS) << "sending leave msg " << lm; gu::Buffer buf; serialize(lm, buf); Datagram dg(buf); int err = send_down(dg, ProtoDownMeta()); if (err != 0) { log_debug << "send failed " << strerror(err); } sent_msgs_[Message::T_LEAVE]++; if (handle == true) { handle_leave(lm, self_i_); } } struct ViewIdCmp { bool operator()(const gcomm::evs::NodeMap::value_type& a, const gcomm::evs::NodeMap::value_type& b) const { using gcomm::evs::NodeMap; gcomm_assert(NodeMap::value(a).join_message() != 0 && NodeMap::value(b).join_message() != 0); return (NodeMap::value(a).join_message()->source_view_id().seq() < NodeMap::value(b).join_message()->source_view_id().seq()); } }; struct ProtoVerCmp { bool operator()(const gcomm::evs::NodeMap::value_type& a, const gcomm::evs::NodeMap::value_type& b) const { using gcomm::evs::NodeMap; gcomm_assert(NodeMap::value(a).join_message() != 0 && NodeMap::value(b).join_message() != 0); return (NodeMap::value(a).join_message()->version() < NodeMap::value(b).join_message()->version()); } }; void gcomm::evs::Proto::send_install(EVS_CALLER_ARG) { gcomm_assert(consensus_.is_consensus() == true && is_representative(uuid()) == true) << *this; // Select list of operational nodes from known NodeMap oper_list; for_each(known_.begin(), known_.end(), OperationalSelect(oper_list)); NodeMap::const_iterator max_node = 
max_element(oper_list.begin(), oper_list.end(), ViewIdCmp()); // Compute maximum known view id seq max_view_id_seq_ = std::max(max_view_id_seq_, NodeMap::value(max_node).join_message()->source_view_id().seq()); // Compute highest commonly supported protocol version. // Oper_list is non-empty, join message existence is asserted. const int version( NodeMap::value( std::min_element(oper_list.begin(), oper_list.end(), ProtoVerCmp())).join_message()->version()); MessageNodeList node_list; populate_node_list(&node_list); InstallMessage imsg(version, uuid(), current_view_.id(), ViewId(V_REG, uuid(), max_view_id_seq_ + attempt_seq_), input_map_->safe_seq(), input_map_->aru_seq(), ++fifo_seq_, node_list); ++attempt_seq_; evs_log_debug(D_INSTALL_MSGS) << EVS_LOG_METHOD << imsg; evs_log_info(I_STATE) << "sending install message" << imsg; gcomm_assert(consensus_.is_consistent(imsg)); gu::Buffer buf; serialize(imsg, buf); Datagram dg(buf); int err = send_down(dg, ProtoDownMeta()); if (err != 0) { log_debug << "send failed: " << strerror(err); } sent_msgs_[Message::T_INSTALL]++; handle_install(imsg, self_i_); } void gcomm::evs::Proto::send_delayed_list() { DelayedListMessage elm(version_, uuid(), current_view_.id(), ++fifo_seq_); for (DelayedList::const_iterator i(delayed_list_.begin()); i != delayed_list_.end(); ++i) { elm.add(i->first, i->second.state_change_cnt()); } gu::Buffer buf; serialize(elm, buf); Datagram dg(buf); (void)send_down(dg, ProtoDownMeta()); handle_delayed_list(elm, self_i_); } void gcomm::evs::Proto::resend(const UUID& gap_source, const Range range) { gcomm_assert(gap_source != uuid()); gcomm_assert(range.lu() <= range.hs()) << "lu (" << range.lu() << ") > hs(" << range.hs() << ")"; if (range.lu() <= input_map_->safe_seq()) { evs_log_debug(D_RETRANS) << self_string() << "lu (" << range.lu() << ") <= safe_seq(" << input_map_->safe_seq() << "), can't recover message"; return; } evs_log_debug(D_RETRANS) << " retrans requested by " << gap_source << " " << range.lu() << " -> " << range.hs(); seqno_t seq(range.lu()); while (seq <= range.hs()) { InputMap::iterator msg_i = input_map_->find( NodeMap::value(self_i_).index(), seq); if (msg_i == input_map_->end()) { try { gu_trace(msg_i = input_map_->recover( NodeMap::value(self_i_).index(), seq)); } catch (...) 
{ evs_log_debug(D_RETRANS) << "could not recover message " << gap_source << ":" << seq; seq = seq + 1; continue; } } const UserMessage& msg(InputMapMsgIndex::value(msg_i).msg()); gcomm_assert(msg.source() == uuid()); Datagram rb(InputMapMsgIndex::value(msg_i).rb()); assert(rb.offset() == 0); UserMessage um(msg.version(), msg.source(), msg.source_view_id(), msg.seq(), input_map_->aru_seq(), msg.seq_range(), msg.order(), msg.fifo_seq(), msg.user_type(), static_cast( Message::F_RETRANS | (msg.flags() & Message::F_AGGREGATE))); push_header(um, rb); int err = send_down(rb, ProtoDownMeta()); if (err != 0) { log_debug << "send failed: " << strerror(err); break; } else { evs_log_debug(D_RETRANS) << "retransmitted " << um; } seq = seq + msg.seq_range() + 1; retrans_msgs_++; } } void gcomm::evs::Proto::recover(const UUID& gap_source, const UUID& range_uuid, const Range range) { gcomm_assert(gap_source != uuid()) << "gap_source (" << gap_source << ") == uuid() (" << uuid() << " state " << *this; gcomm_assert(range.lu() <= range.hs()) << "lu (" << range.lu() << ") > hs (" << range.hs() << ")"; if (range.lu() <= input_map_->safe_seq()) { evs_log_debug(D_RETRANS) << "lu (" << range.lu() << ") <= safe_seq(" << input_map_->safe_seq() << "), can't recover message"; return; } const Node& range_node(NodeMap::value(known_.find_checked(range_uuid))); const Range im_range(input_map_->range(range_node.index())); evs_log_debug(D_RETRANS) << " recovering message from " << range_uuid << " requested by " << gap_source << " requested range " << range << " available " << im_range; seqno_t seq(range.lu()); while (seq <= range.hs() && seq <= im_range.hs()) { InputMap::iterator msg_i = input_map_->find(range_node.index(), seq); if (msg_i == input_map_->end()) { try { gu_trace(msg_i = input_map_->recover(range_node.index(), seq)); } catch (...) { seq = seq + 1; continue; } } const UserMessage& msg(InputMapMsgIndex::value(msg_i).msg()); assert(msg.source() == range_uuid); Datagram rb(InputMapMsgIndex::value(msg_i).rb()); assert(rb.offset() == 0); UserMessage um(msg.version(), msg.source(), msg.source_view_id(), msg.seq(), msg.aru_seq(), msg.seq_range(), msg.order(), msg.fifo_seq(), msg.user_type(), static_cast( Message::F_SOURCE | Message::F_RETRANS | (msg.flags() & Message::F_AGGREGATE))); push_header(um, rb); int err = send_delegate(rb); if (err != 0) { log_debug << "send failed: " << strerror(err); break; } else { evs_log_debug(D_RETRANS) << "recover " << um; } seq = seq + msg.seq_range() + 1; recovered_msgs_++; } } void gcomm::evs::Proto::handle_foreign(const Message& msg) { // no need to handle foreign LEAVE message if (msg.type() == Message::T_LEAVE) { return; } // Don't handle foreing messages in install phase. // This includes not only INSTALL state, but also // GATHER state after receiving install message. 
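    // A message is foreign when its source is not yet in known_ (see
    // handle_msg()). While install_message_ is set the membership is being
    // renegotiated, so new sources are ignored until the next view forms.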
if (install_message_ != 0) { evs_log_debug(D_FOREIGN_MSGS) << " dropping foreign message from " << msg.source() << " in install state"; return; } if (is_msg_from_previous_view(msg) == true) { return; } const UUID& source(msg.source()); evs_log_info(I_STATE) << " detected new message source " << source; NodeMap::iterator i; gu_trace(i = known_.insert_unique( std::make_pair(source, Node(*this)))); assert(NodeMap::value(i).operational() == true); if (state() == S_JOINING || state() == S_GATHER || state() == S_OPERATIONAL) { evs_log_info(I_STATE) << " shift to GATHER due to foreign message from " << msg.source(); gu_trace(shift_to(S_GATHER, false)); // Reset install timer each time foreign message is seen to // synchronize install timers. reset_timer(T_INSTALL); } // Set join message after shift to recovery, shift may clean up // join messages if (msg.type() == Message::T_JOIN) { set_join(static_cast(msg), msg.source()); } send_join(true); } void gcomm::evs::Proto::handle_msg(const Message& msg, const Datagram& rb, bool direct) { assert(msg.type() <= Message::T_DELAYED_LIST); if (msg.type() > Message::T_DELAYED_LIST) { return; } if (state() == S_CLOSED) { return; } if (isolation_end_ != gu::datetime::Date::zero()) { evs_log_debug(D_STATE) << " dropping message due to isolation"; // Isolation period is on return; } if (msg.source() == uuid()) { evs_log_debug(D_FOREIGN_MSGS) << " dropping own message"; return; } if (msg.version() > GCOMM_PROTOCOL_MAX_VERSION) { log_info << "incompatible protocol version " << static_cast(msg.version()); return; } gcomm_assert(msg.source() != UUID::nil()); // Figure out if the message is from known source NodeMap::iterator ii = known_.find(msg.source()); if (ii == known_.end()) { gu_trace(handle_foreign(msg)); return; } Node& node(NodeMap::value(ii)); if (direct == true) { node.set_seen_tstamp(gu::datetime::Date::now()); } if (node.operational() == false && node.leave_message() == 0 && (msg.flags() & Message::F_RETRANS) == 0) { // We have set this node unoperational and there was // probably good reason to do so. Don't accept messages // from it before new view has been formed. // Exceptions: // - Node that is leaving // - Retransmitted messages. // why we accept retransimted messages? // a node sends a message, some nodes(A) get it, but some(B) don't // then this node is non-operational(or unreachable) // so A need to send B the missing message(in envelope as delegate message) // otherwise the input map will not be consistent forever. // and user message in delegate message always comes with F_RETRANS flag. evs_log_debug(D_FOREIGN_MSGS) << " dropping message from unoperational source " << node; return; } // Filter out non-fifo messages if (msg.fifo_seq() != -1 && (msg.flags() & Message::F_RETRANS) == 0) { if (node.fifo_seq() >= msg.fifo_seq()) { evs_log_debug(D_FOREIGN_MSGS) << "droppoing non-fifo message " << msg << " fifo seq " << node.fifo_seq(); return; } else { node.set_fifo_seq(msg.fifo_seq()); } } // Accept non-membership messages only from current view // or from view to be installed if (msg.is_membership() == false && msg.source_view_id() != current_view_.id() && (install_message_ == 0 || install_message_->install_view_id() != msg.source_view_id())) { // If source node seems to be operational but it has proceeded // into new view, mark it as unoperational in order to create // intermediate views before re-merge. 
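        // Note that the inactivation described above is currently disabled
        // (see the comment inside the block); the only remaining effect is
        // logging the newly observed view at most once until a new view is
        // installed (new_view_logged_). The message is still dropped below.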
if (node.installed() == true && node.operational() == true && is_msg_from_previous_view(msg) == false && state() != S_LEAVING) { if (new_view_logged_ == false) { evs_log_info(I_STATE) << " detected new view from operational source " << msg.source() << ": " << msg.source_view_id(); new_view_logged_ = true; } // Note: Commented out, this causes problems with // attempt_seq. Newly (remotely?) generated install message // followed by commit gap may cause undesired // node inactivation and shift to gather. // // set_inactive(msg.source()); // gu_trace(shift_to(S_GATHER, true)); } evs_log_debug(D_FOREIGN_MSGS) << "dropping non-membership message from foreign view"; return; } else if (NodeMap::value(ii).index() == std::numeric_limits::max() && msg.source_view_id() == current_view_.id()) { log_warn << "Message from node that claims to come from same view but is not in current view " << msg; assert(0); return; } recvd_msgs_[msg.type()]++; switch (msg.type()) { case Message::T_USER: gu_trace(handle_user(static_cast(msg), ii, rb)); break; case Message::T_DELEGATE: gu_trace(handle_delegate(static_cast(msg), ii, rb)); break; case Message::T_GAP: gu_trace(handle_gap(static_cast(msg), ii)); break; case Message::T_JOIN: gu_trace(handle_join(static_cast(msg), ii)); break; case Message::T_LEAVE: gu_trace(handle_leave(static_cast(msg), ii)); break; case Message::T_INSTALL: gu_trace(handle_install(static_cast(msg), ii)); break; case Message::T_DELAYED_LIST: gu_trace(handle_delayed_list( static_cast(msg), ii)); break; default: log_warn << "invalid message type " << msg.type(); } } //////////////////////////////////////////////////////////////////////// // Protolay interface //////////////////////////////////////////////////////////////////////// size_t gcomm::evs::Proto::unserialize_message(const UUID& source, const Datagram& rb, Message* msg) { size_t offset; const gu::byte_t* begin(gcomm::begin(rb)); const size_t available(gcomm::available(rb)); gu_trace(offset = msg->unserialize(begin, available, 0)); if ((msg->flags() & Message::F_SOURCE) == 0) { assert(source != UUID::nil()); gcomm_assert(source != UUID::nil()); msg->set_source(source); } switch (msg->type()) { case Message::T_NONE: gu_throw_fatal; break; case Message::T_USER: gu_trace(offset = static_cast(*msg).unserialize( begin, available, offset, true)); break; case Message::T_DELEGATE: gu_trace(offset = static_cast(*msg).unserialize( begin, available, offset, true)); break; case Message::T_GAP: gu_trace(offset = static_cast(*msg).unserialize( begin, available, offset, true)); break; case Message::T_JOIN: gu_trace(offset = static_cast(*msg).unserialize( begin, available, offset, true)); break; case Message::T_INSTALL: gu_trace(offset = static_cast(*msg).unserialize( begin, available, offset, true)); break; case Message::T_LEAVE: gu_trace(offset = static_cast(*msg).unserialize( begin, available, offset, true)); break; case Message::T_DELAYED_LIST: gu_trace(offset = static_cast(*msg).unserialize( begin, available, offset, true)); break; } return (offset + rb.offset()); } void gcomm::evs::Proto::handle_up(const void* cid, const Datagram& rb, const ProtoUpMeta& um) { Message msg; if (state() == S_CLOSED || um.source() == uuid() || is_evicted(um.source())) { // Silent drop return; } gcomm_assert(um.source() != UUID::nil()); try { size_t offset; gu_trace(offset = unserialize_message(um.source(), rb, &msg)); handle_msg(msg, Datagram(rb, offset), (msg.flags() & Message::F_RETRANS) == 0); } catch (gu::Exception& e) { switch (e.get_errno()) { case 
EPROTONOSUPPORT: log_warn << e.what(); break; case EINVAL: log_warn << "invalid message: " << msg; break; default: log_fatal << "exception caused by message: " << msg; std::cerr << " state after handling message: " << *this; throw; } } } int gcomm::evs::Proto::handle_down(Datagram& wb, const ProtoDownMeta& dm) { if (state() == S_GATHER || state() == S_INSTALL) { return EAGAIN; } else if (state() != S_OPERATIONAL) { log_warn << "user message in state " << to_string(state()); return ENOTCONN; } if (dm.order() == O_LOCAL_CAUSAL) { gu::datetime::Date now(gu::datetime::Date::now()); if (causal_queue_.empty() == true && last_sent_ == input_map_->safe_seq() && causal_keepalive_period_ > gu::datetime::Period(0) && last_causal_keepalive_ + causal_keepalive_period_ > now) { assert(last_sent_ == input_map_->aru_seq()); // Input map should either be empty (all messages // delivered) or the undelivered messages have higher // seqno than safe_seq. Even if the delivry is // done below if needed, this assertion should stay // to catch errors in logic elsewhere in the code. assert(input_map_->begin() == input_map_->end() || input_map_->is_safe(input_map_->begin()) == false); if (input_map_->begin() != input_map_->end() && input_map_->is_safe(input_map_->begin()) == true) { gu_trace(deliver()); if (input_map_->begin() != input_map_->end() && input_map_->is_safe(input_map_->begin()) == true) { // If the input map state is still not good for fast path, // the situation is not likely to clear immediately. Retur // error to retry later. return EAGAIN; } } hs_local_causal_.insert(0.0); deliver_causal(dm.user_type(), last_sent_, wb); } else { seqno_t causal_seqno(input_map_->aru_seq()); if (causal_keepalive_period_ == gu::datetime::Period(0) || last_causal_keepalive_ + causal_keepalive_period_ <= now) { // generate traffic to make sure that group is live Datagram dg; int err(send_user(dg, 0xff, O_DROP, -1, -1)); if (err != 0) { return err; } // reassign causal_seqno to be last_sent: // in order to make sure that the group is live, // safe seqno must be advanced and in this case // safe seqno equals to aru seqno. 
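                // The causally ordered message is queued and delivered by
                // deliver_local() once safe_seq has reached causal_seqno,
                // i.e. once every message sent before it is known to have
                // been received by all members of the current view.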
causal_seqno = last_sent_; last_causal_keepalive_ = now; } causal_queue_.push_back(CausalMessage(dm.user_type(), causal_seqno, wb)); } return 0; } send_queue_s_ += output_.size(); ++n_send_queue_s_; int ret = 0; if (output_.empty() == true) { int err; err = send_user(wb, dm.user_type(), dm.order(), user_send_window_, -1); switch (err) { case EAGAIN: { output_.push_back(std::make_pair(wb, dm)); // Fall through } case 0: ret = 0; break; default: log_error << "send error: " << err; ret = err; } } else if (output_.size() < max_output_size_) { output_.push_back(std::make_pair(wb, dm)); } else { ret = EAGAIN; } return ret; } int gcomm::evs::Proto::send_down(Datagram& dg, const ProtoDownMeta& dm) { if (isolation_end_ != gu::datetime::Date::zero()) { // Node has isolated itself, don't emit any messages return 0; } else { return Protolay::send_down(dg, dm); } } ///////////////////////////////////////////////////////////////////////////// // State handler ///////////////////////////////////////////////////////////////////////////// void gcomm::evs::Proto::shift_to(const State s, const bool send_j) { if (shift_to_rfcnt_ > 0) gu_throw_fatal << *this; shift_to_rfcnt_++; static const bool allowed[S_MAX][S_MAX] = { // CLOSED JOINING LEAVING GATHER INSTALL OPERAT { false, true, false, false, false, false }, // CLOSED { false, false, true, true, false, false }, // JOINING { true, false, false, false, false, false }, // LEAVING { false, false, true, true, true, false }, // GATHER { false, false, false, true, false, true }, // INSTALL { false, false, true, true, false, false } // OPERATIONAL }; assert(s < S_MAX); if (allowed[state_][s] == false) { gu_throw_fatal << "Forbidden state transition: " << to_string(state_) << " -> " << to_string(s); } if (state() != s) { evs_log_info(I_STATE) << " state change: " << to_string(state_) << " -> " << to_string(s); } switch (s) { case S_CLOSED: { gcomm_assert(state() == S_LEAVING); gu_trace(deliver()); gu_trace(deliver_local()); setall_installed(false); NodeMap::value(self_i_).set_installed(true); // Construct install message containing only one node for // last trans view. 
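        // This is the S_LEAVING -> S_CLOSED path: a synthetic install
        // message containing only this node is built so that the normal
        // transitional view delivery code can be reused for the final,
        // single member transitional view.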
MessageNodeList node_list; (void)node_list.insert_unique( std::make_pair(uuid(), MessageNode(true, false, NodeMap::value(self_i_).segment(), false, -1, current_view_.id(), input_map_->safe_seq( NodeMap::value(self_i_).index()), input_map_->range( NodeMap::value(self_i_).index())))); InstallMessage im(0, uuid(), current_view_.id(), ViewId(V_REG, uuid(), current_view_.id().seq() + 1), input_map_->safe_seq(), input_map_->aru_seq(), ++fifo_seq_, node_list); gu_trace(deliver_trans_view(im, current_view_)); gu_trace(deliver_trans()); gu_trace(deliver_local(true)); gcomm_assert(causal_queue_.empty() == true); if (collect_stats_ == true) { handle_stats_timer(); } gu_trace(deliver_empty_view()); cleanup_foreign(im); cleanup_views(); timers_.clear(); state_ = S_CLOSED; break; } case S_JOINING: state_ = S_JOINING; reset_timer(T_STATS); break; case S_LEAVING: state_ = S_LEAVING; reset_timer(T_INACTIVITY); reset_timer(T_RETRANS); reset_timer(T_INSTALL); break; case S_GATHER: { setall_committed(false); setall_installed(false); delete install_message_; install_message_ = 0; if (state() == S_OPERATIONAL) { profile_enter(send_user_prof_); while (output_.empty() == false) { int err; gu_trace(err = send_user(-1)); if (err != 0) { gu_throw_fatal << self_string() << "send_user() failed in shifto " << "to S_GATHER: " << strerror(err); } } profile_leave(send_user_prof_); } else { gcomm_assert(output_.empty() == true); } State prev_state(state_); state_ = S_GATHER; if (send_j == true) { profile_enter(send_join_prof_); gu_trace(send_join(false)); profile_leave(send_join_prof_); } gcomm_assert(state() == S_GATHER); reset_timer(T_INACTIVITY); if (prev_state == S_OPERATIONAL || prev_state == S_JOINING) { reset_timer(T_RETRANS); reset_timer(T_INSTALL); } break; } case S_INSTALL: { gcomm_assert(install_message_ != 0); gcomm_assert(is_all_committed() == true); state_ = S_INSTALL; reset_timer(T_INACTIVITY); reset_timer(T_RETRANS); break; } case S_OPERATIONAL: { gcomm_assert(output_.empty() == true); gcomm_assert(install_message_ != 0); gcomm_assert(NodeMap::value(self_i_).join_message() != 0 && consensus_.equal( *NodeMap::value(self_i_).join_message(), *install_message_)) << "install message not consistent with own join, state: " << *this; gcomm_assert(is_all_installed() == true); gu_trace(deliver()); gu_trace(deliver_local()); gu_trace(deliver_trans_view(*install_message_, current_view_)); gu_trace(deliver_trans()); gu_trace(deliver_local(true)); gcomm_assert(causal_queue_.empty() == true); input_map_->clear(); if (collect_stats_ == true) { handle_stats_timer(); } // End of previous view // Construct new view and shift to S_OPERATIONAL before calling // deliver_reg_view(). Reg view delivery may trigger message // exchange on upper layer and operating view is needed to // handle messages. 
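        // Order of operations below: remember the previous view, build
        // current_view_ and the input map indexes from the install message,
        // deliver the regular view upwards, clean up per-view state and
        // finally send a gap to announce this node's initial seqno state
        // in the new view.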
previous_view_ = current_view_; std::copy(gather_views_.begin(), gather_views_.end(), std::inserter(previous_views_, previous_views_.end())); gather_views_.clear(); if (install_message_->version() > current_view_.version()) { log_info << "EVS version upgrade " << current_view_.version() << " -> " << static_cast(install_message_->version()); } else if (install_message_->version() < current_view_.version()) { log_info << "EVS version downgrade " << current_view_.version() << " -> " << static_cast(install_message_->version()); } current_view_ = View(install_message_->version(), install_message_->install_view_id()); size_t idx = 0; const MessageNodeList& imnl(install_message_->node_list()); for (MessageNodeList::const_iterator i(imnl.begin()); i != imnl.end(); ++i) { const UUID& uuid(MessageNodeList::key(i)); const MessageNode& n(MessageNodeList::value(i)); // Add operational nodes to new view, assign input map index NodeMap::iterator nmi(known_.find(uuid)); gcomm_assert(nmi != known_.end()) << "node " << uuid << " not found from known map"; if (n.operational() == true) { current_view_.add_member(uuid, NodeMap::value(nmi).segment()); NodeMap::value(nmi).set_index(idx++); } else { NodeMap::value(nmi).set_index( std::numeric_limits::max()); } } if (previous_view_.id().type() == V_REG && previous_view_.members() == current_view_.members()) { evs_log_info(I_VIEWS) << "subsequent views have same members, prev view " << previous_view_ << " current view " << current_view_; } input_map_->reset(current_view_.members().size()); last_sent_ = -1; state_ = S_OPERATIONAL; deliver_reg_view(*install_message_, previous_view_); cleanup_foreign(*install_message_); cleanup_views(); cleanup_joins(); delete install_message_; install_message_ = 0; attempt_seq_ = 1; install_timeout_count_ = 0; profile_enter(send_gap_prof_); gu_trace(send_gap(EVS_CALLER, UUID::nil(), current_view_.id(), Range()));; profile_leave(send_gap_prof_); gcomm_assert(state() == S_OPERATIONAL); reset_timer(T_INACTIVITY); reset_timer(T_RETRANS); cancel_timer(T_INSTALL); new_view_logged_ = false; break; } default: gu_throw_fatal << "invalid state"; } shift_to_rfcnt_--; } //////////////////////////////////////////////////////////////////////////// // Message delivery //////////////////////////////////////////////////////////////////////////// void gcomm::evs::Proto::deliver_causal(uint8_t user_type, seqno_t seqno, const Datagram& datagram) { send_up(datagram, ProtoUpMeta(uuid(), current_view_.id(), 0, user_type, O_LOCAL_CAUSAL, seqno)); ++delivered_msgs_[O_LOCAL_CAUSAL]; } void gcomm::evs::Proto::deliver_local(bool trans) { // local causal const seqno_t causal_seq(trans == false ? input_map_->safe_seq() : last_sent_); gu::datetime::Date now(gu::datetime::Date::now()); assert(input_map_->begin() == input_map_->end() || input_map_->is_safe(input_map_->begin()) == false); while (causal_queue_.empty() == false && causal_queue_.front().seqno() <= causal_seq) { const CausalMessage& cm(causal_queue_.front()); hs_local_causal_.insert(double(now.get_utc() - cm.tstamp().get_utc())/gu::datetime::Sec); deliver_causal(cm.user_type(), cm.seqno(), cm.datagram()); causal_queue_.pop_front(); } } void gcomm::evs::Proto::validate_reg_msg(const UserMessage& msg) { if (msg.source_view_id() != current_view_.id()) { // Note: This implementation should guarantee same view delivery, // this is sanity check for that. 
gu_throw_fatal << "reg validate: not current view"; } // Update statistics for locally generated messages if (msg.source() == uuid()) { if (msg.order() == O_SAFE) { gu::datetime::Date now(gu::datetime::Date::now()); double lat(double(now.get_utc() - msg.tstamp().get_utc())/ gu::datetime::Sec); if (info_mask_ & I_STATISTICS) hs_safe_.insert(lat); safe_deliv_latency_.insert(lat); } else if (msg.order() == O_AGREED) { if (info_mask_ & I_STATISTICS) { gu::datetime::Date now(gu::datetime::Date::now()); hs_agreed_.insert(double(now.get_utc() - msg.tstamp().get_utc())/gu::datetime::Sec); } } } } void gcomm::evs::Proto::deliver_finish(const InputMapMsg& msg) { if ((msg.msg().flags() & Message::F_AGGREGATE) == 0) { ++delivered_msgs_[msg.msg().order()]; if (msg.msg().order() != O_DROP) { gu_trace(validate_reg_msg(msg.msg())); profile_enter(delivery_prof_); ProtoUpMeta um(msg.msg().source(), msg.msg().source_view_id(), 0, msg.msg().user_type(), msg.msg().order(), msg.msg().seq()); try { send_up(msg.rb(), um); } catch (...) { log_info << msg.msg() << " " << msg.rb().len(); throw; } profile_leave(delivery_prof_); } } else { gu_trace(validate_reg_msg(msg.msg())); size_t offset(0); while (offset < msg.rb().len()) { ++delivered_msgs_[msg.msg().order()]; AggregateMessage am; gu_trace(am.unserialize(&msg.rb().payload()[0], msg.rb().payload().size(), offset)); Datagram dg( gu::SharedBuffer( new gu::Buffer( &msg.rb().payload()[0] + offset + am.serial_size(), &msg.rb().payload()[0] + offset + am.serial_size() + am.len()))); ProtoUpMeta um(msg.msg().source(), msg.msg().source_view_id(), 0, am.user_type(), msg.msg().order(), msg.msg().seq()); gu_trace(send_up(dg, um)); offset += am.serial_size() + am.len(); } gcomm_assert(offset == msg.rb().len()); } } void gcomm::evs::Proto::deliver() { if (delivering_ == true) { gu_throw_fatal << "Recursive enter to delivery"; } delivering_ = true; if (state() != S_OPERATIONAL && state() != S_GATHER && state() != S_INSTALL && state() != S_LEAVING) { gu_throw_fatal << "invalid state: " << to_string(state()); } evs_log_debug(D_DELIVERY) << " aru_seq=" << input_map_->aru_seq() << " safe_seq=" << input_map_->safe_seq(); // Read input map head until a message which cannot be // delivered is enountered. InputMapMsgIndex::iterator i; while ((i = input_map_->begin()) != input_map_->end()) { const InputMapMsg& msg(InputMapMsgIndex::value(i)); if ((msg.msg().order() <= O_SAFE && input_map_->is_safe(i) == true) || (msg.msg().order() <= O_AGREED && input_map_->is_agreed(i) == true) || (msg.msg().order() <= O_FIFO && input_map_->is_fifo(i) == true)) { deliver_finish(msg); gu_trace(input_map_->erase(i)); } else { if (msg.msg().order() > O_SAFE) { gu_throw_fatal << "Message with order " << msg.msg().order() << " in input map, cannot continue safely"; } break; } } delivering_ = false; assert(input_map_->begin() == input_map_->end() || input_map_->is_safe(input_map_->begin()) == false); } void gcomm::evs::Proto::deliver_trans() { if (delivering_ == true) { gu_throw_fatal << "Recursive enter to delivery"; } delivering_ = true; if (state() != S_INSTALL && state() != S_LEAVING) gu_throw_fatal << "invalid state"; evs_log_debug(D_DELIVERY) << " aru_seq=" << input_map_->aru_seq() << " safe_seq=" << input_map_->safe_seq(); // In transitional configuration we must deliver all messages that // are fifo. 
This is because: // - We know that it is possible to deliver all fifo messages originated // from partitioned component as safe in partitioned component // - Aru in this component is at least the max known fifo seq // from partitioned component due to message recovery // - All FIFO messages originated from this component must be // delivered to fulfill self delivery requirement and // - FIFO messages originated from this component qualify as AGREED // in transitional configuration InputMap::iterator i, i_next; for (i = input_map_->begin(); i != input_map_->end(); i = i_next) { i_next = i; ++i_next; const InputMapMsg& msg(InputMapMsgIndex::value(i)); bool deliver = false; switch (msg.msg().order()) { case O_SAFE: case O_AGREED: case O_FIFO: case O_DROP: if (input_map_->is_fifo(i) == true) { deliver = true; } break; default: gu_throw_fatal; } if (deliver == true) { if (install_message_ != 0) { const MessageNode& mn( MessageNodeList::value( install_message_->node_list().find_checked( msg.msg().source()))); if (msg.msg().seq() <= mn.im_range().hs()) { deliver_finish(msg); } else { gcomm_assert(mn.operational() == false); log_info << "filtering out trans message higher than " << "install message hs " << mn.im_range().hs() << ": " << msg.msg(); } } else { deliver_finish(msg); } gu_trace(input_map_->erase(i)); } } // Sanity check: // There must not be any messages left that // - Are originated from outside of trans conf and are FIFO // - Are originated from trans conf for (i = input_map_->begin(); i != input_map_->end(); i = i_next) { i_next = i; ++i_next; const InputMapMsg& msg(InputMapMsgIndex::value(i)); NodeMap::iterator ii; gu_trace(ii = known_.find_checked(msg.msg().source())); if (NodeMap::value(ii).installed() == true) { gu_throw_fatal << "Protocol error in transitional delivery " << "(self delivery constraint)"; } else if (input_map_->is_fifo(i) == true) { gu_throw_fatal << "Protocol error in transitional delivery " << "(fifo from partitioned component)"; } gu_trace(input_map_->erase(i)); } delivering_ = false; } ///////////////////////////////////////////////////////////////////////////// // Message handlers ///////////////////////////////////////////////////////////////////////////// gcomm::evs::seqno_t gcomm::evs::Proto::update_im_safe_seq(const size_t uuid, const seqno_t seq) { const seqno_t im_safe_seq(input_map_->safe_seq(uuid)); if (im_safe_seq < seq) { input_map_->set_safe_seq(uuid, seq); } return im_safe_seq; } void gcomm::evs::Proto::handle_user(const UserMessage& msg, NodeMap::iterator ii, const Datagram& rb) { assert(ii != known_.end()); assert(state() != S_CLOSED && state() != S_JOINING); Node& inst(NodeMap::value(ii)); evs_log_debug(D_USER_MSGS) << "received " << msg; if (msg.source_view_id() != current_view_.id()) { if (state() == S_LEAVING) { // Silent drop return; } if (is_msg_from_previous_view(msg) == true) { evs_log_debug(D_FOREIGN_MSGS) << "user message " << msg << " from previous view"; return; } if (inst.operational() == false) { evs_log_debug(D_STATE) << "dropping message from unoperational source " << msg.source(); return; } else if (inst.installed() == false) { if (install_message_ != 0 && msg.source_view_id() == install_message_->install_view_id()) { assert(state() == S_GATHER || state() == S_INSTALL); evs_log_debug(D_STATE) << " recovery user message " << msg; // This is possible if install timer expires just before // new view is established on this node and retransmitted // install message is received just before user this message. 
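                // Recovering from a missed install: if still gathering,
                // mark every operational node from the install message as
                // committed and shift to S_INSTALL, then mark them
                // installed and shift to S_OPERATIONAL, effectively
                // replaying the commit/install gap handshake that was
                // missed before processing the user message itself.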
if (state() == S_GATHER) { // Sanity check MessageNodeList::const_iterator self( install_message_->node_list().find(uuid())); gcomm_assert(self != install_message_->node_list().end() && MessageNodeList::value(self).operational() == true); // Mark all operational nodes in install message as // committed for (MessageNodeList::const_iterator mi = install_message_->node_list().begin(); mi != install_message_->node_list().end(); ++mi) { if (MessageNodeList::value(mi).operational() == true) { NodeMap::iterator jj; gu_trace(jj = known_.find_checked( MessageNodeList::key(mi))); NodeMap::value(jj).set_committed(true); } } shift_to(S_INSTALL); } // Other instances installed view before this one, so it is // safe to shift to S_OPERATIONAL // Mark all operational nodes in install message as installed for (MessageNodeList::const_iterator mi = install_message_->node_list().begin(); mi != install_message_->node_list().end(); ++mi) { if (MessageNodeList::value(mi).operational() == true) { NodeMap::iterator jj; gu_trace(jj = known_.find_checked( MessageNodeList::key(mi))); NodeMap::value(jj).set_installed(true); } } inst.set_tstamp(gu::datetime::Date::now()); profile_enter(shift_to_prof_); gu_trace(shift_to(S_OPERATIONAL)); profile_leave(shift_to_prof_); if (pending_leave_ == true) { close(); } // proceed to process actual user message } else { return; } } else { log_debug << self_string() << " unhandled user message " << msg; return; } } gcomm_assert(msg.source_view_id() == current_view_.id()); // note: #gh40 bool shift_to_gather = false; if (install_message_) { const MessageNode& mn( MessageNodeList::value( install_message_->node_list().find_checked( msg.source()))); if (!mn.operational()) return ; if (mn.operational() && msg.seq() > mn.im_range().hs()) { shift_to_gather = true; } } Range range; Range prev_range; seqno_t prev_aru; seqno_t prev_safe; profile_enter(input_map_prof_); prev_aru = input_map_->aru_seq(); prev_range = input_map_->range(inst.index()); // Insert only if msg seq is greater or equal than current lowest unseen if (msg.seq() >= prev_range.lu()) { Datagram im_dgram(rb, rb.offset()); im_dgram.normalize(); gu_trace(range = input_map_->insert(inst.index(), msg, im_dgram)); if (range.lu() > prev_range.lu()) { inst.set_tstamp(gu::datetime::Date::now()); } } else { range = prev_range; } // Update im safe seq for self update_im_safe_seq(NodeMap::value(self_i_).index(), input_map_->aru_seq()); // Update safe seq for message source prev_safe = update_im_safe_seq(inst.index(), msg.aru_seq()); profile_leave(input_map_prof_); // Check for missing messages if (range.hs() > range.lu() && (msg.flags() & Message::F_RETRANS) == 0 ) { evs_log_debug(D_RETRANS) << " requesting retrans from " << msg.source() << " " << range << " due to input map gap, aru " << input_map_->aru_seq(); profile_enter(send_gap_prof_); gu_trace(send_gap(EVS_CALLER, msg.source(), current_view_.id(), range)); profile_leave(send_gap_prof_); } // Seqno range completion and acknowledgement const seqno_t max_hs(input_map_->max_hs()); if (output_.empty() == true && (state() == S_OPERATIONAL || state() == S_GATHER) && (msg.flags() & Message::F_MSG_MORE) == 0 && (last_sent_ < max_hs)) { // Message not originated from this instance, output queue is empty // and last_sent seqno should be advanced gu_trace(complete_user(max_hs)); } else if (output_.empty() == true && input_map_->aru_seq() != prev_aru) { // Output queue empty and aru changed, send gap to inform others evs_log_debug(D_GAP_MSGS) << "sending empty gap"; 
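            // An "empty" gap (nil range uuid, empty range) requests nothing;
            // it only advertises this node's aru seqno so that peers can
            // advance their safe seqnos (see handle_gap()).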
profile_enter(send_gap_prof_); gu_trace(send_gap(EVS_CALLER, UUID::nil(), current_view_.id(), Range())); profile_leave(send_gap_prof_); } // Send messages if (state() == S_OPERATIONAL) { profile_enter(send_user_prof_); while (output_.empty() == false) { int err; gu_trace(err = send_user(send_window_)); if (err != 0) { break; } } profile_leave(send_user_prof_); } // Deliver messages profile_enter(delivery_prof_); gu_trace(deliver()); gu_trace(deliver_local()); profile_leave(delivery_prof_); // If in recovery state, send join each time input map aru seq reaches // last sent and either input map aru or safe seq has changed. if (state() == S_GATHER && consensus_.highest_reachable_safe_seq() == input_map_->aru_seq() && (prev_aru != input_map_->aru_seq() || prev_safe != input_map_->safe_seq()) && (msg.flags() & Message::F_RETRANS) == 0) { gcomm_assert(output_.empty() == true); if (consensus_.is_consensus() == false) { profile_enter(send_join_prof_); gu_trace(send_join()); profile_leave(send_join_prof_); } } if (shift_to_gather) { shift_to(S_GATHER, true); } } void gcomm::evs::Proto::handle_delegate(const DelegateMessage& msg, NodeMap::iterator ii, const Datagram& rb) { gcomm_assert(ii != known_.end()); evs_log_debug(D_DELEGATE_MSGS) << "delegate message " << msg; Message umsg; size_t offset; gu_trace(offset = unserialize_message(UUID::nil(), rb, &umsg)); gu_trace(handle_msg(umsg, Datagram(rb, offset), false)); } void gcomm::evs::Proto::handle_gap(const GapMessage& msg, NodeMap::iterator ii) { assert(ii != known_.end()); assert(state() != S_CLOSED && state() != S_JOINING); Node& inst(NodeMap::value(ii)); evs_log_debug(D_GAP_MSGS) << "gap message " << msg; if ((msg.flags() & Message::F_COMMIT) != 0) { log_debug << self_string() << " commit gap from " << msg.source(); if (state() == S_GATHER && install_message_ != 0 && install_message_->install_view_id() == msg.source_view_id() && install_message_->fifo_seq() == msg.seq()) { inst.set_committed(true); inst.set_tstamp(gu::datetime::Date::now()); if (is_all_committed() == true) { shift_to(S_INSTALL); gu_trace(send_gap(EVS_CALLER, UUID::nil(), install_message_->install_view_id(), Range()));; } } else if (state() == S_GATHER && install_message_ != 0 && install_message_->install_view_id() == msg.source_view_id() && install_message_->fifo_seq() < msg.seq()) { // new install message has been generated shift_to(S_GATHER, true); } else { evs_log_debug(D_GAP_MSGS) << " unhandled commit gap " << msg; } return; } else if (state() == S_INSTALL && install_message_ != 0 && install_message_->install_view_id() == msg.source_view_id()) { evs_log_debug(D_STATE) << "install gap " << msg; inst.set_installed(true); inst.set_tstamp(gu::datetime::Date::now()); if (is_all_installed() == true) { profile_enter(shift_to_prof_); gu_trace(shift_to(S_OPERATIONAL)); profile_leave(shift_to_prof_); if (pending_leave_ == true) { close(); } } return; } else if (msg.source_view_id() != current_view_.id()) { if (state() == S_LEAVING) { // Silently drop return; } if (is_msg_from_previous_view(msg) == true) { evs_log_debug(D_FOREIGN_MSGS) << "gap message from previous view"; return; } if (inst.operational() == false) { evs_log_debug(D_STATE) << "dropping message from unoperational source " << msg.source(); } else if (inst.installed() == false) { evs_log_debug(D_STATE) << "dropping message from uninstalled source " << msg.source(); } else { log_debug << "unhandled gap message " << msg; } return; } gcomm_assert(msg.source_view_id() == current_view_.id()); // seqno_t prev_safe; 
profile_enter(input_map_prof_); prev_safe = update_im_safe_seq(inst.index(), msg.aru_seq()); // Deliver messages and update tstamp only if safe_seq changed // for the source. if (prev_safe != input_map_->safe_seq(inst.index())) { inst.set_tstamp(gu::datetime::Date::now()); } profile_leave(input_map_prof_); // if (msg.range_uuid() == uuid()) { if (msg.range().hs() > last_sent_ && (state() == S_OPERATIONAL || state() == S_GATHER)) { // This could be leaving node requesting messages up to // its last sent. gu_trace(complete_user(msg.range().hs())); } const seqno_t upper_bound( std::min(msg.range().hs(), last_sent_)); if (msg.range().lu() <= upper_bound) { gu_trace(resend(msg.source(), Range(msg.range().lu(), upper_bound))); } } else if ((msg.flags() & Message::F_RETRANS) != 0 && msg.source() != uuid()) { gu_trace(recover(msg.source(), msg.range_uuid(), msg.range())); } // if (state() == S_OPERATIONAL) { if (output_.empty() == false) { profile_enter(send_user_prof_); while (output_.empty() == false) { int err; gu_trace(err = send_user(send_window_)); if (err != 0) break; } profile_leave(send_user_prof_); } else { const seqno_t max_hs(input_map_->max_hs()); if (last_sent_ < max_hs) { gu_trace(complete_user(max_hs)); } } } profile_enter(delivery_prof_); gu_trace(deliver()); gu_trace(deliver_local()); profile_leave(delivery_prof_); // if (state() == S_GATHER && consensus_.highest_reachable_safe_seq() == input_map_->aru_seq() && prev_safe != input_map_->safe_seq() ) { gcomm_assert(output_.empty() == true); if (consensus_.is_consensus() == false) { profile_enter(send_join_prof_); gu_trace(send_join()); profile_leave(send_join_prof_); } } } bool gcomm::evs::Proto::update_im_safe_seqs(const MessageNodeList& node_list) { bool updated = false; // Update input map state for (MessageNodeList::const_iterator i = node_list.begin(); i != node_list.end(); ++i) { const UUID& node_uuid(MessageNodeList::key(i)); const Node& local_node(NodeMap::value(known_.find_checked(node_uuid))); const MessageNode& node(MessageNodeList::value(i)); gcomm_assert(node.view_id() == current_view_.id()); const seqno_t safe_seq(node.safe_seq()); seqno_t prev_safe_seq; gu_trace(prev_safe_seq = update_im_safe_seq(local_node.index(), safe_seq)); if (prev_safe_seq != safe_seq && input_map_->safe_seq(local_node.index()) == safe_seq) { updated = true; } } return updated; } void gcomm::evs::Proto::retrans_user(const UUID& nl_uuid, const MessageNodeList& node_list) { for (MessageNodeList::const_iterator i = node_list.begin(); i != node_list.end(); ++i) { const UUID& node_uuid(MessageNodeList::key(i)); const MessageNode& mn(MessageNodeList::value(i)); const Node& n(NodeMap::value(known_.find_checked(node_uuid))); const Range r(input_map_->range(n.index())); if (node_uuid == uuid() && mn.im_range().lu() != r.lu()) { // Source member is missing messages from us gcomm_assert(mn.im_range().hs() <= last_sent_); gu_trace(resend(nl_uuid, Range(mn.im_range().lu(), last_sent_))); } else if ((mn.operational() == false || mn.leaving() == true) && node_uuid != uuid() && (mn.im_range().lu() < r.lu() || mn.im_range().hs() < r.hs())) { gu_trace(recover(nl_uuid, node_uuid, Range(mn.im_range().lu(), r.hs()))); } } } void gcomm::evs::Proto::retrans_leaves(const MessageNodeList& node_list) { for (NodeMap::const_iterator li = known_.begin(); li != known_.end(); ++li) { const Node& local_node(NodeMap::value(li)); if (local_node.leave_message() != 0 && local_node.is_inactive() == false) { MessageNodeList::const_iterator msg_li( node_list.find(NodeMap::key(li))); 
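        // If the join message's node list has no entry for this leaving
        // node or does not mark it as leaving, the locally stored leave
        // message is re-serialized below with F_RETRANS | F_SOURCE set and
        // forwarded via send_delegate() so that the peer learns about the
        // leave as well.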
if (msg_li == node_list.end() || MessageNodeList::value(msg_li).leaving() == false) { const LeaveMessage& lm(*NodeMap::value(li).leave_message()); LeaveMessage send_lm(lm.version(), lm.source(), lm.source_view_id(), lm.seq(), lm.aru_seq(), lm.fifo_seq(), Message::F_RETRANS | Message::F_SOURCE); gu::Buffer buf; serialize(send_lm, buf); Datagram dg(buf); gu_trace(send_delegate(dg)); } } } } class SelectSuspectsOp { public: SelectSuspectsOp(gcomm::evs::MessageNodeList& nl) : nl_(nl) { } void operator()(const gcomm::evs::MessageNodeList::value_type& vt) const { if (gcomm::evs::MessageNodeList::value(vt).suspected() == true) { nl_.insert_unique(vt); } } private: gcomm::evs::MessageNodeList& nl_; }; void gcomm::evs::Proto::check_suspects(const UUID& source, const MessageNodeList& nl) { assert(source != uuid()); MessageNodeList suspected; for_each(nl.begin(), nl.end(), SelectSuspectsOp(suspected)); for (MessageNodeList::const_iterator i(suspected.begin()); i != suspected.end(); ++i) { const UUID& node_uuid(MessageNodeList::key(i)); const MessageNode& node(MessageNodeList::value(i)); if (node.suspected() == true) { if (node_uuid != uuid()) { size_t s_cnt(0); // Iterate over join messages to see if majority of current // view agrees with the suspicion for (NodeMap::const_iterator j(known_.begin()); j != known_.end(); ++j) { const JoinMessage* jm(NodeMap::value(j).join_message()); if (jm != 0 && jm->source() != node_uuid && current_view_.is_member(jm->source()) == true) { MessageNodeList::const_iterator mni(jm->node_list().find(node_uuid)); if (mni != jm->node_list().end()) { const MessageNode& mn(MessageNodeList::value(mni)); if (mn.suspected() == true) { ++s_cnt; } } } } const Node& kn(NodeMap::value(known_.find_checked(node_uuid))); if (kn.operational() == true && s_cnt > current_view_.members().size()/2) { evs_log_info(I_STATE) << " declaring suspected " << node_uuid << " as inactive"; set_inactive(node_uuid); } } } } } void gcomm::evs::Proto::cross_check_inactives(const UUID& source, const MessageNodeList& nl) { assert(source != uuid()); // Do elimination by suspect status NodeMap::const_iterator source_i(known_.find_checked(source)); for (MessageNodeList::const_iterator i(nl.begin()); i != nl.end(); ++i) { const UUID& node_uuid(MessageNodeList::key(i)); const MessageNode& node(MessageNodeList::value(i)); if (node.operational() == false) { NodeMap::iterator local_i(known_.find(node_uuid)); if (local_i != known_.end() && node_uuid != uuid()) { const Node& local_node(NodeMap::value(local_i)); if (local_node.suspected()) { // This node is suspecting and the source node has // already set inactve, mark also locally inactive. set_inactive(node_uuid); } } } } } // Asymmetry elimination: // 1a) Find all joins that has this node marked as operational and which // this node considers operational // 1b) Mark all operational nodes without join message unoperational // 2) Iterate over join messages gathered in 1a, find all // unoperational entries and mark them unoperational too void gcomm::evs::Proto::asymmetry_elimination() { // Allow some time to pass from setting install timers to get // join messages accumulated. 
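    // The guard below skips the elimination while the remaining time on
    // the T_INSTALL timer still exceeds install_timeout_ - suspect_timeout_;
    // assuming the timer was armed for the full install_timeout_, this
    // means the check runs only after roughly suspect_timeout_ has
    // elapsed, giving join messages time to accumulate.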
const gu::datetime::Date now(gu::datetime::Date::now()); TimerList::const_iterator ti( find_if(timers_.begin(), timers_.end(), TimerSelectOp(T_INSTALL))); assert(ti != timers_.end()); if (ti == timers_.end()) { log_warn << "install timer not set in asymmetry_elimination()"; return; } if (install_timeout_ - suspect_timeout_ < TimerList::key(ti) - now) { // No check yet return; } // Record initial operational state for logging std::vector oparr_before(known_.size()); size_t index(0); for (NodeMap::const_iterator i(known_.begin()); i != known_.end(); ++i) { oparr_before[index] = (NodeMap::value(i).operational() == true); index++; } std::list joins; // Compose list of join messages for (NodeMap::const_iterator i(known_.begin()); i != known_.end(); ++i) { const UUID& node_uuid(NodeMap::key(i)); const Node& node(NodeMap::value(i)); const JoinMessage* jm(node.join_message()); if (jm != 0) { MessageNodeList::const_iterator self_ref( jm->node_list().find(uuid())); if (node.operational() == true && self_ref != jm->node_list().end() && MessageNodeList::value(self_ref).operational() == true) { joins.push_back(NodeMap::value(i).join_message()); } } else if (node.operational() == true) { evs_log_info(I_STATE) << "marking operational node " << node_uuid << " without " << "join message inactive in asymmetry elimination"; set_inactive(node_uuid); } } // Setting node inactive may remove join message and so invalidate // pointer in joins list, so collect set of UUIDs to set inactive // and do inactivation in separate loop. std::set to_inactive; // Iterate over join messages and collect nodes to be set inactive for (std::list::const_iterator i(joins.begin()); i != joins.end(); ++i) { for (MessageNodeList::const_iterator j((*i)->node_list().begin()); j != (*i)->node_list().end(); ++j) { if (MessageNodeList::value(j).operational() == false) { to_inactive.insert(MessageNodeList::key(j)); } } } joins.clear(); for (std::set::const_iterator i(to_inactive.begin()); i != to_inactive.end(); ++i) { NodeMap::const_iterator ni(known_.find(*i)); if (ni != known_.end()) { if (NodeMap::value(ni).operational() == true) { evs_log_info(I_STATE) << "setting " << *i << " inactive in asymmetry elimination"; set_inactive(*i); } } else { log_warn << "node " << *i << " not found from known list in ae"; } } // Compute final state and log if it has changed std::vector oparr_after(known_.size()); index = 0; for (NodeMap::const_iterator i(known_.begin()); i != known_.end(); ++i) { oparr_after[index] = (NodeMap::value(i).operational() == true); index++; } if (oparr_before != oparr_after) { evs_log_info(I_STATE) << "before asym elimination"; if (info_mask_ & I_STATE) { std::copy(oparr_before.begin(), oparr_before.end(), std::ostream_iterator(std::cerr, " ")); std::cerr << "\n"; } evs_log_info(I_STATE) << "after asym elimination"; if (info_mask_ & I_STATE) { std::copy(oparr_after.begin(), oparr_after.end(), std::ostream_iterator(std::cerr, " ")); std::cerr << "\n"; } } } // For each node thas has no join message associated, iterate over other // known nodes' join messages to find out if the node without join message // should be declared inactive. 
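// The decision is conservative: if any node referenced in an inspected
// join message is unknown locally, or is listed there as operational
// without a locally present join message of its own, the check returns
// without declaring anything. Otherwise only entries whose view id
// differs from the default ViewId(V_REG) are counted, and the unseen
// node is set inactive only when every counted entry marks it
// non-operational (cnt > 0 && cnt == inact_cnt).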
void gcomm::evs::Proto::check_unseen() { for (NodeMap::iterator i(known_.begin()); i != known_.end(); ++i) { const UUID& node_uuid(NodeMap::key(i)); Node& node(NodeMap::value(i)); if (node_uuid != uuid() && current_view_.is_member(node_uuid) == false && node.join_message() == 0 && node.operational() == true) { evs_log_debug(D_STATE) << "checking operational unseen " << node_uuid; size_t cnt(0), inact_cnt(0); for (NodeMap::iterator j(known_.begin()); j != known_.end(); ++j) { const JoinMessage* jm(NodeMap::value(j).join_message()); if (jm == 0 || NodeMap::key(j) == uuid()) { continue; } MessageNodeList::const_iterator mn_i; for (mn_i = jm->node_list().begin(); mn_i != jm->node_list().end(); ++mn_i) { NodeMap::const_iterator known_i( known_.find(MessageNodeList::key(mn_i))); if (known_i == known_.end() || (MessageNodeList::value(mn_i).operational() == true && NodeMap::value(known_i).join_message() == 0)) { evs_log_debug(D_STATE) << "all joins not locally present for " << NodeMap::key(j) << " join message node list"; return; } } if ((mn_i = jm->node_list().find(node_uuid)) != jm->node_list().end()) { const MessageNode& mn(MessageNodeList::value(mn_i)); evs_log_debug(D_STATE) << "found " << node_uuid << " from " << NodeMap::key(j) << " join message: " << mn.view_id() << " " << mn.operational(); if (mn.view_id() != ViewId(V_REG)) { ++cnt; if (mn.operational() == false) ++inact_cnt; } } } if (cnt > 0 && cnt == inact_cnt) { evs_log_info(I_STATE) << "unseen node marked inactive by others (cnt=" << cnt << ", inact_cnt=" << inact_cnt << ")"; set_inactive(node_uuid); } } } } // Iterate over all join messages. If some node has nil view id and suspected // flag true in all present join messages, declare it inactive. void gcomm::evs::Proto::check_nil_view_id() { size_t join_counts(0); std::map nil_counts; for (NodeMap::const_iterator i(known_.begin()); i != known_.end(); ++i) { const JoinMessage* jm(NodeMap::value(i).join_message()); if (jm == 0) { continue; } ++join_counts; for (MessageNodeList::const_iterator j(jm->node_list().begin()); j != jm->node_list().end(); ++j) { const MessageNode& mn(MessageNodeList::value(j)); if (mn.view_id() == ViewId(V_REG)) { // todo: investigate why removing mn.suspected() == true // condition causes some unit tests to fail if (mn.suspected() == true) { const UUID& uuid(MessageNodeList::key(j)); ++nil_counts[uuid]; } } } } for (std::map::const_iterator i(nil_counts.begin()); i != nil_counts.end(); ++i) { if (i->second == join_counts && is_inactive(i->first) == false) { log_info << "node " << i->first << " marked with nil view id and suspected in all present" << " join messages, declaring inactive"; set_inactive(i->first); } } } void gcomm::evs::Proto::handle_join(const JoinMessage& msg, NodeMap::iterator ii) { assert(ii != known_.end()); assert(state() != S_CLOSED); Node& inst(NodeMap::value(ii)); evs_log_debug(D_JOIN_MSGS) << " " << msg; if (state() == S_LEAVING) { if (msg.source_view_id() == current_view_.id()) { inst.set_tstamp(gu::datetime::Date::now()); MessageNodeList same_view; for_each(msg.node_list().begin(), msg.node_list().end(), SelectNodesOp(same_view, current_view_.id(), true, true)); profile_enter(input_map_prof_); if (update_im_safe_seqs(same_view) == true) { profile_enter(send_leave_prof_); gu_trace(send_leave(false)); profile_leave(send_leave_prof_); } for (NodeMap::const_iterator i = known_.begin(); i != known_.end(); ++i) { const UUID& uuid(NodeMap::key(i)); const Node& node(NodeMap::value(i)); if (current_view_.is_member(uuid) == true) { const Range 
r(input_map_->range(node.index())); if (r.lu() <= last_sent_) { send_gap(EVS_CALLER, uuid, current_view_.id(), Range(r.lu(), last_sent_)); } } } profile_leave(input_map_prof_); gu_trace(retrans_user(msg.source(), same_view)); } return; } else if (is_msg_from_previous_view(msg) == true) { return; } else if (install_message_ != 0) { // Note: don't send join from this branch yet, join is // sent at the end of this method if (install_message_->source() == msg.source()) { evs_log_info(I_STATE) << "shift to gather due to representative " << msg.source() << " join"; if (msg.source_view_id() == install_message_->install_view_id()) { // Representative reached operational state, we follow // Other instances installed view before this one, so it is // safe to shift to S_OPERATIONAL // Mark all operational nodes in install message as installed for (MessageNodeList::const_iterator mi = install_message_->node_list().begin(); mi != install_message_->node_list().end(); ++mi) { if (MessageNodeList::value(mi).operational() == true) { NodeMap::iterator jj; gu_trace(jj = known_.find_checked( MessageNodeList::key(mi))); NodeMap::value(jj).set_installed(true); } } inst.set_tstamp(gu::datetime::Date::now()); if (state() == S_INSTALL) { profile_enter(shift_to_prof_); gu_trace(shift_to(S_OPERATIONAL)); profile_leave(shift_to_prof_); if (pending_leave_ == true) { close(); return; } // proceed to process actual join message } else { log_warn << self_string() << "received join message from new " << "view while in GATHER, dropping"; return; } } gu_trace(shift_to(S_GATHER, false)); } else if (consensus_.is_consistent(*install_message_) == true) { return; // Commented out: It seems to be better strategy to // just wait source of inconsistent join to time out // instead of shifting to gather. #443 // if (consensus_.is_consistent(msg) == true) // { // return; // } // else // { // log_warn << "join message not consistent " << msg; // log_info << "state (stderr): "; // std::cerr << *this << std::endl; // // gu_trace(shift_to(S_GATHER, false)); // } } else { evs_log_info(I_STATE) << "shift to GATHER, install message is " << "inconsistent when handling join from " << msg.source() << " " << msg.source_view_id(); evs_log_info(I_STATE) << "state: " << *this; gu_trace(shift_to(S_GATHER, false)); } } else if (state() != S_GATHER) { evs_log_info(I_STATE) << " shift to GATHER while handling join message from " << msg.source() << " " << msg.source_view_id(); gu_trace(shift_to(S_GATHER, false)); } gcomm_assert(output_.empty() == true); // If source node is member of current view but has already // formed new view, mark it unoperational if (current_view_.is_member(msg.source()) == true && msg.source_view_id().seq() > current_view_.id().seq()) { evs_log_info(I_STATE) << " join source has already formed new view, marking inactive"; set_inactive(msg.source()); return; } // Collect view ids to gather_views_ list. // Add unseen nodes to known list and evicted nodes to evicted list. // Evicted nodes must also be added to known list for GATHER time // bookkeeping. // No need to adjust node state here, it is done later on in // check_suspects()/cross_check_inactives(). 
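// The rest of this handler proceeds as follows: record view ids and
// eviction flags from the message node list (loop below), timestamp the
// source if it sees this node as operational (or mark it inactive
// otherwise), update input map safe seqs for nodes coming from the same
// view and complete own messages up to the highest seen seqno, retransmit
// missing user and leave messages, run the suspect/inactive cross checks,
// and finally send a new join (and, on the representative, an install
// message) once consensus has been reached.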
for (MessageNodeList::const_iterator i(msg.node_list().begin()); i != msg.node_list().end(); ++i) { NodeMap::iterator ni(known_.find(MessageNodeList::key(i))); const UUID mn_uuid(MessageNodeList::key(i)); const MessageNode& mn(MessageNodeList::value(i)); gather_views_.insert(std::make_pair(mn.view_id(), gu::datetime::Date::now())); if (ni == known_.end()) { known_.insert_unique( std::make_pair(mn_uuid, Node(*this))); } // Evict nodes according to join message if (mn_uuid != uuid() && mn.evicted() == true) { set_inactive(mn_uuid); if (is_evicted(mn_uuid) == false) { evict(mn_uuid); } } } // Timestamp source if it sees processing node as operational. // Adjust local entry operational status. MessageNodeList::const_iterator self(msg.node_list().find(uuid())); if (msg.node_list().end() != self) { if(MessageNodeList::value(self).operational() == true) { inst.set_tstamp(gu::datetime::Date::now()); } else { evs_log_info(I_STATE) << " declaring source " << msg.source() << " as inactive (mutual exclusion)"; set_inactive(msg.source()); } } inst.set_join_message(&msg); // Select nodes that are coming from the same view as seen by // message source MessageNodeList same_view; for_each(msg.node_list().begin(), msg.node_list().end(), SelectNodesOp(same_view, current_view_.id(), true, true)); // Find out self from node list MessageNodeList::const_iterator nlself_i(same_view.find(uuid())); // Other node coming from the same view if (msg.source() != uuid() && msg.source_view_id() == current_view_.id()) { gcomm_assert(nlself_i != same_view.end()); // Update input map state (void)update_im_safe_seqs(same_view); // Find out max hs and complete up to that if needed MessageNodeList::const_iterator max_hs_i( max_element(same_view.begin(), same_view.end(), RangeHsCmp())); const seqno_t max_hs(MessageNodeList::value(max_hs_i).im_range().hs()); if (last_sent_ < max_hs) { gu_trace(complete_user(max_hs)); } } // gu_trace(retrans_user(msg.source(), same_view)); // Retrans leave messages that others are missing gu_trace(retrans_leaves(same_view)); // Make cross check to resolve conflict if two nodes // declare each other inactive. There is no need to make // this for own messages. 
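// check_suspects() declares a suspected node inactive only if it is still
// seen operational locally and more than half of the current view members'
// join messages agree with the suspicion (e.g. in a five member view at
// least three joins must mark it suspected, since s_cnt must exceed
// 5/2 = 2). cross_check_inactives() then covers the asymmetric case where
// a node that is merely suspected locally has already been declared
// inactive by the join source.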
if (msg.source() != uuid()) { gu_trace(check_suspects(msg.source(), same_view)); gu_trace(cross_check_inactives(msg.source(), same_view)); gu_trace(check_unseen()); gu_trace(check_nil_view_id()); } // Eliminate asymmetry according to operational status flags in // join messages gu_trace(asymmetry_elimination()); // If current join message differs from current state, send new join const JoinMessage* curr_join(NodeMap::value(self_i_).join_message()); MessageNodeList new_nl; populate_node_list(&new_nl); if (curr_join == 0 || (curr_join->aru_seq() != input_map_->aru_seq() || curr_join->seq() != input_map_->safe_seq() || curr_join->node_list() != new_nl)) { gu_trace(create_join()); if (consensus_.is_consensus() == false) { send_join(false); } } if (consensus_.is_consensus() == true) { if (is_representative(uuid()) == true) { gu_trace(send_install(EVS_CALLER)); } } } void gcomm::evs::Proto::handle_leave(const LeaveMessage& msg, NodeMap::iterator ii) { assert(ii != known_.end()); assert(state() != S_CLOSED && state() != S_JOINING); Node& node(NodeMap::value(ii)); evs_log_debug(D_LEAVE_MSGS) << "leave message " << msg; if (msg.source() != uuid() && node.is_inactive() == true) { evs_log_debug(D_LEAVE_MSGS) << "dropping leave from already inactive"; return; } node.set_leave_message(&msg); if (msg.source() == uuid()) { // The last one to live, instant close. Otherwise continue // serving until it becomes apparent that others have // leave message. if (current_view_.members().size() == 1) { profile_enter(shift_to_prof_); gu_trace(shift_to(S_CLOSED)); profile_leave(shift_to_prof_); } } else { // Always set node nonoperational if leave message is seen node.set_operational(false); if (msg.source_view_id() != current_view_.id() || is_msg_from_previous_view(msg) == true) { // Silent drop return; } const seqno_t prev_safe_seq(update_im_safe_seq(node.index(), msg.aru_seq())); if (prev_safe_seq != input_map_->safe_seq(node.index())) { node.set_tstamp(gu::datetime::Date::now()); } if (state() == S_OPERATIONAL) { profile_enter(shift_to_prof_); evs_log_info(I_STATE) << " shift to GATHER when handling leave from " << msg.source() << " " << msg.source_view_id(); gu_trace(shift_to(S_GATHER, true)); profile_leave(shift_to_prof_); } else if (state() == S_GATHER && prev_safe_seq != input_map_->safe_seq(node.index())) { profile_enter(send_join_prof_); gu_trace(send_join()); profile_leave(send_join_prof_); } } } void gcomm::evs::Proto::handle_install(const InstallMessage& msg, NodeMap::iterator ii) { assert(ii != known_.end()); assert(state() != S_CLOSED && state() != S_JOINING); Node& inst(NodeMap::value(ii)); evs_log_debug(D_INSTALL_MSGS) << "install msg " << msg; if (state() == S_LEAVING) { // Check if others have receievd leave message or declared // as unoperational before shifting to closed. MessageNodeList::const_iterator mn_i(msg.node_list().find(uuid())); if (mn_i != msg.node_list().end()) { const MessageNode& mn(MessageNodeList::value(mn_i)); if (mn.operational() == false || mn.leaving() == true) { profile_enter(shift_to_prof_); gu_trace(shift_to(S_CLOSED)); profile_leave(shift_to_prof_); } } return; } else if (state() == S_OPERATIONAL) { // Drop install messages in operational state. evs_log_debug(D_INSTALL_MSGS) << "dropping install message in already installed view"; return; } else if (inst.operational() == false) { // Message source is not seen as operational, must not accept // anything from it. 
evs_log_debug(D_INSTALL_MSGS) << "install message source " << msg.source() << " is not operational, discarding message"; return; } else if (is_msg_from_previous_view(msg) == true) { // Delayed install message evs_log_debug(D_FOREIGN_MSGS) << " dropping install message from previous view"; return; } else if (install_message_ != 0) { if (msg.source() == install_message_->source() && msg.install_view_id().seq() > install_message_->install_view_id().seq()) { // Representative regenerated install message evs_log_debug(D_INSTALL_MSGS) << "regenerated install message"; setall_committed(false); setall_installed(false); delete install_message_; install_message_ = 0; // Fall through to process new install message } else if (msg.source() == install_message_->source()) { // Duplicate or delayed install message evs_log_debug(D_INSTALL_MSGS) << "duplicate or delayed install message"; return; } else { MessageNodeList::const_iterator self(msg.node_list().find(uuid())); if (msg.node_list().end() == self || MessageNodeList::value(self).operational() == false) { evs_log_debug(D_INSTALL_MSGS) << "dropping install message, processing node not in " << "new view"; } else { // Two nodes decided to generate install message simultaneously, // shift to gather to combine groups in install messages. log_warn << self_string() << " shift to GATHER due to conflicting install " << "messages"; gu_trace(shift_to(S_GATHER)); } return; } } else if (inst.installed() == true) { log_warn << self_string() << " shift to GATHER due to inconsistent state"; profile_enter(shift_to_prof_); gu_trace(shift_to(S_GATHER)); profile_leave(shift_to_prof_); return; } // Construct join from install message so that the most recent // information from representative is updated to local state. if (msg.source() != uuid()) { const MessageNode& mn( MessageNodeList::value( msg.node_list().find_checked(msg.source()))); JoinMessage jm(msg.version(), msg.source(), mn.view_id(), msg.seq(), msg.aru_seq(), msg.fifo_seq(), msg.node_list()); handle_join(jm, ii); } // Drop install message if processing node won't be part of the // view to be installed. // Don't set nodes that are forming another view inactive here, // they should enter new view shortly after install message // delivery and should be ready to restart GATHER round. MessageNodeList::const_iterator self(msg.node_list().find(uuid())); if (msg.node_list().end() == self || MessageNodeList::value(self).operational() == false) { evs_log_debug(D_INSTALL_MSGS) << "dropping install message, processing node not in new view"; return; } // Proceed to install phase assert(install_message_ == 0); // Run through known nodes and remove each entry that is // not member of current view or present in install message. // This is to prevent inconsistent view of group when first message(s) // from new node are received after install message on representative // and before install message on other nodes. bool changed(false); NodeMap::iterator i, i_next; for (NodeMap::iterator i(known_.begin()); i != known_.end(); i = i_next) { i_next = i, ++i_next; const UUID& uuid(NodeMap::key(i)); if (msg.node_list().find(uuid) == msg.node_list().end() && current_view_.members().find(uuid) == current_view_.members().end()) { log_info << self_string() << " temporarily discarding known " << uuid << " due to received install message"; known_.erase(i); changed = true; } } // Recreate join message to match current state, otherwise is_consistent() // below will fail. 
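    // If the install message is consistent with the (possibly recreated)
    // local join state, it is stored as install_message_ and acknowledged
    // with a commit gap addressed to the new install view id; otherwise
    // this node shifts back to GATHER to restart membership negotiation.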
if (changed == true) { (void)create_join(); } // See if install message is consistent with local state. // Is_consistent() checks only local state and local join // message in case other nodes have already been seen and reported // nodes that will not be in the next view. if (consensus_.is_consistent(msg) == true) { inst.set_tstamp(gu::datetime::Date::now()); install_message_ = new InstallMessage(msg); assert(install_message_->source() != UUID::nil()); assert(install_message_->flags() != 0); profile_enter(send_gap_prof_); // Send commit gap gu_trace(send_gap(EVS_CALLER, UUID::nil(), install_message_->install_view_id(), Range(), true)); profile_leave(send_gap_prof_); } else { evs_log_debug(D_INSTALL_MSGS) << "install message " << msg << " not consistent with state " << *this; profile_enter(shift_to_prof_); gu_trace(shift_to(S_GATHER, true)); profile_leave(shift_to_prof_); } } void gcomm::evs::Proto::handle_delayed_list(const DelayedListMessage& msg, NodeMap::iterator ii) { if (auto_evict_ == 0) { // Ignore evict list messages if auto_evict_ is disabled. return; } Node& node(NodeMap::value(ii)); node.set_delayed_list_message(&msg); gu::datetime::Date now(gu::datetime::Date::now()); // Construct a list of evict candidates that appear in evict list messages // with cnt greater than local auto_evict_. If evict candidate is reported // by majority of the current group, evict process is triggered. // UUID -> over auto_evict_, total count typedef std::map > Evicts; Evicts evicts; bool found(false); for (NodeMap::const_iterator i(known_.begin()); i != known_.end(); ++i) { const DelayedListMessage* const dlm( NodeMap::value(i).delayed_list_message()); if (dlm == 0) { continue; } else if (dlm->delayed_list().find(uuid()) != dlm->delayed_list().end()) { evs_log_debug(D_STATE) << "found self " << uuid() << " from evict list from " << msg.source() << " at " << get_address(msg.source()); continue; } else if (dlm->tstamp() + delayed_keep_period_ < now) { evs_log_debug(D_STATE) << "ignoring expired evict message"; continue; } for (DelayedListMessage::DelayedList::const_iterator dlm_i(dlm->delayed_list().begin()); dlm_i != dlm->delayed_list().end(); ++dlm_i) { if (dlm_i->second <= 1) { // Don't consider entries with single delayed event as // evict candidates. continue; } std::pair eir( evicts.insert( std::make_pair( dlm_i->first, std::make_pair(0, 0)))); evs_log_debug(D_STATE) << "eir " << eir.first->first << " " << eir.first->second.first << " " << eir.first->second.second; ++eir.first->second.second; // total count if (dlm_i->second >= auto_evict_) { ++eir.first->second.first; // over threshold count found = true; } } } // Evict candidates that have reached threshold count for (Evicts::const_iterator i(evicts.begin()); found == true && i != evicts.end(); ++i) { if (is_evicted(i->first) == true) { // Already evicted, avoid spamming continue; } evs_log_info(I_STATE) << "evict candidate " << i->first << " " << i->second.first << " " << i->second.second; // If the candidate is in the current view, require majority // of the view to agree. If the candidate is not in the current // view, require majority of known nodes to agree. Ability to // evict nodes outside of the group (even while in non-PC) is // needed to stabilize cluster also in the case that nodes // have already partitioned. // TODO: Record stable views from PC and use weights from there // accordingly (need to be added to view). 
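        // A candidate is evicted below only if at least one reporter
        // counted more than auto_evict_ delayed events for it
        // (i->second.first != 0) and the reporters form a majority, either
        // of the current view for a candidate that is a view member or of
        // all known nodes (e.g. with 6 known nodes more than 3 reporters
        // are required).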
if (i->second.first != 0 && ((current_view_.is_member(i->first) && i->second.second > current_view_.members().size()/2) || i->second.second > known_.size()/2)) { log_warn << "evicting member " << i->first << " at " << get_address(i->first) << " permanently from group"; evict(i->first); if (state() == S_OPERATIONAL) { shift_to(S_GATHER, true); } } } } galera-3-25.3.20/gcomm/src/uuid.cpp0000644000015300001660000000024413042054732016501 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy */ #include "gcomm/uuid.hpp" const gcomm::UUID gcomm::UUID::uuid_nil_ = gcomm::UUID(GU_UUID_NIL); galera-3-25.3.20/gcomm/src/evs_consensus.hpp0000644000015300001660000000361113042054732020436 0ustar jenkinsjenkins/* * Copyright (C) 2009-2014 Codership Oy */ #ifndef GCOMM_EVS_CONSENSUS_HPP #define GCOMM_EVS_CONSENSUS_HPP #include "evs_seqno.hpp" namespace gcomm { class UUID; class View; namespace evs { class NodeMap; class InputMap; class Message; class Consensus; class Proto; } } class gcomm::evs::Consensus { public: Consensus(const Proto& proto, const NodeMap& known, const InputMap& input_map, const View& current_view) : proto_ (proto), known_ (known), input_map_ (input_map), current_view_(current_view) { } /*! * Compare two messages if they are equal in consensus context. */ bool equal(const Message&, const Message&) const; /*! * Compute highest reachable safe seq from local state. * * @return Highest reachable safe seq. */ seqno_t highest_reachable_safe_seq() const; // input map safe seq but without considering // all suspected leaving nodes. seqno_t safe_seq_wo_all_susupected_leaving_nodes() const; /*! * Check if highest reachable safe seq according to message * consistent with local state. */ bool is_consistent_highest_reachable_safe_seq(const Message&) const; /*! * Check if message aru seq, safe seq and node ranges matches to * local state. 
*/ bool is_consistent_input_map(const Message&) const; bool is_consistent_partitioning(const Message&) const; bool is_consistent_leaving(const Message&) const; bool is_consistent_same_view(const Message&) const; bool is_consistent(const Message&) const; bool is_consensus() const; private: const Proto& proto_; const NodeMap& known_; const InputMap& input_map_; const View& current_view_; }; #endif // GCOMM_EVS_CONSENSUS_HPP galera-3-25.3.20/gcomm/src/conf.cpp0000644000015300001660000001651113042054732016464 0ustar jenkinsjenkins/* * Copyright (C) 2009-2014 Codership Oy */ #include "gcomm/conf.hpp" #include "defaults.hpp" #include "common.h" #include static std::string const Delim = "."; // Protonet std::string const gcomm::Conf::ProtonetBackend("protonet.backend"); std::string const gcomm::Conf::ProtonetVersion("protonet.version"); // TCP static std::string const SocketPrefix("socket" + Delim); std::string const gcomm::Conf::TcpNonBlocking = SocketPrefix + "non_blocking"; std::string const gcomm::Conf::SocketChecksum = SocketPrefix + "checksum"; std::string const gcomm::Conf::SocketRecvBufSize = SocketPrefix + "recv_buf_size"; // GMCast std::string const gcomm::Conf::GMCastScheme = "gmcast"; static std::string const GMCastPrefix(gcomm::Conf::GMCastScheme + Delim); std::string const gcomm::Conf::GMCastVersion = GMCastPrefix + "version"; std::string const gcomm::Conf::GMCastGroup = GMCastPrefix + "group"; std::string const gcomm::Conf::GMCastListenAddr = GMCastPrefix + "listen_addr"; std::string const gcomm::Conf::GMCastMCastAddr = GMCastPrefix + "mcast_addr"; std::string const gcomm::Conf::GMCastMCastPort = GMCastPrefix + "mcast_port"; std::string const gcomm::Conf::GMCastMCastTTL = GMCastPrefix + "mcast_ttl"; std::string const gcomm::Conf::GMCastTimeWait = GMCastPrefix + "time_wait"; std::string const gcomm::Conf::GMCastPeerTimeout = GMCastPrefix + "peer_timeout"; std::string const gcomm::Conf::GMCastMaxInitialReconnectAttempts = GMCastPrefix + "mira"; std::string const gcomm::Conf::GMCastPeerAddr = GMCastPrefix + "peer_addr"; std::string const gcomm::Conf::GMCastIsolate = GMCastPrefix + "isolate"; std::string const gcomm::Conf::GMCastSegment = GMCastPrefix + "segment"; // EVS std::string const gcomm::Conf::EvsScheme = "evs"; static std::string const EvsPrefix(gcomm::Conf::EvsScheme + Delim); std::string const gcomm::Conf::EvsVersion = EvsPrefix + "version"; std::string const gcomm::Conf::EvsViewForgetTimeout = EvsPrefix + "view_forget_timeout"; std::string const gcomm::Conf::EvsInactiveTimeout = EvsPrefix + "inactive_timeout"; std::string const gcomm::Conf::EvsSuspectTimeout = EvsPrefix + "suspect_timeout"; std::string const gcomm::Conf::EvsInactiveCheckPeriod = EvsPrefix + "inactive_check_period"; std::string const gcomm::Conf::EvsInstallTimeout = EvsPrefix + "install_timeout"; std::string const gcomm::Conf::EvsKeepalivePeriod = EvsPrefix + "keepalive_period"; std::string const gcomm::Conf::EvsJoinRetransPeriod = EvsPrefix + "join_retrans_period"; std::string const gcomm::Conf::EvsStatsReportPeriod = EvsPrefix + "stats_report_period"; std::string const gcomm::Conf::EvsDebugLogMask = EvsPrefix + "debug_log_mask"; std::string const gcomm::Conf::EvsInfoLogMask = EvsPrefix + "info_log_mask"; std::string const gcomm::Conf::EvsSendWindow = EvsPrefix + "send_window"; std::string const gcomm::Conf::EvsUserSendWindow = EvsPrefix + "user_send_window"; std::string const gcomm::Conf::EvsUseAggregate = EvsPrefix + "use_aggregate"; std::string const gcomm::Conf::EvsCausalKeepalivePeriod = EvsPrefix + 
"causal_keepalive_period"; std::string const gcomm::Conf::EvsMaxInstallTimeouts = EvsPrefix + "max_install_timeouts"; std::string const gcomm::Conf::EvsDelayMargin = EvsPrefix + "delay_margin"; std::string const gcomm::Conf::EvsDelayedKeepPeriod = EvsPrefix + "delayed_keep_period"; std::string const gcomm::Conf::EvsEvict = EvsPrefix + "evict"; std::string const gcomm::Conf::EvsAutoEvict = EvsPrefix + "auto_evict"; // PC std::string const gcomm::Conf::PcScheme = "pc"; static std::string const PcPrefix(gcomm::Conf::PcScheme + Delim); std::string const gcomm::Conf::PcVersion = PcPrefix + "version"; std::string const gcomm::Conf::PcIgnoreSb = PcPrefix + "ignore_sb"; std::string const gcomm::Conf::PcIgnoreQuorum = PcPrefix + "ignore_quorum"; std::string const gcomm::Conf::PcChecksum = PcPrefix + "checksum"; std::string const gcomm::Conf::PcLinger = PcPrefix + "linger"; std::string const gcomm::Conf::PcAnnounceTimeout = PcPrefix + "announce_timeout"; std::string const gcomm::Conf::PcNpvo = PcPrefix + "npvo"; std::string const gcomm::Conf::PcBootstrap = PcPrefix + "bootstrap"; std::string const gcomm::Conf::PcWaitPrim = PcPrefix + "wait_prim"; std::string const gcomm::Conf::PcWaitPrimTimeout = PcPrefix + "wait_prim_timeout"; std::string const gcomm::Conf::PcWeight = PcPrefix + "weight"; std::string const gcomm::Conf::PcRecovery = PcPrefix + "recovery"; void gcomm::Conf::register_params(gu::Config& cnf) { #define GCOMM_CONF_ADD(_x_) cnf.add(_x_); #define GCOMM_CONF_ADD_DEFAULT(_x_) cnf.add(_x_, Defaults::_x_); GCOMM_CONF_ADD (COMMON_BASE_HOST_KEY); GCOMM_CONF_ADD (COMMON_BASE_PORT_KEY); GCOMM_CONF_ADD_DEFAULT(ProtonetBackend); GCOMM_CONF_ADD_DEFAULT(ProtonetVersion); GCOMM_CONF_ADD (TcpNonBlocking); GCOMM_CONF_ADD_DEFAULT(SocketChecksum); GCOMM_CONF_ADD_DEFAULT(SocketRecvBufSize); GCOMM_CONF_ADD_DEFAULT(GMCastVersion); GCOMM_CONF_ADD (GMCastGroup); GCOMM_CONF_ADD (GMCastListenAddr); GCOMM_CONF_ADD (GMCastMCastAddr); GCOMM_CONF_ADD (GMCastMCastPort); GCOMM_CONF_ADD (GMCastMCastTTL); GCOMM_CONF_ADD (GMCastMCastAddr); GCOMM_CONF_ADD (GMCastTimeWait); GCOMM_CONF_ADD (GMCastPeerTimeout); GCOMM_CONF_ADD (GMCastMaxInitialReconnectAttempts); GCOMM_CONF_ADD (GMCastPeerAddr); GCOMM_CONF_ADD (GMCastIsolate); GCOMM_CONF_ADD_DEFAULT(GMCastSegment); GCOMM_CONF_ADD (EvsVersion); GCOMM_CONF_ADD_DEFAULT(EvsViewForgetTimeout); GCOMM_CONF_ADD_DEFAULT(EvsSuspectTimeout); GCOMM_CONF_ADD_DEFAULT(EvsInactiveTimeout); GCOMM_CONF_ADD_DEFAULT(EvsInactiveCheckPeriod); GCOMM_CONF_ADD (EvsInstallTimeout); GCOMM_CONF_ADD (EvsKeepalivePeriod); GCOMM_CONF_ADD_DEFAULT(EvsJoinRetransPeriod); GCOMM_CONF_ADD_DEFAULT(EvsStatsReportPeriod); GCOMM_CONF_ADD (EvsDebugLogMask); GCOMM_CONF_ADD (EvsInfoLogMask); GCOMM_CONF_ADD_DEFAULT(EvsSendWindow); GCOMM_CONF_ADD_DEFAULT(EvsUserSendWindow); GCOMM_CONF_ADD (EvsUseAggregate); GCOMM_CONF_ADD (EvsCausalKeepalivePeriod); GCOMM_CONF_ADD_DEFAULT(EvsMaxInstallTimeouts); GCOMM_CONF_ADD_DEFAULT(EvsDelayMargin); GCOMM_CONF_ADD_DEFAULT(EvsDelayedKeepPeriod); GCOMM_CONF_ADD (EvsEvict); GCOMM_CONF_ADD_DEFAULT(EvsAutoEvict); GCOMM_CONF_ADD_DEFAULT(PcVersion); GCOMM_CONF_ADD_DEFAULT(PcIgnoreSb); GCOMM_CONF_ADD_DEFAULT(PcIgnoreQuorum); GCOMM_CONF_ADD_DEFAULT(PcChecksum); GCOMM_CONF_ADD_DEFAULT(PcAnnounceTimeout); GCOMM_CONF_ADD (PcLinger); GCOMM_CONF_ADD_DEFAULT(PcNpvo); GCOMM_CONF_ADD (PcBootstrap); GCOMM_CONF_ADD_DEFAULT(PcWaitPrim); GCOMM_CONF_ADD_DEFAULT(PcWaitPrimTimeout); GCOMM_CONF_ADD_DEFAULT(PcWeight); GCOMM_CONF_ADD_DEFAULT(PcRecovery); #undef GCOMM_CONF_ADD #undef GCOMM_CONF_ADD_DEFAULT } void 
gcomm::Conf::check_params(const gu::Config& conf) { check_recv_buf_size(conf.get(SocketRecvBufSize)); } size_t gcomm::Conf::check_recv_buf_size(const std::string& str) { // signed type to check for negative values return check_range(SocketRecvBufSize, str, 0, std::numeric_limits::max()); } galera-3-25.3.20/gcomm/src/pc_proto.hpp0000644000015300001660000001652513042054732017376 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy */ #ifndef GCOMM_PC_PROTO_HPP #define GCOMM_PC_PROTO_HPP #include #include #include "gcomm/uuid.hpp" #include "gcomm/protolay.hpp" #include "gcomm/conf.hpp" #include "pc_message.hpp" #include "defaults.hpp" #include "gu_uri.hpp" #ifndef GCOMM_PC_MAX_VERSION #define GCOMM_PC_MAX_VERSION 0 #endif // GCOMM_PC_MAX_VERSION namespace gcomm { namespace pc { class Proto; std::ostream& operator<<(std::ostream& os, const Proto& p); } } class gcomm::pc::Proto : public Protolay { public: enum State { S_CLOSED, S_STATES_EXCH, S_INSTALL, S_PRIM, S_TRANS, S_NON_PRIM, S_MAX }; static std::string to_string(const State s) { switch (s) { case S_CLOSED: return "CLOSED"; case S_STATES_EXCH: return "STATES_EXCH"; case S_INSTALL: return "INSTALL"; case S_TRANS: return "TRANS"; case S_PRIM: return "PRIM"; case S_NON_PRIM: return "NON_PRIM"; default: gu_throw_fatal << "Invalid state"; } } Proto(gu::Config& conf, const UUID& uuid, SegmentId segment, const gu::URI& uri = gu::URI("pc://"), View* rst_view = NULL) : Protolay(conf), my_uuid_ (uuid), start_prim_ (), npvo_ (param(conf, uri, Conf::PcNpvo, Defaults::PcNpvo)), ignore_quorum_ (param(conf, uri, Conf::PcIgnoreQuorum, Defaults::PcIgnoreQuorum)), ignore_sb_ (param(conf, uri, Conf::PcIgnoreSb, gu::to_string(ignore_quorum_))), closing_ (false), state_ (S_CLOSED), last_sent_seq_ (0), checksum_ (param(conf, uri, Conf::PcChecksum, Defaults::PcChecksum)), instances_ (), self_i_ (instances_.insert_unique(std::make_pair(uuid, Node()))), state_msgs_ (), current_view_ (0, V_NONE), pc_view_ (0, V_NON_PRIM), views_ (), mtu_ (std::numeric_limits::max()), weight_ (check_range(Conf::PcWeight, param(conf, uri, Conf::PcWeight, Defaults::PcWeight), 0, 0xff)), rst_view_ () { set_weight(weight_); NodeMap::value(self_i_).set_segment(segment); if (rst_view) { set_restored_view(rst_view); } conf.set(Conf::PcNpvo, gu::to_string(npvo_)); conf.set(Conf::PcIgnoreQuorum, gu::to_string(ignore_quorum_)); conf.set(Conf::PcIgnoreSb, gu::to_string(ignore_sb_)); conf.set(Conf::PcChecksum, gu::to_string(checksum_)); conf.set(Conf::PcWeight, gu::to_string(weight_)); } ~Proto() { } const UUID& uuid() const { return my_uuid_; } bool prim() const { return NodeMap::value(self_i_).prim(); } void set_prim(const bool val) { NodeMap::value(self_i_).set_prim(val); } void mark_non_prim(); const ViewId& last_prim() const { return NodeMap::value(self_i_).last_prim(); } void set_last_prim(const ViewId& vid) { gcomm_assert(vid.type() == V_PRIM); NodeMap::value(self_i_).set_last_prim(vid); } uint32_t last_seq() const { return NodeMap::value(self_i_).last_seq(); } void set_last_seq(const uint32_t seq) { NodeMap::value(self_i_).set_last_seq(seq); } int64_t to_seq() const { return NodeMap::value(self_i_).to_seq(); } void set_to_seq(const int64_t seq) { NodeMap::value(self_i_).set_to_seq(seq); } void set_weight(int weight) { NodeMap::value(self_i_).set_weight(weight); } class SMMap : public Map { }; const View& current_view() const { return current_view_; } const UUID& self_id() const { return my_uuid_; } State state() const { return state_; } void shift_to (State); void send_state (); 
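    // send_install() serves three purposes, distinguished by its arguments:
    // a regular install after states exchange (bootstrap == false,
    // weight == -1), a primary component bootstrap (bootstrap == true,
    // which sets InstallMessage::F_BOOTSTRAP), and a member weight change
    // (weight >= 0, which sets F_WEIGHT_CHANGE and carries the new weight
    // for this node).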
void send_install(bool bootstrap, int weight = -1); void handle_first_trans (const View&); void handle_trans (const View&); void handle_reg (const View&); void handle_msg (const Message&, const Datagram&, const ProtoUpMeta&); void handle_up (const void*, const Datagram&, const ProtoUpMeta&); int handle_down (Datagram&, const ProtoDownMeta&); void connect(bool first) { log_debug << self_id() << " start_prim " << first; start_prim_ = first; closing_ = false; shift_to(S_NON_PRIM); } void close(bool force = false) { closing_ = true; } void handle_view (const View&); bool set_param(const std::string& key, const std::string& val); void set_mtu(size_t mtu) { mtu_ = mtu; } size_t mtu() const { return mtu_; } void set_restored_view(View* rst_view) { gcomm_assert(state_ == S_CLOSED); rst_view_ = rst_view; NodeMap::value(self_i_).set_last_prim( // set last prim just for exchanging uuid and seq. // but actually restored view is not actual prim view. ViewId(V_NON_PRIM, rst_view -> id().uuid(), rst_view -> id().seq())); } const View* restored_view() const { return rst_view_; } private: friend std::ostream& operator<<(std::ostream& os, const Proto& p); Proto (const Proto&); Proto& operator=(const Proto&); bool requires_rtr() const; bool is_prim() const; bool have_quorum(const View&, const View&) const; bool have_split_brain(const View&) const; void validate_state_msgs() const; void cleanup_instances(); void handle_state(const Message&, const UUID&); void handle_install(const Message&, const UUID&); void handle_trans_install(const Message&, const UUID&); void handle_user(const Message&, const Datagram&, const ProtoUpMeta&); void deliver_view(bool bootstrap = false); UUID const my_uuid_; // Node uuid bool start_prim_; // Is allowed to start in prim comp bool npvo_; // Newer prim view overrides bool ignore_quorum_; // Ignore lack of quorum bool ignore_sb_; // Ignore split-brain condition bool closing_; // Protocol is in closing stage State state_; // State uint32_t last_sent_seq_; // Msg seqno of last sent message bool checksum_; // Enable message checksumming NodeMap instances_; // Map of known node instances NodeMap::iterator self_i_; // Iterator pointing to self node instance SMMap state_msgs_; // Map of received state messages View current_view_; // EVS view View pc_view_; // PC view std::list views_; // List of seen views size_t mtu_; // Maximum transmission unit int weight_; // Node weight in voting View* rst_view_; // restored PC view }; #endif // PC_PROTO_HPP galera-3-25.3.20/gcomm/src/protocol_version.hpp0000644000015300001660000000031713042054732021147 0ustar jenkinsjenkins/* * Copyright (C) 2014 Codership Oy */ #ifndef GCOMM_PROTOCOL_VERSION_HPP #define GCOMM_PROTOCOL_VERSION_HPP #define GCOMM_PROTOCOL_MAX_VERSION 1 #endif // GCOMM_PROTOCOL_VERSION_HPP galera-3-25.3.20/gcomm/src/defaults.cpp0000644000015300001660000000541413042054732017346 0ustar jenkinsjenkins/* * Copyright (C) 2012-2014 Codership Oy */ #include "defaults.hpp" #include "gcomm/common.hpp" namespace gcomm { #ifdef HAVE_ASIO_HPP std::string const Defaults::ProtonetBackend = "asio"; #else #error "Only asio protonet backend is currently supported" #endif /* HAVE_ASIO_HPP */ std::string const Defaults::ProtonetVersion = "0"; std::string const Defaults::SocketChecksum = "2"; std::string const Defaults::SocketRecvBufSize = "212992"; std::string const Defaults::GMCastVersion = "0"; std::string const Defaults::GMCastTcpPort = BASE_PORT_DEFAULT; std::string const Defaults::GMCastSegment = "0"; std::string const Defaults::GMCastTimeWait = 
"PT5S"; std::string const Defaults::GMCastPeerTimeout = "PT3S"; std::string const Defaults::EvsViewForgetTimeout = "PT24H"; std::string const Defaults::EvsViewForgetTimeoutMin = "PT1S"; std::string const Defaults::EvsInactiveCheckPeriod = "PT0.5S"; std::string const Defaults::EvsSuspectTimeout = "PT5S"; std::string const Defaults::EvsSuspectTimeoutMin = "PT0.1S"; std::string const Defaults::EvsInactiveTimeout = "PT15S"; std::string const Defaults::EvsInactiveTimeoutMin = "PT0.1S"; std::string const Defaults::EvsRetransPeriod = "PT1S"; std::string const Defaults::EvsRetransPeriodMin = "PT0.1S"; std::string const Defaults::EvsJoinRetransPeriod = "PT1S"; std::string const Defaults::EvsStatsReportPeriod = "PT1M"; std::string const Defaults::EvsStatsReportPeriodMin = "PT1S"; std::string const Defaults::EvsSendWindow = "4"; std::string const Defaults::EvsSendWindowMin = "1"; std::string const Defaults::EvsUserSendWindow = "2"; std::string const Defaults::EvsUserSendWindowMin = "1"; std::string const Defaults::EvsMaxInstallTimeouts = "3"; std::string const Defaults::EvsDelayMargin = "PT1S"; std::string const Defaults::EvsDelayedKeepPeriod = "PT30S"; std::string const Defaults::EvsAutoEvict = "0"; std::string const Defaults::PcAnnounceTimeout = "PT3S"; std::string const Defaults::PcChecksum = "false"; std::string const Defaults::PcIgnoreQuorum = "false"; std::string const Defaults::PcIgnoreSb = PcIgnoreQuorum; std::string const Defaults::PcNpvo = "false"; std::string const Defaults::PcVersion = "0"; std::string const Defaults::PcWaitPrim = "true"; std::string const Defaults::PcWaitPrimTimeout = "PT30S"; std::string const Defaults::PcWeight = "1"; std::string const Defaults::PcRecovery = "1"; } galera-3-25.3.20/gcomm/src/view.cpp0000644000015300001660000002101013042054732016477 0ustar jenkinsjenkins/* * Copyright (C) 2009-2012 Codership Oy */ #include "common/common.h" #include "gcomm/view.hpp" #include "gcomm/types.hpp" #include "gcomm/util.hpp" #include "gu_logger.hpp" #include "gu_exception.hpp" #include #include size_t gcomm::ViewId::unserialize(const gu::byte_t* buf, const size_t buflen, const size_t offset) { size_t off; gu_trace (off = uuid_.unserialize(buf, buflen, offset)); uint32_t w; gu_trace (off = gu::unserialize4(buf, buflen, off, w)); seq_ = w & 0x3fffffff; type_ = static_cast(w >> 30); return off; } size_t gcomm::ViewId::serialize(gu::byte_t* buf, const size_t buflen, const size_t offset) const { size_t off; gcomm_assert(type_ != V_NONE); gu_trace (off = uuid_.serialize(buf, buflen, offset)); uint32_t w((seq_ & 0x3fffffff) | (type_ << 30)); gu_trace (off = gu::serialize4(w, buf, buflen, off)); return off; } static std::string to_string(const gcomm::ViewType type) { switch (type) { case gcomm::V_TRANS: return "TRANS"; case gcomm::V_REG: return "REG"; case gcomm::V_NON_PRIM: return "NON_PRIM"; case gcomm::V_PRIM: return "PRIM"; default: return "UNKNOWN"; // gcomm_throw_fatal << "Invalid type value"; } } std::ostream& gcomm::operator<<(std::ostream& os, const gcomm::ViewId& vi) { return (os << "view_id(" << ::to_string(vi.type()) << "," << vi.uuid() << "," << vi.seq()) << ")"; } void gcomm::View::add_member(const UUID& pid, SegmentId segment) { gu_trace((void)members_.insert_unique(std::make_pair(pid, Node(segment)))); } void gcomm::View::add_members(NodeList::const_iterator begin, NodeList::const_iterator end) { for (NodeList::const_iterator i = begin; i != end; ++i) { gu_trace((void)members_.insert_unique( std::make_pair(NodeList::key(i), NodeList::value(i)))); } } void 
gcomm::View::add_joined(const UUID& pid, SegmentId segment) { gu_trace((void)joined_.insert_unique(std::make_pair(pid, Node(segment)))); } void gcomm::View::add_left(const UUID& pid, SegmentId segment) { gu_trace((void)left_.insert_unique(std::make_pair(pid, Node(segment)))); } void gcomm::View::add_partitioned(const UUID& pid, SegmentId segment) { gu_trace((void)partitioned_.insert_unique(std::make_pair(pid, Node(segment)))); } const gcomm::NodeList& gcomm::View::members() const { return members_; } const gcomm::NodeList& gcomm::View::joined() const { return joined_; } const gcomm::NodeList& gcomm::View::left() const { return left_; } const gcomm::NodeList& gcomm::View::partitioned() const { return partitioned_; } gcomm::ViewType gcomm::View::type() const { return view_id_.type(); } const gcomm::ViewId& gcomm::View::id() const { return view_id_; } const gcomm::UUID& gcomm::View::representative() const { if (members_.empty()) { return UUID::nil(); } else { return NodeList::key(members_.begin()); } } bool gcomm::View::is_empty() const { return (view_id_.uuid() == UUID::nil() && members_.size() == 0); } bool gcomm::operator==(const gcomm::View& a, const gcomm::View& b) { return (a.id() == b.id() && a.members() == b.members() && a.joined() == b.joined() && a.left() == b.left() && a.partitioned() == b.partitioned()); } std::ostream& gcomm::operator<<(std::ostream& os, const gcomm::View& view) { os << "view("; if (view.is_empty() == true) { os << "(empty)"; } else { os << view.id(); os << " memb {\n"; os << view.members(); os << "} joined {\n"; os << view.joined(); os << "} left {\n"; os << view.left(); os << "} partitioned {\n"; os << view.partitioned(); os << "}"; } os << ")"; return os; } std::ostream& gcomm::View::write_stream(std::ostream& os) const { os << "#vwbeg" << std::endl; os << "view_id: "; view_id_.write_stream(os) << std::endl; os << "bootstrap: " << bootstrap_ << std::endl; for(NodeList::const_iterator it = members_.begin(); it != members_.end(); ++it) { const UUID& uuid(it -> first); const Node& node(it -> second); os << "member: "; uuid.write_stream(os); os << " "; node.write_stream(os) << std::endl; } os << "#vwend" << std::endl; return os; } std::istream& gcomm::View::read_stream(std::istream& is) { std::string line; while(is.good()) { getline(is, line); std::istringstream istr(line); std::string param; istr >> param; if (param == "#vwbeg") continue; else if (param == "#vwend") break; if (param == "view_id:") { view_id_.read_stream(istr); } else if (param == "bootstrap:") { istr >> bootstrap_; } else if (param == "member:") { UUID uuid; Node node(0); uuid.read_stream(istr); node.read_stream(istr); add_member(uuid, node.segment()); } } return is; } std::ostream& gcomm::ViewState::write_stream(std::ostream& os) const { os << "my_uuid: "; my_uuid_.write_stream(os); os << std::endl; view_.write_stream(os); return os; } std::istream& gcomm::ViewState::read_stream(std::istream& is) { std::string param; std::string line; while(is.good()) { getline(is, line); std::istringstream istr(line); istr >> param; if (param == "my_uuid:") { my_uuid_.read_stream(istr); } else if (param == "#vwbeg") { // read from next line. view_.read_stream(is); } } return is; } std::string gcomm::ViewState::get_viewstate_file_name(gu::Config& conf) { std::string dir_name = COMMON_BASE_DIR_DEFAULT; try { // If base_dir is set in the configuration we should use // it instead of current directory default. 
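        // If COMMON_BASE_DIR_KEY is not set, the lookup below may throw
        // gu::NotFound, which is caught so that dir_name keeps its
        // COMMON_BASE_DIR_DEFAULT value; the resulting path is
        // dir_name + '/' + COMMON_VIEW_STAT_FILE.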
dir_name = conf.get(COMMON_BASE_DIR_KEY, dir_name); } catch (const gu::NotFound &) { // In case it is not known we do not have to do // anything and use default. } return dir_name + '/' + COMMON_VIEW_STAT_FILE; } void gcomm::ViewState::write_file() const { // write to temporary file first. std::string tmp(file_name_ + ".tmp"); FILE* fout = fopen(tmp.c_str(), "w"); if (fout == NULL) { log_warn << "open file(" << tmp << ") failed(" << strerror(errno) << ")"; return ; } std::ostringstream os; try { write_stream(os); } catch (const std::exception& e) { log_warn << "write ostringstream failed(" << e.what() << ")"; fclose(fout); return ; } std::string content(os.str()); if (fwrite(content.c_str(), content.size(), 1, fout) == 0) { log_warn << "write file(" << tmp << ") failed(" << strerror(errno) << ")"; fclose(fout); return ; } // fflush is called inside. if (fclose(fout) != 0){ log_warn << "close file(" << tmp << ") failed(" << strerror(errno) << ")"; return ; } // rename atomically. if (rename(tmp.c_str(), file_name_.c_str()) != 0) { log_warn << "rename file(" << tmp << ") to file(" << file_name_ << ") failed(" << strerror(errno) << ")"; } } bool gcomm::ViewState::read_file() { if (access(file_name_.c_str(), R_OK) != 0) { log_warn << "access file(" << file_name_ << ") failed(" << strerror(errno) << ")"; return false; } try { std::ifstream ifs(file_name_.c_str(), std::ifstream::in); read_stream(ifs); ifs.close(); return true; } catch (const std::exception& e) { log_warn << "read file(" << file_name_ << ") failed(" << e.what() << ")"; return false; } } // remove_file is static function, it should remove the view // state file even if there is no ViewState object around. // View state file name is derived in the same way as for // ViewState object. void gcomm::ViewState::remove_file(gu::Config& conf) { std::string file_name = get_viewstate_file_name(conf); (void) unlink(file_name.c_str()); } galera-3-25.3.20/gcomm/src/SConscript0000644000015300001660000000216513042054732017045 0ustar jenkinsjenkins# Import('env') libgcomm_env = env.Clone() # Include paths libgcomm_env.Append(CPPPATH = Split(''' # #/common #/galerautils/src #/gcomm/src ''')) libgcomm_env.Append(CXXFLAGS = ' -fno-strict-aliasing') libgcomm_sources = [ 'conf.cpp', 'defaults.cpp', 'datagram.cpp', 'evs_consensus.cpp', 'evs_input_map2.cpp', 'evs_message2.cpp', 'evs_node.cpp', 'evs_proto.cpp', 'gmcast.cpp', 'gmcast_proto.cpp', 'pc.cpp', 'pc_proto.cpp', 'protonet.cpp', 'protostack.cpp', 'transport.cpp', 'uuid.cpp', 'view.cpp', 'socket.cpp'] if '-DHAVE_ASIO_HPP' in libgcomm_env['CPPFLAGS']: # ASIO sources need to be built with relaxed C++ flags libgcomm_sources.extend([ 'asio_tcp.cpp', 'asio_udp.cpp', 'asio_protonet.cpp']) libgcomm_env.StaticLibrary('gcomm', libgcomm_sources) env.Append(LIBGALERA_OBJS = libgcomm_env.SharedObject(libgcomm_sources)) galera-3-25.3.20/gcomm/src/pc_proto.cpp0000644000015300001660000014706213042054732017372 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy */ #include "pc_proto.hpp" #include "pc_message.hpp" #include "gcomm/util.hpp" #include "gu_logger.hpp" #include "gu_macros.h" #include #include using std::rel_ops::operator!=; using std::rel_ops::operator>; // // Helpers // class SelectPrimOp { public: SelectPrimOp(gcomm::pc::Proto::SMMap& states) : states_(states) { } void operator()(const gcomm::pc::Proto::SMMap::value_type& vt) const { const gcomm::UUID& uuid(gcomm::pc::Proto::SMMap::key(vt)); const gcomm::pc::Message& msg(gcomm::pc::Proto::SMMap::value(vt)); const gcomm::pc::NodeMap& 
nm(msg.node_map()); gcomm::pc::NodeMap::const_iterator nm_i(nm.find(uuid)); if (nm_i == nm.end()) { gu_throw_error(EPROTO) << "protocol error, self not found from " << uuid << " state msg node list"; } if (gcomm::pc::NodeMap::value(nm_i).prim() == true) { states_.insert(vt); } } private: gcomm::pc::Proto::SMMap& states_; }; class ToSeqCmpOp { public: bool operator()(const gcomm::pc::Proto::SMMap::value_type& a, const gcomm::pc::Proto::SMMap::value_type& b) const { const gcomm::pc::Node& astate( gcomm::pc::NodeMap::value( gcomm::pc::Proto::SMMap::value(a).node_map() .find_checked(gcomm::pc::Proto::SMMap::key(a)))); const gcomm::pc::Node& bstate( gcomm::pc::NodeMap::value( gcomm::pc::Proto::SMMap::value(b).node_map() .find_checked(gcomm::pc::Proto::SMMap::key(b)))); return (astate.to_seq() < bstate.to_seq()); } }; // Return max to seq found from states, -1 if states is empty static int64_t get_max_to_seq(const gcomm::pc::Proto::SMMap& states) { if (states.empty() == true) return -1; gcomm::pc::Proto::SMMap::const_iterator max_i( max_element(states.begin(), states.end(), ToSeqCmpOp())); const gcomm::pc::Node& state( gcomm::pc::Proto::SMMap::value(max_i).node( gcomm::pc::Proto::SMMap::key(max_i))); return state.to_seq(); } static void checksum(gcomm::pc::Message& msg, gcomm::Datagram& dg) { uint16_t crc16(gcomm::crc16(dg, 4)); msg.checksum(crc16, true); gcomm::pop_header(msg, dg); gcomm::push_header(msg, dg); } static void test_checksum(gcomm::pc::Message& msg, const gcomm::Datagram& dg, size_t offset) { uint16_t msg_crc16(msg.checksum()); uint16_t crc16(gcomm::crc16(dg, offset + 4)); if (crc16 != msg_crc16) { gu_throw_fatal << "Message checksum failed"; } } std::ostream& gcomm::pc::operator<<(std::ostream& os, const gcomm::pc::Proto& p) { os << "pc::Proto{"; os << "uuid=" << p.my_uuid_ << ","; os << "start_prim=" << p.start_prim_ << ","; os << "npvo=" << p.npvo_ << ","; os << "ignore_sb=" << p.ignore_sb_ << ","; os << "ignore_quorum=" << p.ignore_quorum_ << ","; os << "state=" << p.state_ << ","; os << "last_sent_seq=" << p.last_sent_seq_ << ","; os << "checksum=" << p.checksum_ << ","; os << "instances=\n" << p.instances_ << ","; os << "state_msgs=\n" << p.state_msgs_ << ","; os << "current_view=" << p.current_view_ << ","; os << "pc_view=" << p.pc_view_ << ","; // os << "views=" << p.views_ << ","; os << "mtu=" << p.mtu_ << "}"; return os; } // // // void gcomm::pc::Proto::send_state() { log_debug << self_id() << " sending state"; StateMessage pcs(current_view_.version()); NodeMap& im(pcs.node_map()); for (NodeMap::iterator i = instances_.begin(); i != instances_.end(); ++i) { // Assume all nodes in the current view have reached current to_seq Node& local_state(NodeMap::value(i)); if (current_view_.is_member(NodeMap::key(i)) == true) { local_state.set_to_seq(to_seq()); } if (is_evicted(NodeMap::key(i)) == true) { local_state.set_evicted(true); } im.insert_unique(std::make_pair(NodeMap::key(i), local_state)); } log_debug << self_id() << " local to seq " << to_seq(); log_debug << self_id() << " sending state: " << pcs; gu::Buffer buf; serialize(pcs, buf); Datagram dg(buf); if (send_down(dg, ProtoDownMeta())) { gu_throw_fatal << "pass down failed"; } } void gcomm::pc::Proto::send_install(bool bootstrap, int weight) { gcomm_assert(bootstrap == false || weight == -1); log_debug << self_id() << " send install"; InstallMessage pci(current_view_.version()); NodeMap& im(pci.node_map()); for (SMMap::const_iterator i = state_msgs_.begin(); i != state_msgs_.end(); ++i) { if 
(current_view_.members().find(SMMap::key(i)) != current_view_.members().end()) { gu_trace( im.insert_unique( std::make_pair( SMMap::key(i), SMMap::value(i).node((SMMap::key(i)))))); } } if (bootstrap == true) { pci.flags(pci.flags() | InstallMessage::F_BOOTSTRAP); log_debug << self_id() << " sending PC bootstrap message " << pci; } else if (weight != -1) { pci.flags(pci.flags() | InstallMessage::F_WEIGHT_CHANGE); Node& self(pci.node(uuid())); self.set_weight(weight); log_info << self_id() << " sending PC weight change message " << pci; } else { log_debug << self_id() << " sending install: " << pci; } gu::Buffer buf; serialize(pci, buf); Datagram dg(buf); int ret = send_down(dg, ProtoDownMeta()); if (ret != 0) { log_warn << self_id() << " sending install message failed: " << strerror(ret); } } void gcomm::pc::Proto::deliver_view(bool bootstrap) { View v(pc_view_.version(), pc_view_.id(), bootstrap); for (NodeMap::const_iterator i = instances_.begin(); i != instances_.end(); ++i) { if (current_view_.members().find(NodeMap::key(i)) == current_view_.members().end()) { v.add_partitioned(NodeMap::key(i), NodeMap::value(i).segment()); } else { v.add_member(NodeMap::key(i), NodeMap::value(i).segment()); } } ProtoUpMeta um(UUID::nil(), ViewId(), &v); log_info << v; send_up(Datagram(), um); set_stable_view(v); if (v.id().type() == V_NON_PRIM && rst_view_ && !start_prim_) { // pc recovery process. uint32_t max_view_seqno = 0; bool check = true; for(NodeMap::const_iterator i = instances_.begin(); i != instances_.end(); ++i) { const UUID& uuid(NodeMap::key(i)); // just consider property of nodes in restored view. if (rst_view_ -> members().find(uuid) != rst_view_ -> members().end()) { const Node& node(NodeMap::value(i)); const ViewId& last_prim(node.last_prim()); if (last_prim.type() != V_NON_PRIM || last_prim.uuid() != rst_view_ -> id().uuid()) { log_warn << "node uuid: " << uuid << " last_prim(type: " << last_prim.type() << ", uuid: " << last_prim.uuid() << ") is inconsistent to " << "restored view(type: V_NON_PRIM, uuid: " << rst_view_ ->id().uuid(); check = false; break; } max_view_seqno = std::max(max_view_seqno, last_prim.seq()); } } if (check) { assert(max_view_seqno != 0); log_debug << "max_view_seqno = " << max_view_seqno << ", rst_view_seqno = " << rst_view_ -> id().seq(); log_debug << "rst_view = "; log_debug << *rst_view_; log_debug << "deliver_view = "; log_debug << v; if (rst_view_ -> id().seq() == max_view_seqno && rst_view_ -> members() == v.members()) { log_info << "promote to primary component"; // since all of them are non-primary component // we need to bootstrap. send_install(true); // clear rst_view after pc is formed, otherwise // there would be network partition when sending // install message. and if rst_view is cleared here, // then pc recovery will never happen again. } } } // if pc is formed by normal process(start_prim_=true) instead of // pc recovery process, rst_view_ won't be clear. // however this will prevent pc remerge(see is_prim function) // so we have to clear rst_view_ once pc is formed.. 
if (v.id().type() == V_PRIM && rst_view_) { log_info << "clear restored view"; rst_view_ = NULL; } } void gcomm::pc::Proto::mark_non_prim() { pc_view_ = View(current_view_.version(), ViewId(V_NON_PRIM, current_view_.id())); for (NodeMap::iterator i = instances_.begin(); i != instances_.end(); ++i) { const UUID& uuid(NodeMap::key(i)); Node& inst(NodeMap::value(i)); if (current_view_.members().find(uuid) != current_view_.members().end()) { inst.set_prim(false); pc_view_.add_member(uuid, inst.segment()); } } set_prim(false); } void gcomm::pc::Proto::shift_to(const State s) { // State graph static const bool allowed[S_MAX][S_MAX] = { // Closed { false, false, false, false, false, true }, // States exch { true, false, true, false, true, true }, // Install { true, false, false, true, true, true }, // Prim { true, false, false, false, true, true }, // Trans { true, true, false, false, false, true }, // Non-prim { true, false, false, true, true, true } }; if (allowed[state()][s] == false) { gu_throw_fatal << "Forbidden state transition: " << to_string(state()) << " -> " << to_string(s); } switch (s) { case S_CLOSED: break; case S_STATES_EXCH: state_msgs_.clear(); break; case S_INSTALL: break; case S_PRIM: { pc_view_ = View(current_view_.version(), ViewId(V_PRIM, current_view_.id())); for (NodeMap::iterator i = instances_.begin(); i != instances_.end(); ++i) { const UUID& uuid(NodeMap::key(i)); Node& inst(NodeMap::value(i)); NodeList::const_iterator nli; if ((nli = current_view_.members().find(uuid)) != current_view_.members().end()) { inst.set_prim(true); inst.set_last_prim(ViewId(V_PRIM, current_view_.id())); inst.set_last_seq(0); inst.set_to_seq(to_seq()); pc_view_.add_member(uuid, inst.segment()); } else { inst.set_prim(false); } } last_sent_seq_ = 0; set_prim(true); break; } case S_TRANS: break; case S_NON_PRIM: mark_non_prim(); break; default: ; } log_debug << self_id() << " shift_to: " << to_string(state()) << " -> " << to_string(s) << " prim " << prim() << " last prim " << last_prim() << " to_seq " << to_seq(); state_ = s; } void gcomm::pc::Proto::handle_first_trans(const View& view) { gcomm_assert(state() == S_NON_PRIM); gcomm_assert(view.type() == V_TRANS); if (start_prim_ == true) { if (view.members().size() > 1 || view.is_empty()) { gu_throw_fatal << "Corrupted view"; } if (NodeList::key(view.members().begin()) != uuid()) { gu_throw_fatal << "Bad first UUID: " << NodeList::key(view.members().begin()) << ", expected: " << uuid(); } set_last_prim(ViewId(V_PRIM, view.id())); set_prim(true); } current_view_ = view; shift_to(S_TRANS); } // Compute weighted sum of members in node list. If member cannot be found // from node_map its weight is assumed to be zero. static size_t weighted_sum(const gcomm::NodeList& node_list, const gcomm::pc::NodeMap& node_map) { size_t sum(0); for (gcomm::NodeList::const_iterator i(node_list.begin()); i != node_list.end(); ++i) { int weight(0); gcomm::pc::NodeMap::const_iterator node_i( node_map.find(gcomm::NodeList::key(i))); if (node_i != node_map.end()) { const gcomm::pc::Node& node(gcomm::pc::NodeMap::value(node_i)); gcomm_assert(node.weight() >= 0 && node.weight() <= 0xff); weight = node.weight(); } else { weight = 0; } sum += weight; } return sum; } // Check if all members in node_list have weight associated. This is needed // to fall back to backwards compatibility mode during upgrade (all weights are // assumed to be one). See have_quorum() and have_split_brain() below. 
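// Illustrative quorum arithmetic (hypothetical numbers, not taken from the
// source): with three nodes of weight 1 and a previous prim view {A,B,C}
// (weighted size 3), a surviving component {A,B} passes have_quorum() since
// 2*(1+1) + 0 = 4 > 3, while a lone {C} does not (2*1 = 2 < 3). With four
// equal-weight nodes split 2/2, 2*2 = 4 equals the prim view size 4, which is
// exactly the equality case have_split_brain() detects.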
static bool have_weights(const gcomm::NodeList& node_list, const gcomm::pc::NodeMap& node_map) { for (gcomm::NodeList::const_iterator i(node_list.begin()); i != node_list.end(); ++i) { gcomm::pc::NodeMap::const_iterator node_i( node_map.find(gcomm::NodeList::key(i))); if (node_i != node_map.end()) { const gcomm::pc::Node& node(gcomm::pc::NodeMap::value(node_i)); if (node.weight() == -1) { return false; } } } return true; } bool gcomm::pc::Proto::have_quorum(const View& view, const View& pc_view) const { if (have_weights(view.members(), instances_) && have_weights(view.left(), instances_) && have_weights(pc_view.members(), instances_)) { return (weighted_sum(view.members(), instances_) * 2 + weighted_sum(view.left(), instances_) > weighted_sum(pc_view.members(), instances_)); } else { return (view.members().size()*2 + view.left().size() > pc_view.members().size()); } } bool gcomm::pc::Proto::have_split_brain(const View& view) const { if (have_weights(view.members(), instances_) && have_weights(view.left(), instances_) && have_weights(pc_view_.members(), instances_)) { return (weighted_sum(view.members(), instances_) * 2 + weighted_sum(view.left(), instances_) == weighted_sum(pc_view_.members(), instances_)); } else { return (view.members().size()*2 + view.left().size() == pc_view_.members().size()); } } void gcomm::pc::Proto::handle_trans(const View& view) { gcomm_assert(view.id().type() == V_TRANS); gcomm_assert(view.id().uuid() == current_view_.id().uuid() && view.id().seq() == current_view_.id().seq()); gcomm_assert(view.version() == current_view_.version()); log_debug << self_id() << " \n\n current view " << current_view_ << "\n\n next view " << view << "\n\n pc view " << pc_view_; if (have_quorum(view, pc_view_) == false) { if (closing_ == false && ignore_sb_ == true && have_split_brain(view)) { // configured to ignore split brain log_warn << "Ignoring possible split-brain " << "(allowed by configuration) from view:\n" << current_view_ << "\nto view:\n" << view; } else if (closing_ == false && ignore_quorum_ == true) { // configured to ignore lack of quorum log_warn << "Ignoring lack of quorum " << "(allowed by configuration) from view:\n" << current_view_ << "\nto view:\n" << view; } else { current_view_ = view; // shift_to(S_NON_PRIM); mark_non_prim(); deliver_view(); shift_to(S_TRANS); return; } } else { log_debug << self_id() << " quorum ok"; } current_view_ = view; shift_to(S_TRANS); } void gcomm::pc::Proto::handle_reg(const View& view) { gcomm_assert(view.type() == V_REG); gcomm_assert(state() == S_TRANS); if (view.is_empty() == false && view.id().seq() <= current_view_.id().seq()) { gu_throw_fatal << "Non-increasing view ids: current view " << current_view_.id() << " new view " << view.id(); } if (current_view_.version() < view.version()) { log_info << "PC protocol upgrade " << current_view_.version() << " -> " << view.version(); } else if (current_view_.version() > view.version()) { log_info << "PC protocol downgrade " << current_view_.version() << " -> " << view.version(); } current_view_ = view; views_.push_back(current_view_); if (current_view_.is_empty() == true) { shift_to(S_NON_PRIM); deliver_view(); shift_to(S_CLOSED); } else { shift_to(S_STATES_EXCH); send_state(); } } void gcomm::pc::Proto::handle_view(const View& view) { // We accept only EVS TRANS and REG views if (view.type() != V_TRANS && view.type() != V_REG) { gu_throw_fatal << "Invalid view type"; } // Make sure that self exists in view if (view.is_empty() == false && view.is_member(uuid()) == false) { 
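        // EVS is expected to deliver only views that contain the local node;
        // a non-empty view without self would indicate a membership protocol
        // bug, hence the fatal error below.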
gu_throw_fatal << "Self not found from non empty view: " << view; } log_debug << self_id() << " " << view; if (view.type() == V_TRANS) { if (current_view_.type() == V_NONE) { handle_first_trans(view); } else { handle_trans(view); } } else { handle_reg(view); } } // Validate state message agains local state void gcomm::pc::Proto::validate_state_msgs() const { // #622, #638 Compute max TO seq among states from prim SMMap prim_state_msgs; std::for_each(state_msgs_.begin(), state_msgs_.end(), SelectPrimOp(prim_state_msgs)); const int64_t max_to_seq(get_max_to_seq(prim_state_msgs)); for (SMMap::const_iterator i = state_msgs_.begin(); i != state_msgs_.end(); ++i) { const UUID& msg_source_uuid(SMMap::key(i)); const Node& msg_source_state(SMMap::value(i).node(msg_source_uuid)); const NodeMap& msg_state_map(SMMap::value(i).node_map()); for (NodeMap::const_iterator si = msg_state_map.begin(); si != msg_state_map.end(); ++si) { const UUID& uuid(NodeMap::key(si)); const Node& msg_state(NodeMap::value(si)); const Node& local_state(NodeMap::value(instances_.find_checked(uuid))); if (prim() == true && msg_source_state.prim() == true && msg_state.prim() == true) { if (current_view_.is_member(uuid) == true) { // Msg source claims to come from prim view and this node // is in prim. All message prim view states must be equal // to local ones. if (msg_state.weight() == -1) { // backwards compatibility, ignore weight in state check gcomm_assert( msg_state.prim() == local_state.prim() && msg_state.last_seq() == local_state.last_seq() && msg_state.last_prim() == local_state.last_prim() && msg_state.to_seq() == local_state.to_seq()) << self_id() << " node " << uuid << " prim state message and local states not consistent:" << " msg node " << msg_state << " local state " << local_state; } else { gcomm_assert(msg_state == local_state) << self_id() << " node " << uuid << " prim state message and local states not consistent:" << " msg node " << msg_state << " local state " << local_state; } gcomm_assert(msg_state.to_seq() == max_to_seq) << self_id() << " node " << uuid << " to seq not consistent with local state:" << " max to seq " << max_to_seq << " msg state to seq " << msg_state.to_seq(); } } else if (prim() == true) { log_debug << self_id() << " node " << uuid << " from " << msg_state.last_prim() << " joining " << last_prim(); } else if (msg_state.prim() == true) { // @todo: Cross check with other state messages coming from prim log_debug << self_id() << " joining to " << msg_state.last_prim(); } } } } // @note This method is currently for sanity checking only. RTR is not // implemented yet. 
bool gcomm::pc::Proto::requires_rtr() const { bool ret = false; // Find maximum reported to_seq const int64_t max_to_seq(get_max_to_seq(state_msgs_)); for (SMMap::const_iterator i = state_msgs_.begin(); i != state_msgs_.end(); ++i) { NodeMap::const_iterator ii( SMMap::value(i).node_map().find_checked(SMMap::key(i))); const Node& inst = NodeMap::value(ii); const int64_t to_seq = inst.to_seq(); const ViewId last_prim = inst.last_prim(); if (to_seq != -1 && to_seq != max_to_seq && last_prim.type() != V_NON_PRIM) { log_debug << self_id() << " RTR is needed: " << to_seq << " / " << last_prim; ret = true; } } return ret; } void gcomm::pc::Proto::cleanup_instances() { gcomm_assert(state() == S_PRIM); gcomm_assert(current_view_.type() == V_REG); NodeMap::iterator i, i_next; for (i = instances_.begin(); i != instances_.end(); i = i_next) { i_next = i, ++i_next; const UUID& uuid(NodeMap::key(i)); if (current_view_.is_member(uuid) == false) { log_debug << self_id() << " cleaning up instance " << uuid; instances_.erase(i); } else { // Clear unknow status from nodes in current view here. // New PC has been installed and if paritioning happens, // we either know for sure that the other partitioned component ends // up in non-prim, or in other case we have valid PC view to // deal with in case of remerge. NodeMap::value(i).set_un(false); } } } bool gcomm::pc::Proto::is_prim() const { bool prim(false); ViewId last_prim(V_NON_PRIM); int64_t to_seq(-1); // Check if any of instances claims to come from prim view for (SMMap::const_iterator i = state_msgs_.begin(); i != state_msgs_.end(); ++i) { const Node& state(SMMap::value(i).node(SMMap::key(i))); if (state.prim() == true) { log_info << "Node " << SMMap::key(i) << " state prim"; prim = true; last_prim = state.last_prim(); to_seq = state.to_seq(); break; } } // Verify that all members are either coming from the same prim // view or from non-prim for (SMMap::const_iterator i = state_msgs_.begin(); i != state_msgs_.end(); ++i) { const Node& state(SMMap::value(i).node(SMMap::key(i))); if (state.prim() == true) { if (state.last_prim() != last_prim) { gu_throw_fatal << self_id() << " last prims not consistent"; } if (state.to_seq() != to_seq) { gu_throw_fatal << self_id() << " TO seqs not consistent"; } } else { log_debug << "Non-prim " << SMMap::key(i) <<" from " << state.last_prim() << " joining prim"; } } // No members coming from prim view, check if last known prim // view can be recovered (majority of members from last prim alive) if (prim == false) { gcomm_assert(last_prim == ViewId(V_NON_PRIM)) << last_prim << " != " << ViewId(V_NON_PRIM); // First determine if there are any nodes still in unknown state. 
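        // Nodes in unknown state may have formed a primary component of their
        // own while partitioned (see handle_trans_install()), so automatic
        // re-bootstrap is refused until their state is known again.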
std::set un; for (NodeMap::const_iterator i(instances_.begin()); i != instances_.end(); ++i) { if (NodeMap::value(i).un() == true && current_view_.members().find(NodeMap::key(i)) == current_view_.members().end()) { un.insert(NodeMap::key(i)); } } if (un.empty() == false) { std::ostringstream oss; std::copy(un.begin(), un.end(), std::ostream_iterator(oss, " ")); log_info << "Nodes " << oss.str() << "are still in unknown state, " << "unable to rebootstrap new prim"; return false; } // Collect last prim members and evicted from state messages MultiMap last_prim_uuids; std::set evicted; for (SMMap::const_iterator i = state_msgs_.begin(); i != state_msgs_.end(); ++i) { for (NodeMap::const_iterator j = SMMap::value(i).node_map().begin(); j != SMMap::value(i).node_map().end(); ++j) { const UUID& uuid(NodeMap::key(j)); const Node& inst(NodeMap::value(j)); if (inst.last_prim().type() != V_NON_PRIM && std::find::iterator, std::pair >( last_prim_uuids.begin(), last_prim_uuids.end(), std::make_pair(inst.last_prim(), uuid)) == last_prim_uuids.end()) { last_prim_uuids.insert(std::make_pair(inst.last_prim(), uuid)); } if (inst.evicted() == true) { evicted.insert(uuid); } } } if (last_prim_uuids.empty() == true) { log_warn << "no nodes coming from prim view, prim not possible"; return false; } // Construct greatest view set of UUIDs ignoring evicted ones std::set greatest_view; // Get range of UUIDs in greatest views const ViewId greatest_view_id(last_prim_uuids.rbegin()->first); std::pair::const_iterator, MultiMap::const_iterator> gvi = last_prim_uuids.equal_range(greatest_view_id); // Iterate over range and insert into greatest view if not evicted for (MultiMap::const_iterator i = gvi.first; i != gvi.second; ++i) { if (evicted.find(MultiMap::value(i)) == evicted.end()) { std::pair::iterator, bool> iret = greatest_view.insert( MultiMap::value(i)); // Assert that inserted UUID was unique gcomm_assert(iret.second == true); } } log_debug << self_id() << " greatest view id " << greatest_view_id; // Compute list of present view members std::set present; for (NodeList::const_iterator i = current_view_.members().begin(); i != current_view_.members().end(); ++i) { present.insert(NodeList::key(i)); } // Compute intersection of present and greatest view. If the // intersection size is the same as greatest view size, // it is safe to rebootstrap PC. std::set intersection; set_intersection(greatest_view.begin(), greatest_view.end(), present.begin(), present.end(), inserter(intersection, intersection.begin())); log_debug << self_id() << " intersection size " << intersection.size() << " greatest view size " << greatest_view.size(); if (intersection.size() == greatest_view.size()) { log_info << "re-bootstrapping prim from partitioned components"; prim = true; } } return prim; } void gcomm::pc::Proto::handle_state(const Message& msg, const UUID& source) { gcomm_assert(msg.type() == Message::T_STATE); gcomm_assert(state() == S_STATES_EXCH); gcomm_assert(state_msgs_.size() < current_view_.members().size()); log_debug << self_id() << " handle state from " << source << " " << msg; // Early check for possibly conflicting primary components. The one // with greater view id may continue (as it probably has been around // for longer timer). However, this should be configurable policy. 
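    // Whether the newer or the older primary view id wins is controlled by
    // the npvo_ setting; depending on it, the conflicting state message is
    // either discarded below or the conflict is treated as fatal.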
if (prim() == true) { const Node& si(NodeMap::value(msg.node_map().find(source))); if (si.prim() == true && si.last_prim() != last_prim()) { log_warn << self_id() << " conflicting prims: my prim: " << last_prim() << " other prim: " << si.last_prim(); if ((npvo_ == true && last_prim() < si.last_prim()) || (npvo_ == false && last_prim() > si.last_prim())) { log_warn << self_id() << " discarding other prim view: " << (npvo_ == true ? "newer" : "older" ) << " overrides"; return; } else { gu_throw_fatal << self_id() << " aborting due to conflicting prims: " << (npvo_ == true ? "newer" : "older" ) << " overrides"; } } } state_msgs_.insert_unique(std::make_pair(source, msg)); if (state_msgs_.size() == current_view_.members().size()) { // Insert states from previously unseen nodes into local state map for (SMMap::const_iterator i = state_msgs_.begin(); i != state_msgs_.end(); ++i) { const NodeMap& sm_im(SMMap::value(i).node_map()); for (NodeMap::const_iterator j = sm_im.begin(); j != sm_im.end(); ++j) { const UUID& sm_uuid(NodeMap::key(j)); const Node& sm_node(NodeMap::value(j)); NodeMap::iterator local_node_i(instances_.find(sm_uuid)); if (local_node_i == instances_.end()) { const Node& sm_state(NodeMap::value(j)); instances_.insert_unique(std::make_pair(sm_uuid, sm_state)); } else { Node& local_node(NodeMap::value(local_node_i)); if (local_node.weight() == -1 && sm_node.weight() != -1) { // backwards compatibility: override weight for // instances which have been reported by old nodes // but have weights associated anyway local_node.set_weight(sm_node.weight()); } else if (local_node.weight() != sm_node.weight() && SMMap::key(i) == NodeMap::key(local_node_i)) { log_warn << self_id() << "overriding reported weight for " << NodeMap::key(local_node_i); local_node.set_weight(sm_node.weight()); } if (prim() == false && sm_node.un() == true && // note #92 local_node_i != self_i_) { // If coming from non-prim, set local instance status // to unknown if any of the state messages has it // marked unknown. If coming from prim, there is // no need to set this as it is known if the node // corresponding to local instance is in primary. local_node.set_un(true); } } } } // Validate that all state messages are consistent before proceeding gu_trace(validate_state_msgs()); if (is_prim() == true) { // @note Requires RTR does not actually have effect, but let it // be for debugging purposes until a while (void)requires_rtr(); shift_to(S_INSTALL); if (current_view_.members().find(uuid()) == current_view_.members().begin()) { send_install(false); } } else { // #571 Deliver NON-PRIM views in all cases. 
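            // Even when no primary component can be formed or recovered, the
            // resulting NON-PRIM view is still delivered so that the upper
            // layer observes the current membership (#571).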
shift_to(S_NON_PRIM); deliver_view(); } } } void gcomm::pc::Proto::handle_install(const Message& msg, const UUID& source) { if (state() == S_PRIM) { if ((msg.flags() & Message::F_WEIGHT_CHANGE) == 0) { log_warn << "non weight changing install in S_PRIM: " << msg; } else { NodeMap::iterator local_i(instances_.find(source)); const Node& msg_n(msg.node(source)); log_info << self_id() << " changing node " << source << " weight (reg) " << NodeMap::value(local_i).weight() << " -> " << msg_n.weight(); NodeMap::value(local_i).set_weight(msg_n.weight()); if (source == uuid()) { conf_.set(gcomm::Conf::PcWeight, gu::to_string(msg_n.weight())); } } return; } else if (state() == S_TRANS) { handle_trans_install(msg, source); return; } gcomm_assert(msg.type() == Message::T_INSTALL); gcomm_assert(state() == S_INSTALL || state() == S_NON_PRIM); if ((msg.flags() & Message::F_BOOTSTRAP) == 0) { log_debug << self_id() << " handle install from " << source << " " << msg; } else { log_debug << self_id() << " handle bootstrap install from " << source << " " << msg; if (state() == S_INSTALL) { log_info << "ignoring bootstrap install in " << to_string(state()) << " state"; return; } } // Validate own state NodeMap::const_iterator mi(msg.node_map().find_checked(uuid())); const Node& m_state(NodeMap::value(mi)); if (m_state.weight() == -1) { // backwards compatibility, ignore weight in state check const Node& self_state(NodeMap::value(self_i_)); if ((m_state.prim() == self_state.prim() && m_state.last_seq() == self_state.last_seq() && m_state.last_prim() == self_state.last_prim() && m_state.to_seq() == self_state.to_seq()) == false) { gu_throw_fatal << self_id() << "Install message self state does not match, " << "message state: " << m_state << ", local state: " << NodeMap::value(self_i_); } } else { if (m_state != NodeMap::value(self_i_)) { gu_throw_fatal << self_id() << "Install message self state does not match, " << "message state: " << m_state << ", local state: " << NodeMap::value(self_i_); } } // Set TO seqno according to install message int64_t to_seq(-1); bool prim_found(false); for (mi = msg.node_map().begin(); mi != msg.node_map().end(); ++mi) { const Node& m_state = NodeMap::value(mi); // check that all TO seqs coming from prim are same if (m_state.prim() == true && to_seq != -1) { if (m_state.to_seq() != to_seq) { gu_throw_fatal << "Install message TO seqnos inconsistent"; } } if (m_state.prim() == true) { prim_found = true; to_seq = std::max(to_seq, m_state.to_seq()); } } if (prim_found == false) { // #277 // prim comp was restored from non-prims, find out max known TO seq for (mi = msg.node_map().begin(); mi != msg.node_map().end(); ++mi) { const Node& m_state = NodeMap::value(mi); to_seq = std::max(to_seq, m_state.to_seq()); } log_debug << "assigning TO seq to " << to_seq << " after restoring prim"; } log_debug << self_id() << " setting TO seq to " << to_seq; set_to_seq(to_seq); shift_to(S_PRIM); deliver_view(msg.flags() & Message::F_BOOTSTRAP); cleanup_instances(); } namespace { class ViewUUIDLT { public: bool operator()(const gcomm::NodeList::value_type& a, const gcomm::NodeList::value_type& b) const { return (a.first < b.first); } }; } // When delivering install message in trans view quorum has to be re-evaluated // as the partitioned component may have installed prim view due to higher // weight. To do this, we construct pc view that would have been installed // if install message was delivered in reg view and make quorum computation // against it. 
// // It is not actually known if partitioned component installed new PC, so // we mark partitioned nodes states as unknown. This is to provide deterministic // way to prevent automatic rebootstrapping of PC if some of the seen nodes // is in unknown state. void gcomm::pc::Proto::handle_trans_install(const Message& msg, const UUID& source) { gcomm_assert(msg.type() == Message::T_INSTALL); gcomm_assert(state() == S_TRANS); gcomm_assert(current_view_.type() == V_TRANS); if ((msg.flags() & Message::F_BOOTSTRAP) != 0) { log_info << "Dropping bootstrap install in TRANS state"; return; } gcomm_assert(have_quorum(current_view_, pc_view_) == true); if ((msg.flags() & Message::F_WEIGHT_CHANGE) != 0) { NodeList nl; nl.insert(current_view_.members().begin(), current_view_.members().end()); nl.insert(current_view_.left().begin(), current_view_.left().end()); if (std::includes(nl.begin(), nl.end(), pc_view_.members().begin(), pc_view_.members().end(), ViewUUIDLT()) == false) { // Weight changing install message delivered in trans view // and previous pc view has partitioned. // // Need to be very conservative: We don't know what happened to // weight change message in partitioned component, so it may not be // safe to do quorum calculation. Shift to non-prim and // wait until partitioned component comes back (or prim is // rebootstrapped). // // It would be possible to do more fine grained decisions // based on the source of the message, but to keep things simple // always go to non-prim, this is very cornerish case after all. log_info << "Weight changing trans install leads to non-prim"; mark_non_prim(); deliver_view(); for (NodeMap::const_iterator i(msg.node_map().begin()); i != msg.node_map().end(); ++i) { if (current_view_.members().find(NodeMap::key(i)) == current_view_.members().end()) { NodeMap::iterator local_i(instances_.find(NodeMap::key(i))); if (local_i == instances_.end()) { log_warn << "Node " << NodeMap::key(i) << " not found from instances"; } else { if (NodeMap::key(i) == source) { NodeMap::value(local_i).set_weight( NodeMap::value(i).weight()); if (source == uuid()) { conf_.set(gcomm::Conf::PcWeight, gu::to_string(NodeMap::value(i).weight())); } } NodeMap::value(local_i).set_un(true); } } } } else { NodeMap::iterator local_i(instances_.find(source)); const Node& msg_n(msg.node(source)); log_info << self_id() << " changing node " << source << " weight (trans) " << NodeMap::value(local_i).weight() << " -> " << msg_n.weight(); NodeMap::value(local_i).set_weight(msg_n.weight()); if (source == uuid()) { conf_.set(gcomm::Conf::PcWeight, gu::to_string(msg_n.weight())); } } } else { View new_pc_view(current_view_.version(), ViewId(V_PRIM, current_view_.id())); for (NodeMap::iterator i(instances_.begin()); i != instances_.end(); ++i) { const UUID& uuid(NodeMap::key(i)); NodeMap::const_iterator ni(msg.node_map().find(uuid)); if (ni != msg.node_map().end()) { new_pc_view.add_member(uuid, 0); } } if (have_quorum(current_view_, new_pc_view) == false || pc_view_.type() == V_NON_PRIM) { log_info << "Trans install leads to non-prim"; mark_non_prim(); deliver_view(); // Mark all nodes in install msg node map but not in current // view with unknown status. It is not known if they delivered // install message in reg view and so formed new PC. 
for (NodeMap::const_iterator i(msg.node_map().begin()); i != msg.node_map().end(); ++i) { if (current_view_.members().find(NodeMap::key(i)) == current_view_.members().end()) { NodeMap::iterator local_i(instances_.find(NodeMap::key(i))); if (local_i == instances_.end()) { log_warn << "Node " << NodeMap::key(i) << " not found from instances"; } else { NodeMap::value(local_i).set_un(true); } } } } } } void gcomm::pc::Proto::handle_user(const Message& msg, const Datagram& dg, const ProtoUpMeta& um) { int64_t curr_to_seq(-1); if (prim() == true) { if (um.order() == O_SAFE) { set_to_seq(to_seq() + 1); curr_to_seq = to_seq(); } } else if (current_view_.members().find(um.source()) == current_view_.members().end()) { gcomm_assert(current_view_.type() == V_TRANS); // log_debug << self_id() // << " dropping message from out of view source in non-prim"; return; } if (um.order() == O_SAFE) { Node& state(NodeMap::value(instances_.find_checked(um.source()))); if (state.last_seq() + 1 != msg.seq()) { gu_throw_fatal << "gap in message sequence: source=" << um.source() << " expected_seq=" << state.last_seq() + 1 << " seq=" << msg.seq(); } state.set_last_seq(msg.seq()); } Datagram up_dg(dg, dg.offset() + msg.serial_size()); gu_trace(send_up(up_dg, ProtoUpMeta(um.source(), pc_view_.id(), 0, um.user_type(), um.order(), curr_to_seq))); } void gcomm::pc::Proto::handle_msg(const Message& msg, const Datagram& rb, const ProtoUpMeta& um) { // EVS provides send view delivery, so this assertion // should always hold. assert(msg.version() == current_view_.version()); enum Verdict { ACCEPT, DROP, FAIL }; static const Verdict verdicts[S_MAX][Message::T_MAX] = { // Msg types // NONE, STATE, INSTALL, USER { FAIL, FAIL, FAIL, FAIL }, // Closed { FAIL, ACCEPT, FAIL, FAIL }, // States exch { FAIL, FAIL, ACCEPT, FAIL }, // INSTALL { FAIL, FAIL, ACCEPT, ACCEPT }, // PRIM { FAIL, DROP, ACCEPT, ACCEPT }, // TRANS { FAIL, ACCEPT, ACCEPT, ACCEPT } // NON-PRIM }; Message::Type msg_type(msg.type()); Verdict verdict (verdicts[state()][msg.type()]); if (verdict == FAIL) { gu_throw_fatal << "Invalid input, message " << msg.to_string() << " in state " << to_string(state()); } else if (verdict == DROP) { log_debug << "Dropping input, message " << msg.to_string() << " in state " << to_string(state()); return; } switch (msg_type) { case Message::T_STATE: gu_trace(handle_state(msg, um.source())); break; case Message::T_INSTALL: gu_trace(handle_install(msg, um.source())); break; case Message::T_USER: gu_trace(handle_user(msg, rb, um)); break; default: gu_throw_fatal << "Invalid message"; } } void gcomm::pc::Proto::handle_up(const void* cid, const Datagram& rb, const ProtoUpMeta& um) { if (um.has_view() == true) { handle_view(um.view()); } else { Message msg; const gu::byte_t* b(gcomm::begin(rb)); const size_t available(gcomm::available(rb)); try { (void)msg.unserialize(b, available, 0); } catch (gu::Exception& e) { switch (e.get_errno()) { case EPROTONOSUPPORT: if (prim() == false) { gu_throw_fatal << e.what() << " terminating"; } else { log_warn << "unknown/unsupported protocol version: " << msg.version() << " dropping message"; return; } break; default: GU_TRACE(e); throw; } } if (checksum_ == true && msg.flags() & Message::F_CRC16) { test_checksum(msg, rb, rb.offset()); } try { handle_msg(msg, rb, um); } catch (gu::Exception& e) { log_error << "caught exception in PC, state dump to stderr follows:"; std::cerr << *this << std::endl; throw; } } } int gcomm::pc::Proto::handle_down(Datagram& dg, const ProtoDownMeta& dm) { switch (state()) { 
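    // Map the protocol state to an errno-style result for the caller:
    // ENOTCONN when not connected to a primary component, EAGAIN for
    // transient states, and actual sending only in S_PRIM.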
case S_CLOSED: case S_NON_PRIM: // Not connected to primary component return ENOTCONN; case S_STATES_EXCH: case S_INSTALL: case S_TRANS: // Transient error return EAGAIN; case S_PRIM: // Allowed to send, fall through break; case S_MAX: gu_throw_fatal << "invalid state " << state(); } if (gu_unlikely(dg.len() > mtu())) { return EMSGSIZE; } uint32_t seq(dm.order() == O_SAFE ? last_sent_seq_ + 1 : last_sent_seq_); UserMessage um(current_view_.version(), seq); push_header(um, dg); if (checksum_ == true) { checksum(um, dg); } int ret = send_down(dg, dm); if (ret == 0) { last_sent_seq_ = seq; } else if (ret != EAGAIN) { log_warn << "Proto::handle_down: " << strerror(ret); } pop_header(um, dg); return ret; } bool gcomm::pc::Proto::set_param(const std::string& key, const std::string& value) { if (key == gcomm::Conf::PcIgnoreSb) { ignore_sb_ = gu::from_string(value); conf_.set(gcomm::Conf::PcIgnoreSb, value); return true; } else if (key == gcomm::Conf::PcIgnoreQuorum) { ignore_quorum_ = gu::from_string(value); conf_.set(gcomm::Conf::PcIgnoreQuorum, value); return true; } else if (key == gcomm::Conf::PcBootstrap) { if (state() != S_NON_PRIM) { log_info << "ignoring '" << key << "' in state " << to_string(state()); } else { send_install(true); } return true; } else if (key == gcomm::Conf::PcWeight) { if (state() != S_PRIM) { gu_throw_error(EAGAIN) << "can't change weightm: state not S_PRIM, retry again"; } else { int w(gu::from_string(value)); if (w < 0 || w > 255) { gu_throw_error(ERANGE) << "value " << w << " for '" << key << "' out of range"; } weight_ = w; send_install(false, weight_); return true; } } else if (key == Conf::PcChecksum || key == Conf::PcAnnounceTimeout || key == Conf::PcLinger || key == Conf::PcNpvo || key == Conf::PcWaitPrim || key == Conf::PcWaitPrimTimeout || key == Conf::PcRecovery) { gu_throw_error(EPERM) << "can't change value for '" << key << "' during runtime"; } return false; } galera-3-25.3.20/gcomm/src/socket.cpp0000644000015300001660000000103513042054732017022 0ustar jenkinsjenkins// // Copyright (C) 2012 Codership Oy // #include "socket.hpp" static const std::string SocketOptPrefix = "socket."; const std::string gcomm::Socket::OptNonBlocking = SocketOptPrefix + "non_blocking"; const std::string gcomm::Socket::OptIfAddr = SocketOptPrefix + "if_addr"; const std::string gcomm::Socket::OptIfLoop = SocketOptPrefix + "if_loop"; const std::string gcomm::Socket::OptCRC32 = SocketOptPrefix + "crc32"; const std::string gcomm::Socket::OptMcastTTL = SocketOptPrefix + "mcast_ttl"; galera-3-25.3.20/gcomm/src/gmcast_proto.hpp0000644000015300001660000001040413042054732020240 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy */ #ifndef GCOMM_GMCAST_PROTO_HPP #define GCOMM_GMCAST_PROTO_HPP #include "gu_datetime.hpp" #include "gcomm/uuid.hpp" #include "gcomm/util.hpp" #include "socket.hpp" #include "gmcast_message.hpp" #include "gmcast_link.hpp" namespace gcomm { class GMCast; namespace gmcast { class Proto; class ProtoMap; std::ostream& operator<<(std::ostream& os, const Proto& p); } } class gcomm::gmcast::Proto { public: enum State { S_INIT, S_HANDSHAKE_SENT, S_HANDSHAKE_WAIT, S_HANDSHAKE_RESPONSE_SENT, S_OK, S_FAILED, S_CLOSED }; public: void set_state(State new_state); State state() const { return state_; } static std::string to_string (State s) { switch (s) { case S_INIT: return "INIT"; case S_HANDSHAKE_SENT: return "HANDSHAKE_SENT"; case S_HANDSHAKE_WAIT: return "HANDSHAKE_WAIT"; case S_HANDSHAKE_RESPONSE_SENT: return "HANDSHAKE_RESPONSE_SENT"; case S_OK: return "OK"; case 
S_FAILED: return "FAILED"; case S_CLOSED: return "CLOSED"; default: return "UNKNOWN"; } } Proto (const GMCast& gmcast, int version, SocketPtr tp, const std::string& local_addr, const std::string& remote_addr, const std::string& mcast_addr, uint8_t local_segment, const std::string& group_name) : version_ (version), handshake_uuid_ (), remote_uuid_ (), local_segment_ (local_segment), remote_segment_ (0), local_addr_ (local_addr), remote_addr_ (remote_addr), mcast_addr_ (mcast_addr), group_name_ (group_name), changed_ (false), state_ (S_INIT), propagate_remote_ (false), tp_ (tp), link_map_ (), tstamp_ (gu::datetime::Date::now()), gmcast_ (gmcast) { } ~Proto() { tp_->close(); } void send_msg(const Message& msg); void send_handshake(); void wait_handshake(); void handle_handshake(const Message& hs); void handle_handshake_response(const Message& hs); void handle_ok(const Message& hs); void handle_failed(const Message& hs); void handle_topology_change(const Message& msg); void handle_keepalive(const Message& msg); void send_topology_change(LinkMap& um); void handle_message(const Message& msg); void send_keepalive(); const gcomm::UUID& handshake_uuid() const { return handshake_uuid_; } const gcomm::UUID& local_uuid() const; const gcomm::UUID& remote_uuid() const { return remote_uuid_; } uint8_t remote_segment() const { return remote_segment_; } SocketPtr socket() const { return tp_; } const std::string& remote_addr() const { return remote_addr_; } const std::string& mcast_addr() const { return mcast_addr_; } const LinkMap& link_map() const { return link_map_; } bool changed() { bool ret = changed_; changed_ = false; return ret; } int version() const { return version_; } void set_tstamp(gu::datetime::Date ts) { tstamp_ = ts; } gu::datetime::Date tstamp() const { return tstamp_; } private: friend std::ostream& operator<<(std::ostream&, const Proto&); Proto(const Proto&); void operator=(const Proto&); int version_; gcomm::UUID handshake_uuid_; gcomm::UUID remote_uuid_; uint8_t local_segment_; uint8_t remote_segment_; std::string local_addr_; std::string remote_addr_; std::string mcast_addr_; std::string group_name_; bool changed_; State state_; bool propagate_remote_; SocketPtr tp_; LinkMap link_map_; gu::datetime::Date tstamp_; const GMCast& gmcast_; }; class gcomm::gmcast::ProtoMap : public Map { }; #endif // !GCOMM_GMCAST_PROTO_HPP galera-3-25.3.20/gcomm/src/gmcast_proto.cpp0000644000015300001660000002135113042054732020236 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy */ #include "gmcast_proto.hpp" #include "gmcast.hpp" #include "gu_uri.hpp" using std::rel_ops::operator!=; const gcomm::UUID& gcomm::gmcast::Proto::local_uuid() const { return gmcast_.uuid(); } std::ostream& gcomm::gmcast::operator<<(std::ostream& os, const Proto& p) { os << "v=" << p.version_ << "," << "lu=" << p.gmcast_.uuid() << "," << "ru=" << p.remote_uuid_ << "," << "ls=" << static_cast(p.local_segment_) << "," << "rs=" << static_cast(p.remote_segment_) << "," << "la=" << p.local_addr_ << "," << "ra=" << p.remote_addr_ << "," << "mc=" << p.mcast_addr_ << "," << "gn=" << p.group_name_ << "," << "ch=" << p.changed_ << "," << "st=" << gcomm::gmcast::Proto::to_string(p.state_) << "," << "pr=" << p.propagate_remote_ << "," << "tp=" << p.tp_ << "," << "ts=" << p.tstamp_; return os; } void gcomm::gmcast::Proto:: set_state(State new_state) { log_debug << "State change: " << to_string(state_) << " -> " << to_string(new_state); static const bool allowed[][7] = { // INIT HS_SENT HS_WAIT HSR_SENT OK FAILED CLOSED { false, true, 
true, false, false, true, false },// INIT { false, false, false, false, true, true, false },// HS_SENT { false, false, false, true, false, true, false },// HS_WAIT { false, false, false, false, true, true, false },// HSR_SENT { false, false, false, false, true, true, true },// OK { false, false, false, false, false, true, true },// FAILED { false, false, false, false, false, false, false } // CLOSED }; if (!allowed[state_][new_state]) { gu_throw_fatal << "Invalid state change: " << to_string(state_) << " -> " << to_string(new_state); } state_ = new_state; } void gcomm::gmcast::Proto::send_msg(const Message& msg) { gu::Buffer buf; gu_trace(serialize(msg, buf)); Datagram dg(buf); int ret = tp_->send(dg); // @todo: This can happen during congestion, figure out how to // avoid terminating connection with topology change messages. if (ret != 0) { log_debug << "Send failed: " << strerror(ret); set_state(S_FAILED); } } void gcomm::gmcast::Proto::send_handshake() { handshake_uuid_ = UUID(0, 0); Message hs (version_, Message::T_HANDSHAKE, handshake_uuid_, gmcast_.uuid(), local_segment_); send_msg(hs); set_state(S_HANDSHAKE_SENT); } void gcomm::gmcast::Proto::wait_handshake() { if (state() != S_INIT) gu_throw_fatal << "Invalid state: " << to_string(state()); set_state(S_HANDSHAKE_WAIT); } void gcomm::gmcast::Proto::handle_handshake(const Message& hs) { if (state() != S_HANDSHAKE_WAIT) gu_throw_fatal << "Invalid state: " << to_string(state()); if (hs.version() != version_) { log_warn << "incompatible protocol version: " << hs.version(); set_state(S_FAILED); return; } handshake_uuid_ = hs.handshake_uuid(); remote_uuid_ = hs.source_uuid(); remote_segment_ = hs.segment_id(); Message hsr (version_, Message::T_HANDSHAKE_RESPONSE, handshake_uuid_, gmcast_.uuid(), local_addr_, group_name_, local_segment_); send_msg(hsr); set_state(S_HANDSHAKE_RESPONSE_SENT); } void gcomm::gmcast::Proto::handle_handshake_response(const Message& hs) { if (state() != S_HANDSHAKE_SENT) gu_throw_fatal << "Invalid state: " << to_string(state()); const std::string& grp = hs.group_name(); try { if (grp != group_name_) { log_info << "handshake failed, my group: '" << group_name_ << "', peer group: '" << grp << "'"; Message failed(version_, Message::T_FAIL, gmcast_.uuid(), local_segment_, "invalid group"); send_msg(failed); set_state(S_FAILED); return; } remote_uuid_ = hs.source_uuid(); remote_segment_ = hs.segment_id(); gu::URI remote_uri(tp_->remote_addr()); remote_addr_ = uri_string(remote_uri.get_scheme(), remote_uri.get_host(), gu::URI(hs.node_address()).get_port()); if (gmcast_.is_evicted(remote_uuid_) == true) { log_info << "peer " << remote_uuid_ << " from " << remote_addr_ << " has been evicted out, rejecting connection"; Message failed(version_, Message::T_FAIL, gmcast_.uuid(), local_segment_, "evicted"); send_msg(failed); set_state(S_FAILED); return; } propagate_remote_ = true; Message ok(version_, Message::T_OK, gmcast_.uuid(), local_segment_, ""); send_msg(ok); set_state(S_OK); } catch (std::exception& e) { log_warn << "Parsing peer address '" << hs.node_address() << "' failed: " << e.what(); Message nok (version_, Message::T_FAIL, gmcast_.uuid(), local_segment_, "invalid node address"); send_msg (nok); set_state(S_FAILED); } } void gcomm::gmcast::Proto::handle_ok(const Message& hs) { if (state_ == S_OK) { log_debug << "handshake ok: " << *this; } propagate_remote_ = true; set_state(S_OK); } void gcomm::gmcast::Proto::handle_failed(const Message& hs) { log_warn << "handshake with " << remote_uuid_ << " " << remote_addr_ 
<< " failed: '" << hs.error() << "'"; set_state(S_FAILED); if (hs.error() == "evicted") { // otherwise node use the uuid in view state file. // which is probably still in other nodes evict list. gmcast_.remove_viewstate_file(); gu_throw_fatal << "this node has been evicted out of the cluster, " << "gcomm backend restart is required"; } } void gcomm::gmcast::Proto::handle_topology_change(const Message& msg) { const Message::NodeList& nl(msg.node_list()); LinkMap new_map; for (Message::NodeList::const_iterator i = nl.begin(); i != nl.end(); ++i) { new_map.insert(Link(Message::NodeList::key(i), Message::NodeList::value(i).addr(), Message::NodeList::value(i).mcast_addr())); if (Message::NodeList::key(i) == remote_uuid() && mcast_addr_ == "" && Message::NodeList::value(i).mcast_addr() != "") { mcast_addr_ = Message::NodeList::value(i).mcast_addr(); } } if (link_map_ != new_map) { changed_ = true; } link_map_ = new_map; } void gcomm::gmcast::Proto::handle_keepalive(const Message& msg) { log_debug << "keepalive: " << *this; Message ok(version_, Message::T_OK, gmcast_.uuid(), local_segment_, ""); send_msg(ok); } void gcomm::gmcast::Proto::send_topology_change(LinkMap& um) { Message::NodeList nl; for (LinkMap::const_iterator i = um.begin(); i != um.end(); ++i) { if (LinkMap::key(i) == UUID::nil() || LinkMap::value(i).addr() == "") gu_throw_fatal << "nil uuid or empty address"; nl.insert_unique( std::make_pair(LinkMap::key(i), Node(LinkMap::value(i).addr()))); } Message msg(version_, Message::T_TOPOLOGY_CHANGE, gmcast_.uuid(), group_name_, nl); send_msg(msg); } void gcomm::gmcast::Proto::send_keepalive() { log_debug << "sending keepalive: " << *this; Message msg(version_, Message::T_KEEPALIVE, gmcast_.uuid(), local_segment_, ""); send_msg(msg); } void gcomm::gmcast::Proto::handle_message(const Message& msg) { switch (msg.type()) { case Message::T_HANDSHAKE: handle_handshake(msg); break; case Message::T_HANDSHAKE_RESPONSE: handle_handshake_response(msg); break; case Message::T_OK: handle_ok(msg); break; case Message::T_FAIL: handle_failed(msg); break; case Message::T_TOPOLOGY_CHANGE: handle_topology_change(msg); break; case Message::T_KEEPALIVE: handle_keepalive(msg); break; default: gu_throw_fatal << "invalid message type: " << msg.type(); } } galera-3-25.3.20/gcomm/src/asio_protonet.hpp0000644000015300001660000000273113042054732020430 0ustar jenkinsjenkins/* * Copyright (C) 2010 Codership Oy */ #ifndef GCOMM_ASIO_PROTONET_HPP #define GCOMM_ASIO_PROTONET_HPP #include "gcomm/protonet.hpp" #include "socket.hpp" #include "gu_monitor.hpp" #include "gu_asio.hpp" #include #include #include namespace gcomm { class AsioProtonet; } class gcomm::AsioProtonet : public gcomm::Protonet { public: AsioProtonet(gu::Config& conf, int version = 0); ~AsioProtonet(); void event_loop(const gu::datetime::Period& p); void dispatch(const SocketId&, const Datagram&, const ProtoUpMeta&); void interrupt(); SocketPtr socket(const gu::URI&); gcomm::Acceptor* acceptor(const gu::URI&); void enter(); void leave(); size_t mtu() const { return mtu_; } #ifdef HAVE_ASIO_SSL_HPP std::string get_ssl_password() const; #endif // HAVE_ASIO_SSL_HPP private: friend class AsioTcpSocket; friend class AsioTcpAcceptor; friend class AsioUdpSocket; AsioProtonet(const AsioProtonet&); void handle_wait(const asio::error_code& ec); gu::RecursiveMutex mutex_; gu::datetime::Date poll_until_; asio::io_service io_service_; asio::deadline_timer timer_; #ifdef HAVE_ASIO_SSL_HPP asio::ssl::context ssl_context_; #endif /* HAVE_ASIO_SSL_HPP */ size_t mtu_; 
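    // Checksum type used for network datagrams; the recognized values
    // (none, CRC-32, CRC-32C) are resolved by NetHeader::checksum_type()
    // in datagram.cpp.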
NetHeader::checksum_t checksum_; }; #endif // GCOMM_ASIO_PROTONET_HPP galera-3-25.3.20/gcomm/src/evs_consensus.cpp0000644000015300001660000004255613042054732020444 0ustar jenkinsjenkins/* * Copyright (C) 2009-2014 Codership Oy */ #include "evs_consensus.hpp" #include "evs_message2.hpp" #include "evs_input_map2.hpp" #include "evs_node.hpp" #include "evs_proto.hpp" #include "gcomm/view.hpp" #include "gu_logger.hpp" #include // Disable debug logging until debug mask is available here #define evs_log_debug(i) if ((proto_.debug_mask_ & gcomm::evs::Proto::D_CONSENSUS) == 0) \ {} else log_debug << proto_.uuid() << " " // // Helpers // class LeaveSeqCmpOp { public: bool operator()(const gcomm::evs::MessageNodeList::value_type& a, const gcomm::evs::MessageNodeList::value_type& b) const { using gcomm::evs::MessageNode; using gcomm::evs::MessageNodeList; const MessageNode& aval(MessageNodeList::value(a)); const MessageNode& bval(MessageNodeList::value(b)); gcomm_assert(aval.leaving() != false && bval.leaving() != false); const gcomm::evs::seqno_t asec(aval.leave_seq()); const gcomm::evs::seqno_t bsec(bval.leave_seq()); gcomm_assert(asec != -1 && bsec != -1); return (asec < bsec); } }; class RangeLuCmp { public: bool operator()(const gcomm::evs::MessageNodeList::value_type& a, const gcomm::evs::MessageNodeList::value_type& b) const { return (gcomm::evs::MessageNodeList::value(a).im_range().lu() < gcomm::evs::MessageNodeList::value(b).im_range().lu()); } }; class SafeSeqCmp { public: bool operator()(const gcomm::evs::MessageNodeList::value_type& a, const gcomm::evs::MessageNodeList::value_type& b) const { return (gcomm::evs::MessageNodeList::value(a).safe_seq() < gcomm::evs::MessageNodeList::value(b).safe_seq()); } }; // // // bool gcomm::evs::Consensus::equal(const Message& m1, const Message& m2) const { gcomm_assert(m1.type() == Message::T_JOIN || m1.type() == Message::T_INSTALL); gcomm_assert(m2.type() == Message::T_JOIN || m2.type() == Message::T_INSTALL); // Seq and aru seq are comparable only if coming from same view if (m1.source_view_id() == m2.source_view_id()) { if (m1.seq() != m2.seq()) { evs_log_debug(D_CONSENSUS) << "seq not equal " << m1.seq() << " " << m2.seq(); return false; } if (m1.aru_seq() != m2.aru_seq()) { evs_log_debug(D_CONSENSUS) << "aruseq not equal " << m1.aru_seq() << " " << m2.aru_seq(); return false; } } MessageNodeList nl1, nl2; // When comparing messages from same source whole node list is comparable, // otherwise only operational part of it. 
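    // When the sources differ, only the operational part of each node list is
    // selected before comparison (the view id filter is left empty), so only
    // the membership both nodes can be expected to agree on has to match.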
if (m1.source() == m2.source()) { for_each(m1.node_list().begin(), m1.node_list().end(), SelectNodesOp(nl1, m1.source_view_id(), true, true)); for_each(m2.node_list().begin(), m2.node_list().end(), SelectNodesOp(nl2, m2.source_view_id(), true, true)); } else { for_each(m1.node_list().begin(), m1.node_list().end(), SelectNodesOp(nl1, ViewId(), true, false)); for_each(m2.node_list().begin(), m2.node_list().end(), SelectNodesOp(nl2, ViewId(), true, false)); } evs_log_debug(D_CONSENSUS) << "nl1: " << nl1 << " nl2: " << nl2; return (nl1 == nl2); } gcomm::evs::seqno_t gcomm::evs::Consensus::highest_reachable_safe_seq() const { std::list seq_list; for (NodeMap::const_iterator i = known_.begin(); i != known_.end(); ++i) { const UUID& uuid(NodeMap::key(i)); const Node& node(NodeMap::value(i)); const JoinMessage* jm(node.join_message()); const LeaveMessage* lm(node.leave_message()); if ((jm == 0 && current_view_.is_member(NodeMap::key(i)) == true) || (jm != 0 && jm->source_view_id() == current_view_.id()) || (lm != 0 && lm->source_view_id() == current_view_.id())) { if (lm != 0) { if (proto_.is_all_suspected(uuid) == false) { seq_list.push_back(lm->seq()); } } else if (node.operational() == false) { seq_list.push_back( std::min( input_map_.safe_seq(node.index()), input_map_.range(node.index()).lu() - 1)); } else { seq_list.push_back(input_map_.range(node.index()).hs()); } } } return *std::min_element(seq_list.begin(), seq_list.end()); } gcomm::evs::seqno_t gcomm::evs::Consensus::safe_seq_wo_all_susupected_leaving_nodes() const { seqno_t safe_seq(-2); for(NodeMap::const_iterator i = proto_.known_.begin(); i != proto_.known_.end(); ++i) { const UUID& uuid(NodeMap::key(i)); const Node& node(NodeMap::value(i)); if (node.index() != std::numeric_limits::max()) { if (node.operational() == false && node.leave_message() && proto_.is_all_suspected(uuid)) { continue; } seqno_t ss = input_map_.safe_seq(node.index()); if (safe_seq == -2 || ss < safe_seq) { safe_seq = ss; } } } return safe_seq; } namespace gcomm { namespace evs { class FilterAllSuspectedOp { public: FilterAllSuspectedOp(MessageNodeList& nl, const Proto& proto) : nl_(nl), proto_(proto) {} void operator()(const MessageNodeList::value_type& vt) const { const UUID& uuid(MessageNodeList::key(vt)); if (!proto_.is_all_suspected(uuid)) { nl_.insert_unique(vt); } } private: MessageNodeList& nl_; const Proto& proto_; }; } // evs } // gcomm bool gcomm::evs::Consensus::is_consistent_highest_reachable_safe_seq( const Message& msg) const { gcomm_assert(msg.type() == Message::T_JOIN || msg.type() == Message::T_INSTALL); gcomm_assert(msg.source_view_id() == current_view_.id()); const MessageNodeList& node_list(msg.node_list()); // Same view MessageNodeList same_view; for_each(node_list.begin(), node_list.end(), SelectNodesOp(same_view, current_view_.id(), true, false)); MessageNodeList::const_iterator max_hs_i(max_element(same_view.begin(), same_view.end(), RangeHsCmp())); gcomm_assert(max_hs_i != same_view.end()); // Max highest seen const seqno_t max_hs( MessageNodeList::value(max_hs_i).im_range().hs()); seqno_t max_reachable_safe_seq(max_hs); // Leaving Nodes MessageNodeList t_leaving; for_each(node_list.begin(), node_list.end(), SelectNodesOp(t_leaving, current_view_.id(), false, true)); MessageNodeList leaving; for_each(t_leaving.begin(), t_leaving.end(), FilterAllSuspectedOp(leaving, proto_)); if (leaving.empty() == false) { const MessageNodeList::const_iterator min_leave_seq_i( std::min_element(leaving.begin(), leaving.end(), LeaveSeqCmpOp())); 
gcomm_assert(min_leave_seq_i != leaving.end()); const seqno_t min_leave_seq( MessageNodeList::value(min_leave_seq_i).leave_seq()); max_reachable_safe_seq = std::min(max_reachable_safe_seq, min_leave_seq); } // Partitioning nodes MessageNodeList partitioning; for_each(node_list.begin(), node_list.end(), SelectNodesOp(partitioning, current_view_.id(), false, false)); if (partitioning.empty() == false) { MessageNodeList::const_iterator min_part_safe_seq_i( std::min_element(partitioning.begin(), partitioning.end(), SafeSeqCmp())); gcomm_assert(min_part_safe_seq_i != partitioning.end()); const seqno_t min_part_safe_seq( MessageNodeList::value(min_part_safe_seq_i).safe_seq()); max_reachable_safe_seq = std::min(max_reachable_safe_seq, min_part_safe_seq); MessageNodeList::const_iterator min_part_lu_i( std::min_element(partitioning.begin(), partitioning.end(), RangeLuCmp())); gcomm_assert(min_part_lu_i != partitioning.end()); const seqno_t min_part_lu(MessageNodeList::value(min_part_lu_i).im_range().lu() - 1); max_reachable_safe_seq = std::min(max_reachable_safe_seq, min_part_lu); } evs_log_debug(D_CONSENSUS) << " max reachable safe seq " << max_reachable_safe_seq << " highest reachable safe seq " << highest_reachable_safe_seq() << " max_hs " << max_hs << " input map max hs " << input_map_.max_hs() << " input map safe_seq " << input_map_.safe_seq() << " safe seq wo suspected leaving nodes " << safe_seq_wo_all_susupected_leaving_nodes(); return (input_map_.max_hs() == max_hs && highest_reachable_safe_seq() == max_reachable_safe_seq && // input_map_.safe_seq() == max_reachable_safe_seq); safe_seq_wo_all_susupected_leaving_nodes() == max_reachable_safe_seq); } bool gcomm::evs::Consensus::is_consistent_input_map(const Message& msg) const { gcomm_assert(msg.type() == Message::T_JOIN || msg.type() == Message::T_INSTALL); gcomm_assert(msg.source_view_id() == current_view_.id()); if (msg.aru_seq() != input_map_.aru_seq()) { evs_log_debug(D_CONSENSUS) << "message aru seq " << msg.aru_seq() << " not consistent with input map aru seq " << input_map_.aru_seq(); return false; } if (msg.seq() != input_map_.safe_seq()) { evs_log_debug(D_CONSENSUS) << "message safe seq " << msg.seq() << " not consistent with input map safe seq " << input_map_.safe_seq(); return false; } Map local_insts, msg_insts; for (NodeMap::const_iterator i = known_.begin(); i != known_.end(); ++i) { const UUID& uuid(NodeMap::key(i)); const Node& node(NodeMap::value(i)); if (current_view_.is_member(uuid) == true) { gu_trace((void)local_insts.insert_unique( std::make_pair(uuid, input_map_.range(node.index())))); } } const MessageNodeList& m_insts(msg.node_list()); for (MessageNodeList::const_iterator i = m_insts.begin(); i != m_insts.end(); ++i) { const UUID& msg_uuid(MessageNodeList::key(i)); const MessageNode& msg_inst(MessageNodeList::value(i)); if (msg_inst.view_id() == current_view_.id()) { gu_trace((void)msg_insts.insert_unique( std::make_pair(msg_uuid, msg_inst.im_range()))); } } evs_log_debug(D_CONSENSUS) << " msg_insts " << msg_insts << " local_insts " << local_insts; return (msg_insts == local_insts); } bool gcomm::evs::Consensus::is_consistent_partitioning(const Message& msg) const { gcomm_assert(msg.type() == Message::T_JOIN || msg.type() == Message::T_INSTALL); gcomm_assert(msg.source_view_id() == current_view_.id()); // Compare instances that were present in the current view but are // not proceeding in the next view. 
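    // "Partitioning" nodes are those that are non-operational and have not
    // sent a leave message; their input map ranges must match between the
    // local state and the message node list for the messages to be consistent.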
Map local_insts, msg_insts; for (NodeMap::const_iterator i = known_.begin(); i != known_.end(); ++i) { const UUID& uuid(NodeMap::key(i)); const Node& node(NodeMap::value(i)); if (node.operational() == false && node.leave_message() == 0 && current_view_.is_member(uuid) == true) { gu_trace((void)local_insts.insert_unique( std::make_pair(uuid, input_map_.range(node.index())))); } } const MessageNodeList& m_insts = msg.node_list(); for (MessageNodeList::const_iterator i = m_insts.begin(); i != m_insts.end(); ++i) { const UUID& m_uuid(MessageNodeList::key(i)); const MessageNode& m_inst(MessageNodeList::value(i)); if (m_inst.operational() == false && m_inst.leaving() == false && m_inst.view_id() == current_view_.id()) { gu_trace((void)msg_insts.insert_unique( std::make_pair(m_uuid, m_inst.im_range()))); } } evs_log_debug(D_CONSENSUS) << " msg insts:\n" << msg_insts << " local insts:\n" << local_insts; return (msg_insts == local_insts); } bool gcomm::evs::Consensus::is_consistent_leaving(const Message& msg) const { gcomm_assert(msg.type() == Message::T_JOIN || msg.type() == Message::T_INSTALL); gcomm_assert(msg.source_view_id() == current_view_.id()); // Compare instances that were present in the current view but are // not proceeding in the next view. Map local_insts, msg_insts; for (NodeMap::const_iterator i = known_.begin(); i != known_.end(); ++i) { const UUID& uuid(NodeMap::key(i)); const Node& inst(NodeMap::value(i)); const LeaveMessage* lm(inst.leave_message()); if (inst.operational() == false && lm != 0 && lm->source_view_id() == current_view_.id()) { gu_trace((void)local_insts.insert_unique( std::make_pair(uuid, input_map_.range(inst.index())))); } } const MessageNodeList& m_insts = msg.node_list(); for (MessageNodeList::const_iterator i = m_insts.begin(); i != m_insts.end(); ++i) { const UUID& m_uuid(MessageNodeList::key(i)); const MessageNode& m_inst(MessageNodeList::value(i)); if (m_inst.operational() == false && m_inst.leaving() == true && m_inst.view_id() == current_view_.id()) { gu_trace((void)msg_insts.insert_unique( std::make_pair(m_uuid, m_inst.im_range()))); } } evs_log_debug(D_CONSENSUS) << " msg insts " << msg_insts << " local insts " << local_insts; return (local_insts == msg_insts); } bool gcomm::evs::Consensus::is_consistent_same_view(const Message& msg) const { gcomm_assert(msg.type() == Message::T_JOIN || msg.type() == Message::T_INSTALL); gcomm_assert(msg.source_view_id() == current_view_.id()); if (is_consistent_highest_reachable_safe_seq(msg) == false) { evs_log_debug(D_CONSENSUS) << "highest reachable safe seq not consistent"; return false; } if (is_consistent_input_map(msg) == false) { evs_log_debug(D_CONSENSUS) << "input map not consistent with " << msg; return false; } if (is_consistent_partitioning(msg) == false) { evs_log_debug(D_CONSENSUS) << "partitioning not consistent with " << msg; return false; } if (is_consistent_leaving(msg) == false) { evs_log_debug(D_CONSENSUS) << "leaving not consistent with " << msg; return false; } return true; } bool gcomm::evs::Consensus::is_consistent(const Message& msg) const { gcomm_assert(msg.type() == Message::T_JOIN || msg.type() == Message::T_INSTALL); const JoinMessage* my_jm = NodeMap::value(known_.find_checked(proto_.uuid())).join_message(); if (my_jm == 0) { return false; } if (msg.source_view_id() == current_view_.id()) { return (is_consistent_same_view(msg) == true && equal(msg, *my_jm) == true); } else { return equal(msg, *my_jm); } } bool gcomm::evs::Consensus::is_consensus() const { const JoinMessage* my_jm = 
NodeMap::value(known_.find_checked(proto_.uuid())).join_message(); if (my_jm == 0) { evs_log_debug(D_CONSENSUS) << "no own join message"; return false; } if (is_consistent_same_view(*my_jm) == false) { evs_log_debug(D_CONSENSUS) << "own join message not consistent"; return false; } for (NodeMap::const_iterator i = known_.begin(); i != known_.end(); ++i) { const Node& inst(NodeMap::value(i)); if (inst.operational() == true) { const JoinMessage* jm = inst.join_message(); if (jm == 0) { evs_log_debug(D_CONSENSUS) << "no join message for " << NodeMap::key(i); return false; } // call is_consistent() instead of equal() to enforce strict // check for messages originating from the same view (#541) if (is_consistent(*jm) == false) { evs_log_debug(D_CONSENSUS) << "join message " << *jm << " not consistent with my join " << *my_jm; return false; } } } return true; } galera-3-25.3.20/gcomm/src/evs_seqno.hpp0000644000015300001660000000325013042054732017542 0ustar jenkinsjenkins/* * Copyright (C) 2009-2012 Codership Oy */ #ifndef EVS_SEQNO_HPP #define EVS_SEQNO_HPP #include "gcomm/types.hpp" #include "gu_serialize.hpp" //#include // for uint16_t #include #include namespace gcomm { namespace evs { typedef int64_t seqno_t; class Range; std::ostream& operator<<(std::ostream&, const Range&); } } /*! * */ class gcomm::evs::Range { public: Range(const seqno_t lu = -1, const seqno_t hs = -1) : lu_(lu), hs_(hs) {} seqno_t lu() const { return lu_; } seqno_t hs() const { return hs_; } void set_lu(const seqno_t s) { lu_ = s; } void set_hs(const seqno_t s) { hs_ = s; } size_t serialize(gu::byte_t* buf, size_t buflen, size_t offset) const { gu_trace(offset = gu::serialize8(lu_, buf, buflen, offset)); gu_trace(offset = gu::serialize8(hs_, buf, buflen, offset)); return offset; } size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset) { gu_trace(offset = gu::unserialize8(buf, buflen, offset, lu_)); gu_trace(offset = gu::unserialize8(buf, buflen, offset, hs_)); return offset; } static size_t serial_size() { return 2 * sizeof(seqno_t); } bool operator==(const Range& cmp) const { return (lu_ == cmp.lu_ && hs_ == cmp.hs_); } private: seqno_t lu_; /*!< Lowest unseen seqno */ seqno_t hs_; /*!< Highest seen seqno */ }; inline std::ostream& gcomm::evs::operator<<(std::ostream& os, const gcomm::evs::Range& r) { return (os << "[" << r.lu() << "," << r.hs() << "]"); } #endif // EVS_SEQNO_HPP galera-3-25.3.20/gcomm/src/datagram.cpp0000644000015300001660000000565513042054732017326 0ustar jenkinsjenkins/* * Copyright (C) 2013 Codership Oy */ #include "gcomm/datagram.hpp" #include "gu_crc.hpp" // CRC-32C - optimized and potentially accelerated #include "gu_logger.hpp" #include "gu_throw.hpp" #include // CRC32 - backward compatible gcomm::NetHeader::checksum_t gcomm::NetHeader::checksum_type (int i) { switch(i) { case CS_NONE: log_info << "Message checksums disabled."; return CS_NONE; case CS_CRC32: log_info << "Using CRC-32 (backward-compatible) for message checksums."; return CS_CRC32; case CS_CRC32C: log_info << "Using CRC-32C for message checksums."; return CS_CRC32C; } log_warn << "Ignoring unknown checksum type: " << i << ". 
Falling back to CRC-32."; return CS_CRC32; } uint16_t gcomm::crc16(const gcomm::Datagram& dg, size_t offset) { assert(offset < dg.len()); gu::byte_t lenb[4]; gu::serialize4(static_cast(dg.len() - offset), lenb, sizeof(lenb), 0); boost::crc_16_type crc; crc.process_block(lenb, lenb + sizeof(lenb)); if (offset < dg.header_len()) { crc.process_block(dg.header_ + dg.header_offset_ + offset, dg.header_ + dg.header_size_); offset = 0; } else { offset -= dg.header_len(); } crc.process_block(&(*dg.payload_)[0] + offset, &(*dg.payload_)[0] + dg.payload_->size()); return crc.checksum(); } uint32_t gcomm::crc32(gcomm::NetHeader::checksum_t const type, const gcomm::Datagram& dg, size_t offset) { boost::crc_32_type crc; gu::byte_t lenb[4]; gu::serialize4(static_cast(dg.len() - offset), lenb, sizeof(lenb), 0); if (NetHeader::CS_CRC32 == type) { boost::crc_32_type crc; crc.process_block(lenb, lenb + sizeof(lenb)); if (offset < dg.header_len()) { crc.process_block(dg.header_ + dg.header_offset_ + offset, dg.header_ + dg.header_size_); offset = 0; } else { offset -= dg.header_len(); } crc.process_block(&(*dg.payload_)[0] + offset, &(*dg.payload_)[0] + dg.payload_->size()); return crc.checksum(); } else if (NetHeader::CS_CRC32C == type) { gu::CRC32C crc; crc.append (lenb, sizeof(lenb)); if (offset < dg.header_len()) { crc.append (dg.header_ + dg.header_offset_ + offset, dg.header_size_ - dg.header_offset_ - offset); offset = 0; } else { offset -= dg.header_len(); } crc.append (&(*dg.payload_)[0] + offset, dg.payload_->size() - offset); return crc(); } gu_throw_error(EINVAL) << "Unsupported checksum algorithm: " << type; } galera-3-25.3.20/gcomm/src/profile.hpp0000644000015300001660000001577113042054732017213 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy */ /*! * @file profile.hpp * * @brief Lightweight profiling utility. * * Profiling utility suitable for getting runtime code profile information * with minimal overhead. Macros profile_enter() and profile_leave() * can be inserted around the code and will be expanded to profiling * code if GCOMM_PROFILE is defined. * * Example usage: * @code * * Profile prof("prof"); * * void func() * { * if (is_true()) * { * profile_enter(prof); // This is line 227 * // Do something * // ... * profile_leave(prof); * } * else * { * profile_enter(prof); // This is line 250 * // Do something else * // ... * profile_leave(prof); * } * } * * // Somewhere else in your code * log_info << prof; * @endcode * */ #ifndef GCOMM_PROFILE_HPP #define GCOMM_PROFILE_HPP extern "C" { #include "gu_time.h" } #include #include namespace prof { // Forward declarations class Key; class Point; class Profile; std::ostream& operator<<(std::ostream&, const Key&); std::ostream& operator<<(std::ostream&, const Profile&); } /*! * Profile key storing human readable point description :: * and entry time. 
*/ class prof::Key { public: Key(const char* const file, const char* const func, const int line) : file_(file), func_(func), line_(line) { } bool operator==(const Key& cmp) const { return (line_ == cmp.line_ && func_ == cmp.func_ && file_ == cmp.file_); } bool operator<(const Key& cmp) const { return (line_ < cmp.line_ || (line_ == cmp.line_ && (func_ < cmp.func_ || (func_ == cmp.func_ && file_ < cmp.file_)))); } std::string to_string() const { std::ostringstream os; os << *this; return os.str(); } private: friend class Point; friend class Profile; friend std::ostream& operator<<(std::ostream& os, const Key&); const char* const file_; const char* const func_; const int line_; }; inline std::ostream& prof::operator<<(std::ostream& os, const prof::Key& key) { return os << key.file_ << ":" << key.func_ << ":" << key.line_; } class prof::Point { public: Point(const Profile& prof, const char* file, const char* func, const int line); ~Point(); private: friend class Profile; const Profile& prof_; const Key key_; mutable long long int enter_time_calendar_; mutable long long int enter_time_thread_cputime_; }; /*! * Profile class for collecting statistics about profile points. */ class prof::Profile { struct PointStats { PointStats(long long int count = 0, long long int time_calendar = 0, long long int time_thread_cputime = 0) : count_ (count ), time_calendar_ (time_calendar ), time_thread_cputime_(time_thread_cputime) { } PointStats operator+(const PointStats& add) const { return PointStats(count_ + add.count_, time_calendar_ + add.time_calendar_, time_thread_cputime_ + add.time_thread_cputime_); } long long int count_; long long int time_calendar_; long long int time_thread_cputime_; }; public: /*! * Default constructor. * * @param name_ Name identifying the profile in ostream output. */ Profile(const std::string& name = "profile") : name_(name), start_time_calendar_(gu_time_calendar()), start_time_thread_cputime_(gu_time_thread_cputime()), points_() { } void enter(const Point& point) const { points_[point.key_].count_++; point.enter_time_calendar_ = gu_time_calendar(); point.enter_time_thread_cputime_ = gu_time_thread_cputime(); } void leave(const Point& point) const { long long int t_cal(gu_time_calendar()); long long int t_thdcpu(gu_time_thread_cputime()); points_[point.key_].time_calendar_ += (t_cal - point.enter_time_calendar_); points_[point.key_].time_thread_cputime_ += (t_thdcpu - point.enter_time_thread_cputime_); } void clear() { points_.clear(); } friend std::ostream& operator<<(std::ostream&, const Profile&); typedef std::map Map; std::string const name_; long long int const start_time_calendar_; long long int const start_time_thread_cputime_; mutable Map points_; }; inline prof::Point::Point(const Profile& prof, const char* file, const char* func, const int line) : prof_(prof), key_(file, func, line), enter_time_calendar_(), enter_time_thread_cputime_() { prof_.enter(*this); } inline prof::Point::~Point() { prof_.leave(*this); } /*! * Ostream operator for Profile class. 
*/ inline std::ostream& prof::operator<<(std::ostream& os, const Profile& prof) { Profile::PointStats cumul; char prev_fill(os.fill()); os.fill(' '); os << "\nprofile name: " << prof.name_; os << std::left << std::fixed << std::setprecision(3); os << "\n\n"; os << std::setw(40) << "point"; os << std::setw(10) << "count"; os << std::setw(10) << "calendar"; os << std::setw(10) << "cpu"; os << "\n" << std::setfill('-') << std::setw(70) << "" << std::setfill(' ') << "\n"; for (Profile::Map::const_iterator i = prof.points_.begin(); i != prof.points_.end(); ++i) { os << std::setw(40) << std::left << i->first.to_string(); os << std::right; os << std::setw(10) << i->second.count_; os << std::setw(10) << double(i->second.time_calendar_)*1.e-9; os << std::setw(10) << double(i->second.time_thread_cputime_)*1.e-9; os << std::left; os << "\n"; cumul = cumul + i->second; } os << "\ntot count : " << cumul.count_; os << "\ntot calendar time : " << double(cumul.time_calendar_)*1.e-9; os << "\ntot thread cputime: " << double(cumul.time_thread_cputime_)*1.e-9; os << "\ntot ct since ctor : " << double(gu::datetime::Date::now().get_utc() - prof.start_time_calendar_)*1.e-9; os.fill(prev_fill); return os; } /* * Convenience macros for defining profile entry and leave points. * If GCOMM_PROFILE is undefined, these macros expand to no-op. */ #ifdef GCOMM_PROFILE #define profile_enter(__p) do { \ const prof::Point __point((__p), __FILE__, __FUNCTION__, __LINE__); \ #define profile_leave(__p) \ } while (0) #else #define profile_enter(__p) #define profile_leave(__p) #endif // GCOMM_PROFILE #endif // GCOMM_PROFILE_HPP galera-3-25.3.20/gcomm/src/evs_message2.cpp0000644000015300001660000005015113042054732020120 0ustar jenkinsjenkins/* * Copyright (C) 2009-2012 Codership Oy * * $Id$ */ #include "evs_message2.hpp" #include "gu_exception.hpp" #include "gu_logger.hpp" std::ostream& gcomm::evs::operator<<(std::ostream& os, const gcomm::evs::MessageNode& node) { os << " {"; os << "o=" << node.operational() << ","; os << "s=" << node.suspected() << ","; os << "e=" << node.evicted() << ","; os << "ls=" << node.leave_seq() << ","; os << "vid=" << node.view_id() << ","; os << "ss=" << node.safe_seq() << ","; os << "ir=" << node.im_range() << ","; os << "}"; return os; } std::ostream& gcomm::evs::operator<<(std::ostream& os, const gcomm::evs::Message& msg) { os << "{"; os << "v=" << static_cast(msg.version()) << ","; os << "t=" << msg.type() << ","; os << "ut=" << static_cast(msg.user_type()) << ","; os << "o=" << msg.order() << ","; os << "s=" << msg.seq() << ","; os << "sr=" << msg.seq_range() << ","; os << "as=" << msg.aru_seq() << ","; os << "f=" << static_cast(msg.flags()) << ","; os << "src=" << msg.source() << ","; os << "srcvid=" << msg.source_view_id() << ","; os << "insvid=" << msg.install_view_id() << ","; os << "ru=" << msg.range_uuid() << ","; os << "r=" << msg.range() << ","; os << "fs=" << msg.fifo_seq() << ","; os << "nl=(\n" << msg.node_list() << ")\n"; os << "}"; return os; } size_t gcomm::evs::MessageNode::serialize(gu::byte_t* const buf, size_t const buflen, size_t offset) const { uint8_t b = static_cast((operational_ == true ? F_OPERATIONAL : 0) | (suspected_ == true ? F_SUSPECTED : 0) | (evicted_ == true ? 
F_EVICTED : 0)); gu_trace(offset = gu::serialize1(b, buf, buflen, offset)); gu_trace(offset = gu::serialize1(segment_, buf, buflen, offset)); gu_trace(offset = gu::serialize8(leave_seq_, buf, buflen, offset)); gu_trace(offset = view_id_.serialize(buf, buflen, offset)); gu_trace(offset = gu::serialize8(safe_seq_, buf, buflen, offset)); gu_trace(offset = im_range_.serialize(buf, buflen, offset)); return offset; } size_t gcomm::evs::MessageNode::unserialize(const gu::byte_t* const buf, size_t const buflen, size_t offset) { uint8_t b; gu_trace(offset = gu::unserialize1(buf, buflen, offset, b)); if ((b & ~(F_OPERATIONAL | F_SUSPECTED | F_EVICTED)) != 0) { log_warn << "unknown flags: " << static_cast(b); } operational_ = b & F_OPERATIONAL; suspected_ = b & F_SUSPECTED; evicted_ = b & F_EVICTED; gu_trace(offset = gu::unserialize1(buf, buflen, offset, segment_)); gu_trace(offset = gu::unserialize8(buf, buflen, offset, leave_seq_)); gu_trace(offset = view_id_.unserialize(buf, buflen, offset)); gu_trace(offset = gu::unserialize8(buf, buflen, offset, safe_seq_)); gu_trace(offset = im_range_.unserialize(buf, buflen, offset)); return offset; } size_t gcomm::evs::MessageNode::serial_size() { return 2 + // 4 bytes reserved for flags sizeof(seqno_t) + ViewId::serial_size() + sizeof(seqno_t) + Range::serial_size(); } bool gcomm::evs::Message::operator==(const Message& cmp) const { return (version_ == cmp.version_ && type_ == cmp.type_ && user_type_ == cmp.user_type_ && order_ == cmp.order_ && seq_ == cmp.seq_ && seq_range_ == cmp.seq_range_ && aru_seq_ == cmp.aru_seq_ && fifo_seq_ == cmp.fifo_seq_ && flags_ == cmp.flags_ && source_ == cmp.source_ && source_view_id_ == cmp.source_view_id_ && install_view_id_ == cmp.install_view_id_ && range_uuid_ == cmp.range_uuid_ && range_ == cmp.range_ && node_list_ == cmp.node_list_); } // // Header format: // 0 1 2 3 // | 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 | // |-----------------------------------------------------------------| // | zv | t | o | flags | real version | reserved | // |-----------------------------------------------------------------| // | fifo_seq | // | ... | // |-----------------------------------------------------------------| // | source | // | ... | // | ... | // | ... | // |-----------------------------------------------------------------| // |-----------------------------------------------------------------| // | source view id | // | ... | // | ... | // | ... | // | ... | // |-----------------------------------------------------------------| // // // zv - zeroversion // if zeroversion is 0, message version is 0, otherwise it is // read from real version // t - type // o - order // size_t gcomm::evs::Message::serialize(gu::byte_t* const buf, size_t const buflen, size_t offset) const { uint8_t zeroversion; switch (type_) { case T_JOIN: case T_INSTALL: zeroversion = 0; break; default: zeroversion = (version_ != 0 ? 
1 : 0); } uint8_t b = static_cast(zeroversion | (type_ << 2) | (order_ << 5)); gu_trace(offset = gu::serialize1(b, buf, buflen, offset)); gu_trace(offset = gu::serialize1(flags_, buf, buflen, offset)); gu_trace(offset = gu::serialize1(version_, buf, buflen, offset)); gu_trace(offset = gu::serialize1(uint8_t(0), buf, buflen, offset)); gu_trace(offset = gu::serialize8(fifo_seq_, buf, buflen, offset)); if (flags_ & F_SOURCE) { gu_trace(offset = source_.serialize(buf, buflen, offset)); } gu_trace(offset = source_view_id_.serialize(buf, buflen, offset)); return offset; } size_t gcomm::evs::Message::unserialize(const gu::byte_t* const buf, size_t const buflen, size_t offset) { uint8_t b; gu_trace(offset = gu::unserialize1(buf, buflen, offset, b)); // The message version will be read from offset 16 regardless what is // the zeroversion value. The only purpose of zeroversion is to // make pre 3.8 nodes to discard messages in new format. type_ = static_cast((b >> 2) & 0x7); if (type_ <= T_NONE || type_ > T_DELAYED_LIST) { gu_throw_error(EINVAL) << "invalid type " << type_; } order_ = static_cast((b >> 5) & 0x7); if (order_ < O_DROP || order_ > O_SAFE) { gu_throw_error(EINVAL) << "invalid safety prefix " << order_; } gu_trace(offset = gu::unserialize1(buf, buflen, offset, flags_)); gu_trace(offset = gu::unserialize1(buf, buflen, offset, version_)); switch (type_) { case T_JOIN: case T_INSTALL: // Join and install message will always remain protocol zero, // version check is not applicable. break; default: if (version_ > GCOMM_PROTOCOL_MAX_VERSION) { gu_throw_error(EPROTONOSUPPORT) << "protocol version " << static_cast(version_) << " not supported"; } break; } uint8_t reserved; gu_trace(offset = gu::unserialize1(buf, buflen, offset, reserved)); gu_trace(offset = gu::unserialize8(buf, buflen, offset, fifo_seq_)); if (flags_ & F_SOURCE) { gu_trace(offset = source_.unserialize(buf, buflen, offset)); } gu_trace(offset = source_view_id_.unserialize(buf, buflen, offset)); return offset; } size_t gcomm::evs::Message::serial_size() const { return (1 + // version | type | order 1 + // flags 2 + // pad sizeof(fifo_seq_) + // fifo_seq ((flags_ & F_SOURCE) ? 
UUID::serial_size() : 0) + ViewId::serial_size()); // source_view_id } size_t gcomm::evs::UserMessage::serialize(gu::byte_t* const buf, size_t const buflen, size_t offset) const { gu_trace(offset = Message::serialize(buf, buflen, offset)); gu_trace(offset = gu::serialize1(user_type_, buf, buflen, offset)); gcomm_assert(seq_range_ <= seqno_t(0xff)); uint8_t b = static_cast(seq_range_); gu_trace(offset = gu::serialize1(b, buf, buflen, offset)); gu_trace(offset = gu::serialize2(uint16_t(0), buf, buflen, offset)); gu_trace(offset = gu::serialize8(seq_, buf, buflen, offset)); gu_trace(offset = gu::serialize8(aru_seq_, buf, buflen, offset)); return offset; } size_t gcomm::evs::UserMessage::unserialize(const gu::byte_t* const buf, size_t const buflen, size_t offset, bool skip_header) { if (skip_header == false) { gu_trace(offset = Message::unserialize(buf, buflen, offset)); } gu_trace(offset = gu::unserialize1(buf, buflen, offset, user_type_)); uint8_t b; gu_trace(offset = gu::unserialize1(buf, buflen, offset, b)); seq_range_ = b; uint16_t pad; gu_trace(offset = gu::unserialize2(buf, buflen, offset, pad)); if (pad != 0) { log_warn << "invalid pad: " << pad; } gu_trace(offset = gu::unserialize8(buf, buflen, offset, seq_)); gu_trace(offset = gu::unserialize8(buf, buflen, offset, aru_seq_)); return offset; } size_t gcomm::evs::UserMessage::serial_size() const { return Message::serial_size() + // Header 1 + // User type 1 + // Seq range 2 + // Pad/reserved sizeof(seqno_t) + // Seq sizeof(seqno_t); // Aru seq } size_t gcomm::evs::AggregateMessage::serialize(gu::byte_t* const buf, size_t const buflen, size_t offset) const { gu_trace(offset = gu::serialize1(flags_, buf, buflen, offset)); gu_trace(offset = gu::serialize1(user_type_, buf, buflen, offset)); gu_trace(offset = gu::serialize2(len_, buf, buflen, offset)); return offset; } size_t gcomm::evs::AggregateMessage::unserialize(const gu::byte_t* const buf, size_t const buflen, size_t offset) { gu_trace(offset = gu::unserialize1(buf, buflen, offset, flags_)); gu_trace(offset = gu::unserialize1(buf, buflen, offset, user_type_)); gu_trace(offset = gu::unserialize2(buf, buflen, offset, len_)); return offset; } size_t gcomm::evs::AggregateMessage::serial_size() const { return sizeof(flags_) + sizeof(len_) + sizeof(user_type_); } size_t gcomm::evs::DelegateMessage::serialize(gu::byte_t* const buf, size_t const buflen, size_t offset) const { gu_trace(offset = Message::serialize(buf, buflen, offset)); return offset; } size_t gcomm::evs::DelegateMessage::unserialize(const gu::byte_t* const buf, size_t const buflen, size_t offset, bool skip_header) { if (skip_header == false) { gu_trace(offset = Message::unserialize(buf, buflen, offset)); } return offset; } size_t gcomm::evs::DelegateMessage::serial_size() const { return Message::serial_size(); } size_t gcomm::evs::GapMessage::serialize(gu::byte_t* const buf, size_t const buflen, size_t offset) const { gu_trace(offset = Message::serialize(buf, buflen, offset)); gu_trace(offset = gu::serialize8(seq_, buf, buflen, offset)); gu_trace(offset = gu::serialize8(aru_seq_, buf, buflen, offset)); gu_trace(offset = range_uuid_.serialize(buf, buflen, offset)); gu_trace(offset = range_.serialize(buf, buflen, offset)); return offset; } size_t gcomm::evs::GapMessage::unserialize(const gu::byte_t* const buf, size_t const buflen, size_t offset, bool skip_header) { if (skip_header == false) { gu_trace(offset = Message::unserialize(buf, buflen, offset)); } gu_trace(offset = gu::unserialize8(buf, buflen, offset, seq_)); 
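    // Note: the remaining fields are decoded in the same order that
    // GapMessage::serialize() above writes them (aru_seq, range_uuid, range);
    // presumably the two functions must stay in sync for the wire format to
    // round-trip correctly.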
gu_trace(offset = gu::unserialize8(buf, buflen, offset, aru_seq_)); gu_trace(offset = range_uuid_.unserialize(buf, buflen, offset)); gu_trace(offset = range_.unserialize(buf, buflen, offset)); return offset; } size_t gcomm::evs::GapMessage::serial_size() const { return (Message::serial_size() + 2 * sizeof(seqno_t) + UUID::serial_size() + Range::serial_size()); } size_t gcomm::evs::JoinMessage::serialize(gu::byte_t* const buf, size_t const buflen, size_t offset) const { gu_trace(offset = Message::serialize(buf, buflen, offset)); gu_trace(offset = gu::serialize8(seq_, buf, buflen, offset)); gu_trace(offset = gu::serialize8(aru_seq_, buf, buflen, offset)); gu_trace(offset = node_list_.serialize(buf, buflen, offset)); return offset; } size_t gcomm::evs::JoinMessage::unserialize(const gu::byte_t* const buf, size_t const buflen, size_t offset, bool skip_header) { if (skip_header == false) { gu_trace(offset = Message::unserialize(buf, buflen, offset)); } gu_trace(offset = gu::unserialize8(buf, buflen, offset, seq_)); gu_trace(offset = gu::unserialize8(buf, buflen, offset, aru_seq_)); node_list_.clear(); gu_trace(offset = node_list_.unserialize(buf, buflen, offset)); return offset; } size_t gcomm::evs::JoinMessage::serial_size() const { return (Message::serial_size() + 2 * sizeof(seqno_t) + node_list_.serial_size()); } size_t gcomm::evs::InstallMessage::serialize(gu::byte_t* const buf, size_t const buflen, size_t offset) const { gu_trace(offset = Message::serialize(buf, buflen, offset)); gu_trace(offset = gu::serialize8(seq_, buf, buflen, offset)); gu_trace(offset = gu::serialize8(aru_seq_, buf, buflen, offset)); gu_trace(offset = install_view_id_.serialize(buf, buflen, offset)); gu_trace(offset = node_list_.serialize(buf, buflen, offset)); return offset; } size_t gcomm::evs::InstallMessage::unserialize(const gu::byte_t* const buf, size_t const buflen, size_t offset, bool skip_header) { if (skip_header == false) { gu_trace(offset = Message::unserialize(buf, buflen, offset)); } gu_trace(offset = gu::unserialize8(buf, buflen, offset, seq_)); gu_trace(offset = gu::unserialize8(buf, buflen, offset, aru_seq_)); gu_trace(offset = install_view_id_.unserialize(buf, buflen, offset)); node_list_.clear(); gu_trace(offset = node_list_.unserialize(buf, buflen, offset)); return offset; } size_t gcomm::evs::InstallMessage::serial_size() const { return (Message::serial_size() + 2 * sizeof(seqno_t) + ViewId::serial_size() + node_list_.serial_size()); } size_t gcomm::evs::LeaveMessage::serialize(gu::byte_t* const buf, size_t const buflen, size_t offset) const { gu_trace(offset = Message::serialize(buf, buflen, offset)); gu_trace(offset = gu::serialize8(seq_, buf, buflen, offset)); gu_trace(offset = gu::serialize8(aru_seq_, buf, buflen, offset)); return offset; } size_t gcomm::evs::LeaveMessage::unserialize(const gu::byte_t* const buf, size_t const buflen, size_t offset, bool skip_header) { if (skip_header == false) { gu_trace(offset = Message::unserialize(buf, buflen, offset)); } gu_trace(offset = gu::unserialize8(buf, buflen, offset, seq_)); gu_trace(offset = gu::unserialize8(buf, buflen, offset, aru_seq_)); return offset; } size_t gcomm::evs::LeaveMessage::serial_size() const { return (Message::serial_size() + 2 * sizeof(seqno_t)); } size_t gcomm::evs::DelayedListMessage::serialize(gu::byte_t* const buf, size_t const buflen, size_t offset) const { gu_trace(offset = Message::serialize(buf, buflen, offset)); gu_trace(offset = gu::serialize1(static_cast(delayed_list_.size()), buf, buflen, offset)); for 
(DelayedList::const_iterator i(delayed_list_.begin()); i != delayed_list_.end(); ++i) { gu_trace(offset = i->first.serialize(buf, buflen, offset)); gu_trace(offset = gu::serialize1(i->second, buf, buflen, offset)); } return offset; } size_t gcomm::evs::DelayedListMessage::unserialize(const gu::byte_t* const buf, size_t const buflen, size_t offset, bool skip_header) { if (skip_header == false) { gu_trace(offset = Message::unserialize(buf, buflen, offset)); } delayed_list_.clear(); uint8_t list_sz(0); gu_trace(offset = gu::unserialize1(buf, buflen, offset, list_sz)); for (uint8_t i(0); i < list_sz; ++i) { UUID uuid; uint8_t cnt; gu_trace(offset = uuid.unserialize(buf, buflen, offset)); gu_trace(offset = gu::unserialize1(buf, buflen, offset, cnt)); delayed_list_.insert(std::make_pair(uuid, cnt)); } return offset; } size_t gcomm::evs::DelayedListMessage::serial_size() const { return (Message::serial_size() + gu::serial_size(uint8_t(0)) + std::min( delayed_list_.size(), static_cast(std::numeric_limits::max())) * (UUID::serial_size() + gu::serial_size(uint8_t(0)))); } galera-3-25.3.20/gcomm/src/protonet.cpp0000644000015300001660000000370313042054732017410 0ustar jenkinsjenkins/* * Copyright (C) 2009-2014 Codership Oy * * $Id$ */ #ifdef HAVE_ASIO_HPP #include "asio_protonet.hpp" #endif // HAVE_ASIO_HPP #include "gcomm/util.hpp" #include "gcomm/conf.hpp" void gcomm::Protonet::insert(Protostack* pstack) { log_debug << "insert pstack " << pstack; if (find(protos_.begin(), protos_.end(), pstack) != protos_.end()) { gu_throw_fatal; } protos_.push_back(pstack); } void gcomm::Protonet::erase(Protostack* pstack) { log_debug << "erase pstack " << pstack; std::deque::iterator i; if ((i = find(protos_.begin(), protos_.end(), pstack)) == protos_.end()) { gu_throw_fatal; } protos_.erase(i); } gu::datetime::Date gcomm::Protonet::handle_timers() { Critical crit(*this); gu::datetime::Date next_time(gu::datetime::Date::max()); { for (std::deque::iterator i = protos_.begin(); i != protos_.end(); ++i) { next_time = std::min(next_time, (*i)->handle_timers()); } } return next_time; } bool gcomm::Protonet::set_param(const std::string& key, const std::string& val) { bool ret(false); for (std::deque::iterator i(protos_.begin()); i != protos_.end(); ++i) { ret |= (*i)->set_param(key, val); } return ret; } gcomm::Protonet* gcomm::Protonet::create(gu::Config& conf) { const std::string backend(conf.get(Conf::ProtonetBackend)); const int version(conf.get(Conf::ProtonetVersion)); if (version > max_version_) { gu_throw_error(EINVAL) << "invalid protonet version: " << version; } log_info << "protonet " << backend << " version " << version; if (backend == "asio") return new AsioProtonet(conf, version); gu_throw_fatal << Conf::ProtonetBackend << " '" << backend << "' not supported"; throw; return 0; // keep compiler happy } galera-3-25.3.20/gcomm/src/transport.cpp0000644000015300001660000000301513042054732017566 0ustar jenkinsjenkins/* * Copyright (C) 2009-2012 Codership Oy */ #include "gcomm/transport.hpp" #include "socket.hpp" #include "gmcast.hpp" #include "pc.hpp" #include "gcomm/conf.hpp" // Public methods const gcomm::UUID& gcomm::Transport::uuid() const { gu_throw_fatal << "UUID not supported by " + uri_.get_scheme(); } std::string gcomm::Transport::local_addr() const { gu_throw_fatal << "get local url not supported"; } std::string gcomm::Transport::remote_addr() const { gu_throw_fatal << "get remote url not supported"; } int gcomm::Transport::err_no() const { return error_no_; } void gcomm::Transport::listen() { 
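    // Base-class stub: a plain Transport cannot listen for connections, so it
    // simply throws here; transports that accept incoming connections are
    // presumably expected to override this together with accept() below.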
gu_throw_fatal << "not supported"; } gcomm::Transport* gcomm::Transport::accept() { gu_throw_fatal << "not supported"; } // CTOR/DTOR gcomm::Transport::Transport(Protonet& pnet, const gu::URI& uri) : Protolay(pnet.conf()), pstack_(), pnet_(pnet), uri_(uri), error_no_(0) { } gcomm::Transport::~Transport() { } gcomm::Transport* gcomm::Transport::create(Protonet& pnet, const gu::URI& uri) { const std::string& scheme = uri.get_scheme(); if (scheme == Conf::GMCastScheme) { return new GMCast(pnet, uri); } else if (scheme == Conf::PcScheme) { return new PC(pnet, uri); } gu_throw_fatal << "scheme '" << uri.get_scheme() << "' not supported"; } gcomm::Transport* gcomm::Transport::create(Protonet& pnet, const std::string& uri_str) { return create(pnet, gu::URI(uri_str)); } galera-3-25.3.20/gcomm/src/gmcast_node.hpp0000644000015300001660000000323313042054732020024 0ustar jenkinsjenkins/* * Copyright (C) 2009-2012 Codership Oy */ #ifndef GMCAST_NODE_HPP #define GMCAST_NODE_HPP #include "gcomm/types.hpp" #include "gcomm/uuid.hpp" #include "gu_serialize.hpp" namespace gcomm { namespace gmcast { class Node; std::ostream& operator<<(std::ostream&, const Node&); } } class gcomm::gmcast::Node { public: Node(const std::string& addr = "") : addr_(addr), mcast_addr_("") { } const std::string& addr() const { return addr_.to_string(); } const std::string& mcast_addr() const { return mcast_addr_.to_string(); } size_t unserialize(const gu::byte_t* buf, const size_t buflen, const size_t offset) { size_t off; uint32_t bits; gu_trace (off = gu::unserialize4(buf, buflen, offset, bits)); gu_trace (off = addr_.unserialize(buf, buflen, off)); gu_trace (off = mcast_addr_.unserialize(buf, buflen, off)); return off; } size_t serialize(gu::byte_t* buf, const size_t buflen, const size_t offset) const { size_t off; uint32_t bits(0); gu_trace (off = gu::serialize4(bits, buf, buflen, offset)); gu_trace (off = addr_.serialize(buf, buflen, off)); gu_trace (off = mcast_addr_.serialize(buf, buflen, off)); return off; } static size_t serial_size() { return (4 + 2 * ADDR_SIZE); } private: static const size_t ADDR_SIZE = 64; gcomm::String addr_; gcomm::String mcast_addr_; }; inline std::ostream& gcomm::gmcast::operator<<(std::ostream& os, const Node& n) { return os; } #endif // GMCAST_NODE_HPP galera-3-25.3.20/gcomm/doc/0000755000015300001660000000000013042054732015005 5ustar jenkinsjenkinsgalera-3-25.3.20/gcomm/doc/Doxyfile0000644000015300001660000014370513042054732016525 0ustar jenkinsjenkins# Doxyfile 1.4.6 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project # # All text after a hash (#) is considered a comment and will be ignored # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" ") #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # The PROJECT_NAME tag is a single word (or a sequence of words surrounded # by quotes) that should identify the project. PROJECT_NAME = GComm # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. 
PROJECT_NUMBER = 0.2.3 # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. OUTPUT_DIRECTORY = ./ # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. # Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. # The default language is English, other supported languages are: # Brazilian, Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, # Dutch, Finnish, French, German, Greek, Hungarian, Italian, Japanese, # Japanese-en (Japanese with English messages), Korean, Korean-en, Norwegian, # Polish, Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish, # Swedish, and Ukrainian. OUTPUT_LANGUAGE = English # This tag can be used to specify the encoding used in the generated output. # The encoding is not always determined by the language that is chosen, # but also whether or not the output is meant for Windows or non-Windows users. # In case there is a difference, setting the USE_WINDOWS_ENCODING tag to YES # forces the Windows encoding (this is the default for the Windows binary), # whereas setting the tag to NO uses a Unix-style encoding (the default for # all platforms other than Windows). USE_WINDOWS_ENCODING = NO # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. # If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. 
Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = YES # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. This can be useful is your file systems # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like the Qt-style comments (thus requiring an # explicit @brief command for a brief description. JAVADOC_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the DETAILS_AT_TOP tag is set to YES then Doxygen # will output the detailed description near the top, like JavaDoc. # If set to NO, the detailed description appears after the member # documentation. DETAILS_AT_TOP = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 8 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". # For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. 
# For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = NO # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. Doxygen will then generate output that is more tailored for Java. # For instance, namespaces will be presented as packages, qualified scopes # will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want to # include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); v.s. # func(std::string) {}). This also make the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. BUILTIN_STL_SUPPORT = NO # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. Alternatively, this can be done per class using # the \nosubgrouping command. SUBGROUPING = YES #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. # Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = NO # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = NO # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. # If set to NO (the default) only methods in the interface are included. EXTRACT_LOCAL_METHODS = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. # If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. 
HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = YES # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. If set to NO the members will appear in # declaration order. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. SORT_BRIEF_DOCS = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. SORT_BY_SCOPE_NAME = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if sectionname ... \endif. 
ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or define consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. # The appearance of the initializer of individual variables and defines in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES # If the sources in your project are distributed over multiple directories # then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy # in the documentation. The default is NO. SHOW_DIRECTORIES = NO # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from the # version control system). Doxygen will invoke the program by executing (via # popen()) the command , where is the value of # the FILE_VERSION_FILTER tag, and is the name of an input file # provided by doxygen. Whatever the program writes to standard output # is used as the file version. See the manual for examples. FILE_VERSION_FILTER = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES # This WARN_NO_PARAMDOC option can be abled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. If left blank the output is written # to stderr. 
WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = ../src ../src/gcomm # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx # *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py FILE_PATTERNS = *.c *.h *.hpp # The RECURSIVE tag can be used to turn specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. RECURSIVE = NO # The EXCLUDE tag can be used to specify files and/or directories that should # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used select whether or not files or # directories that are symbolic links (a Unix filesystem feature) are excluded # from the input. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. Note that the wildcards are matched # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* EXCLUDE_PATTERNS = # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain image that are included in the documentation (see # the \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command , where # is the value of the INPUT_FILTER tag, and is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. If FILTER_PATTERNS is specified, this tag will be # ignored. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. Doxygen will compare the file name with each pattern and apply the # filter if there is a match. The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). 
See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER # is applied to all files. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C and C++ comments will always remain visible. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES (the default) # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = YES # If the REFERENCES_RELATION tag is set to YES (the default) # then for each documented function all documented entities # called/used by that function will be listed. REFERENCES_RELATION = YES # If the USE_HTAGS tag is set to YES then the references to source code # will point to the HTML generated by the htags(1) tool instead of doxygen # built-in source browser. The htags tool is part of GNU's global source # tagging system (see http://www.gnu.org/software/global/global.html). You # will need version 4.8.6 or higher. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. ALPHABETICAL_INDEX = NO # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. # The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. 
GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. HTML_HEADER = # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If the tag is left blank doxygen # will generate a default style sheet. Note that doxygen will try to copy # the style sheet file to the HTML output directory, so don't put your own # stylesheet in the HTML output directory as well, or it will be erased! HTML_STYLESHEET = # If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, # files or namespaces will be aligned in HTML using tables. If set to # NO a bullet list will be used. HTML_ALIGN_MEMBERS = YES # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compressed HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory. CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = NO # The DISABLE_INDEX tag can be used to turn on/off the condensed index at # top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. DISABLE_INDEX = NO # This tag can be used to set the number of enum values (range [1..20]) # that doxygen will group on one line in the generated HTML documentation. ENUM_VALUES_PER_LINE = 4 # If the GENERATE_TREEVIEW tag is set to YES, a side panel will be # generated containing a tree-like index structure (just like the one that # is generated for HTML Help). 
For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, # Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are # probably better off using the HTML help feature. GENERATE_TREEVIEW = NO # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 250 #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = NO # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `latex' will be used as the default path. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = makeindex # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, a4wide, letter, legal and # executive. If left blank a4wide will be used. PAPER_TYPE = a4wide # The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX # packages that should be included in the LaTeX output. EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! LATEX_HEADER = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). The pdf file will # contain links (just like the HTML output) instead of page references # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = NO # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. USE_PDFLATEX = NO # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. # command to the generated LaTeX files. This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. # This option is also used when generating formulas in HTML. LATEX_BATCHMODE = NO # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. 
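# Illustrative example (annotation, not part of the original configuration):
# to turn the LaTeX output described above into a single hyperlinked PDF one
# would typically set
#   GENERATE_LATEX = YES
#   PDF_HYPERLINKS = YES
#   USE_PDFLATEX   = YES
# and then run `make` inside the LATEX_OUTPUT directory that doxygen creates.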
LATEX_HIDE_INDICES = NO #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load stylesheet definitions from file. Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. # Syntax is similar to doxygen's config file. RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES (the default) Doxygen will # generate man pages GENERATE_MAN = YES # The MAN_OUTPUT tag is used to specify where the man pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = NO # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path. XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify an XML schema, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_SCHEMA = # The XML_DTD tag can be used to specify an XML DTD, # which can be used by a validating XML parser to check the # syntax of the XML files. 
XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. Note that this feature is still experimental # and incomplete at the moment. GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. This is useful # if you want to understand what is going on. On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. # This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. If set to NO (the default) only conditional # compilation will be performed. Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = NO # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_DEFINED tags. EXPAND_ONLY_PREDEF = NO # If the SEARCH_INCLUDES tag is set to YES (the default) the includes files # in the INCLUDE_PATH (see below) will be search if a #include is found. SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. 
If left blank, the patterns specified with FILE_PATTERNS will # be used. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. To prevent a macro definition from being # undefined via #undef or recursively expanded use the := operator # instead of the = operator. PREDEFINED = # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all function-like macros that are alone # on a line, have an all uppercase name, and do not end with a semicolon. Such # function macros are typically used for boiler-plate code, and will confuse # the parser if not removed. SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. # Optionally an initial location of the external documentation # can be added for each tagfile. The format of a tag file without # this location is as follows: # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths or # URLs. If a location is present for each tag, the installdox tool # does not have to be run to correct the links. # Note that each tag file must have a unique name # (where the name does NOT include the path) # If a tag file is not located in the directory in which doxygen # is run, you must also specify the path to the tagfile here. TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. EXTERNAL_GROUPS = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base # or super classes. Setting the tag to NO turns the diagrams off. Note that # this option is superseded by the HAVE_DOT option below. This is only a # fallback. It is recommended to install and use dot, since it yields more # powerful graphs. 
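# Illustrative example (annotation, not part of the original configuration),
# relating to the preprocessor section above: code behind conditional
# compilation, such as the HAVE_ASIO_HPP guards in the gcomm sources, is only
# documented if the preprocessor sees the macro.  Since ENABLE_PREPROCESSING
# is already YES, setting e.g.
#   PREDEFINED = HAVE_ASIO_HPP
# makes doxygen document the declarations behind those guards.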
CLASS_DIAGRAMS = YES # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = NO # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force the # the CLASS_DIAGRAMS tag to NO. CLASS_GRAPH = YES # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. COLLABORATION_GRAPH = YES # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. UML_LOOK = NO # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. INCLUDE_GRAPH = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT tags are set to YES then doxygen will # generate a call dependency graph for every global function or class method. # Note that enabling this option will significantly increase the time of a run. # So in most cases it will be better to enable call graphs for selected # functions only using the \callgraph command. CALL_GRAPH = NO # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will graphical hierarchy of all classes instead of a textual one. GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are png, jpg, or gif # If left blank png will be used. DOT_IMAGE_FORMAT = png # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). 
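# Illustrative note (annotation, not part of the original configuration): the
# *_GRAPH options above are already set to YES but have no effect while
# HAVE_DOT = NO.  With Graphviz installed, the diagrams are activated with
#   HAVE_DOT = YES
# at the cost of a noticeably longer documentation build.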
DOTFILE_DIRS = # The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width # (in pixels) of the graphs generated by dot. If a graph becomes larger than # this value, doxygen will try to truncate the graph, so that it fits within # the specified constraint. Beware that most browsers cannot cope with very # large images. MAX_DOT_GRAPH_WIDTH = 1024 # The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allows height # (in pixels) of the graphs generated by dot. If a graph becomes larger than # this value, doxygen will try to truncate the graph, so that it fits within # the specified constraint. Beware that most browsers cannot cope with very # large images. MAX_DOT_GRAPH_HEIGHT = 1024 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lay further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. Also note that a graph may be further truncated if the graph's # image dimensions are not sufficient to fit the graph (see MAX_DOT_GRAPH_WIDTH # and MAX_DOT_GRAPH_HEIGHT). If 0 is used for the depth value (the default), # the graph is not depth-constrained. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, which results in a white background. # Warning: Depending on the platform used, enabling this option may lead to # badly anti-aliased labels on the edges of a graph (i.e. they become hard to # read). DOT_TRANSPARENT = NO # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. DOT_MULTI_TARGETS = NO # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES #--------------------------------------------------------------------------- # Configuration::additions related to the search engine #--------------------------------------------------------------------------- # The SEARCHENGINE tag specifies whether or not a search engine should be # used. If set to NO the values of all tags below this one will be ignored. 
SEARCHENGINE = NO galera-3-25.3.20/gcomm/SConscript0000644000015300001660000000014113042054732016246 0ustar jenkinsjenkins# SCons build script for building gcomm SConscript(Split('''src/SConscript test/SConscript''')) galera-3-25.3.20/gcomm/test/0000755000015300001660000000000013042054732015217 5ustar jenkinsjenkinsgalera-3-25.3.20/gcomm/test/check_gcomm.hpp0000644000015300001660000000145413042054732020173 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy */ #ifndef CHECK_GCOMM_HPP #define CHECK_GCOMM_HPP struct Suite; /* Tests for various common types */ Suite* types_suite(); /* Tests for utilities */ Suite* util_suite(); /* Tests for logger */ Suite* logger_suite(); /* Tests for message buffer implementations */ Suite* buffer_suite(); /* Tests for event loop */ Suite* event_suite(); /* Tests for concurrency handling (mutex, cond, thread, etc.)*/ Suite* concurrent_suite(); /* Tests for TCP transport */ Suite* tcp_suite(); /* Tests for GMcast transport */ Suite* gmcast_suite(); /* Tests for EVS transport */ Suite* evs_suite(); /* Better evs suite */ Suite* evs2_suite(); /* Tests for VS trasport */ Suite* vs_suite(); /* Tests for PC transport */ Suite* pc_suite(); #endif // CHECK_GCOMM_HPP galera-3-25.3.20/gcomm/test/check_trace.hpp0000644000015300001660000003716013042054732020172 0ustar jenkinsjenkins/* * Copyright (C) 2009-2014 Codership Oy * * $Id$ */ /*! * Classes for tracing views and messages */ #include "gu_uri.hpp" #include "gu_datetime.hpp" #include "gcomm/datagram.hpp" #include "gcomm/uuid.hpp" #include "gcomm/protolay.hpp" #include "gcomm/protostack.hpp" #include "gcomm/transport.hpp" #include "gcomm/map.hpp" #include "gcomm/util.hpp" #include #include #include gu::Config& check_trace_conf(); namespace gcomm { class TraceMsg { public: TraceMsg(const UUID& source = UUID::nil(), const ViewId& source_view_id = ViewId(), const int64_t seq = -1) : source_(source), source_view_id_(source_view_id), seq_(seq) { } const UUID& source() const { return source_; } const ViewId& source_view_id() const { return source_view_id_; } int64_t seq() const { return seq_; } bool operator==(const TraceMsg& cmp) const { return (source_ == cmp.source_ && source_view_id_ == cmp.source_view_id_ && seq_ == cmp.seq_ ); } private: UUID source_; ViewId source_view_id_; int64_t seq_; }; std::ostream& operator<<(std::ostream& os, const TraceMsg& msg); class ViewTrace { public: ViewTrace(const View& view) : view_(view), msgs_() { } void insert_msg(const TraceMsg& msg) { switch (view_.type()) { case V_REG: gcomm_assert(view_.id() == msg.source_view_id()); gcomm_assert(contains(msg.source()) == true) << "msg source " << msg.source() << " not int view " << view_; break; case V_TRANS: gcomm_assert(view_.id().uuid() == msg.source_view_id().uuid() && view_.id().seq() == msg.source_view_id().seq()); break; case V_NON_PRIM: break; case V_PRIM: gcomm_assert(view_.id() == msg.source_view_id()) << " view id " << view_.id() << " source view " << msg.source_view_id(); gcomm_assert(contains(msg.source()) == true); break; case V_NONE: gu_throw_fatal; break; } if (view_.type() != V_NON_PRIM) { msgs_.push_back(msg); } } const View& view() const { return view_; } const std::deque& msgs() const { return msgs_; } bool operator==(const ViewTrace& cmp) const { // Note: Cannot compare joining members since seen differently // on different merging subsets return (view_.members() == cmp.view_.members() && view_.left() == cmp.view_.left() && view_.partitioned() == cmp.view_.partitioned() && msgs_ == cmp.msgs_ ); } private: bool 
contains(const UUID& uuid) const { return (view_.members().find(uuid) != view_.members().end() || view_.left().find(uuid) != view_.left().end() || view_.partitioned().find(uuid) !=view_.partitioned().end()); } View view_; std::deque msgs_; }; std::ostream& operator<<(std::ostream& os, const ViewTrace& vtr); class Trace { public: class ViewTraceMap : public Map { }; Trace() : views_(), current_view_(views_.end()) { } void insert_view(const View& view) { gu_trace(current_view_ = views_.insert_unique( std::make_pair(view.id(), ViewTrace(view)))); log_debug << view; } void insert_msg(const TraceMsg& msg) { gcomm_assert(current_view_ != views_.end()) << "no view set before msg delivery"; gu_trace(ViewTraceMap::value(current_view_).insert_msg(msg)); } const ViewTraceMap& view_traces() const { return views_; } const ViewTrace& current_view_trace() const { gcomm_assert(current_view_ != views_.end()); return ViewTraceMap::value(current_view_); } private: ViewTraceMap views_; ViewTraceMap::iterator current_view_; }; std::ostream& operator<<(std::ostream& os, const Trace& tr); class DummyTransport : public Transport { UUID uuid_; std::deque out_; bool queue_; public: DummyTransport(const UUID& uuid = UUID::nil(), bool queue = true, const gu::URI& uri = gu::URI("dummy:")) : Transport(*std::auto_ptr (Protonet::create(check_trace_conf())), uri), uuid_(uuid), out_(), queue_(queue) {} ~DummyTransport() { out_.clear(); } const UUID& uuid() const { return uuid_; } size_t mtu() const { return (1U << 31); } void connect(bool first) { } void close(bool force) { } void close(const UUID&) { } void connect() { } void listen() { gu_throw_fatal << "not implemented"; } Transport *accept() { gu_throw_fatal << "not implemented"; return 0; } void handle_up(const void* cid, const Datagram& rb, const ProtoUpMeta& um) { send_up(rb, um); } int handle_down(Datagram& wb, const ProtoDownMeta& dm) { if (queue_ == true) { // assert(wb.header().size() == 0); out_.push_back(new Datagram(wb)); return 0; } else { gu_trace(return send_down(wb, ProtoDownMeta(0xff, O_UNRELIABLE, uuid_))); } } Datagram* out() { if (out_.empty()) { return 0; } Datagram* rb = out_.front(); out_.pop_front(); return rb; } }; class DummyNode : public Toplay { public: DummyNode(gu::Config& conf, const size_t index, const std::list& protos) : Toplay (conf), index_ (index), uuid_ (UUID(static_cast(index))), protos_ (protos), cvi_ (), tr_ (), curr_seq_(0) { gcomm_assert(protos_.empty() == false); std::list::iterator i, i_next; i = i_next = protos_.begin(); for (++i_next; i_next != protos_.end(); ++i, ++i_next) { gu_trace(gcomm::connect(*i, *i_next)); } gu_trace(gcomm::connect(*i, this)); } ~DummyNode() { std::list::iterator i, i_next; i = i_next = protos_.begin(); for (++i_next; i_next != protos_.end(); ++i, ++i_next) { gu_trace(gcomm::disconnect(*i, *i_next)); } gu_trace(gcomm::disconnect(*i, this)); std::for_each(protos_.begin(), protos_.end(), gu::DeleteObject()); } const UUID& uuid() const { return uuid_; } std::list& protos() { return protos_; } size_t index() const { return index_; } void connect(bool first) { gu_trace(std::for_each(protos_.rbegin(), protos_.rend(), std::bind2nd( std::mem_fun(&Protolay::connect), first))); } void close(bool force = false) { for (std::list::iterator i = protos_.begin(); i != protos_.end(); ++i) { (*i)->close(); } // gu_trace(std::for_each(protos.rbegin(), protos.rend(), // std::mem_fun(&Protolay::close))); } void close(const UUID& uuid) { for (std::list::iterator i = protos_.begin(); i != protos_.end(); ++i) { 
(*i)->close(uuid); } // gu_trace(std::for_each(protos.rbegin(), protos.rend(), // std::mem_fun(&Protolay::close))); } void send() { const int64_t seq(curr_seq_); gu::byte_t buf[sizeof(seq)]; size_t sz; gu_trace(sz = gu::serialize8(seq, buf, sizeof(buf), 0)); Datagram dg(gu::Buffer(buf, buf + sz)); int err = send_down(dg, ProtoDownMeta(0)); if (err != 0) { log_debug << "failed to send: " << strerror(err); } else { ++curr_seq_; } } Datagram create_datagram() { const int64_t seq(curr_seq_); gu::byte_t buf[sizeof(seq)]; size_t sz; gu_trace(sz = gu::serialize8(seq, buf, sizeof(buf), 0)); return Datagram (gu::Buffer(buf, buf + sz)); } const Trace& trace() const { return tr_; } void set_cvi(const ViewId& vi) { log_debug << uuid() << " setting cvi to " << vi; cvi_ = vi; } bool in_cvi() const { for (Trace::ViewTraceMap::const_reverse_iterator i( tr_.view_traces().rbegin()); i != tr_.view_traces().rend(); ++i) { if (i->first.uuid() == cvi_.uuid() && i->first.type() == cvi_.type() && i->first.seq() >= cvi_.seq()) { return true; } } return false; } void handle_up(const void* cid, const Datagram& rb, const ProtoUpMeta& um) { if (rb.len() != 0) { gcomm_assert((um.source() == UUID::nil()) == false); // assert(rb.header().size() == 0); const gu::byte_t* begin(gcomm::begin(rb)); const size_t available(gcomm::available(rb)); // log_debug << um.source() << " " << uuid() // << " " << available ; // log_debug << rb.len() << " " << rb.offset() << " " // << rb.header_len(); if (available != 8) { log_info << "check_trace fail"; } gcomm_assert(available == 8); int64_t seq; gu_trace(gu::unserialize8(begin, available, 0, seq)); tr_.insert_msg(TraceMsg(um.source(), um.source_view_id(), seq)); } else { gcomm_assert(um.has_view() == true); tr_.insert_view(um.view()); } } gu::datetime::Date handle_timers() { std::for_each(protos_.begin(), protos_.end(), std::mem_fun(&Protolay::handle_timers)); return gu::datetime::Date::max(); } private: size_t index_; UUID uuid_; std::list protos_; ViewId cvi_; Trace tr_; int64_t curr_seq_; }; class ChannelMsg { public: ChannelMsg(const Datagram& rb, const UUID& source) : rb_(rb), source_(source) { } const Datagram& rb() const { return rb_; } const UUID& source() const { return source_; } private: Datagram rb_; UUID source_; }; class Channel : public Bottomlay { public: Channel(gu::Config& conf, const size_t ttl = 1, const size_t latency = 1, const double loss = 1.) 
: Bottomlay(conf), ttl_(ttl), latency_(latency), loss_(loss), queue_() { } ~Channel() { } int handle_down(Datagram& wb, const ProtoDownMeta& dm) { gcomm_assert((dm.source() == UUID::nil()) == false); gu_trace(put(wb, dm.source())); return 0; } void put(const Datagram& rb, const UUID& source); ChannelMsg get(); void set_ttl(const size_t t) { ttl_ = t; } size_t ttl() const { return ttl_; } void set_latency(const size_t l) { gcomm_assert(l > 0); latency_ = l; } size_t latency() const { return latency_; } void set_loss(const double l) { loss_ = l; } double loss() const { return loss_; } size_t n_msgs() const { return queue_.size(); } private: size_t ttl_; size_t latency_; double loss_; std::deque > queue_; }; std::ostream& operator<<(std::ostream& os, const Channel& ch); std::ostream& operator<<(std::ostream& os, const Channel* ch); class MatrixElem { public: MatrixElem(const size_t ii, const size_t jj) : ii_(ii), jj_(jj) { } size_t ii() const { return ii_; } size_t jj() const { return jj_; } bool operator<(const MatrixElem& cmp) const { return (ii_ < cmp.ii_ || (ii_ == cmp.ii_ && jj_ < cmp.jj_)); } private: size_t ii_; size_t jj_; }; std::ostream& operator<<(std::ostream& os, const MatrixElem& me); class ChannelMap : public Map { public: struct DeleteObject { void operator()(ChannelMap::value_type& vt) { delete ChannelMap::value(vt); } }; }; class NodeMap : public Map { public: struct DeleteObject { void operator()(NodeMap::value_type& vt) { delete NodeMap::value(vt); } }; }; class PropagationMatrix { public: PropagationMatrix() : tp_(), prop_() { } ~PropagationMatrix(); void insert_tp(DummyNode* t); void set_latency(const size_t ii, const size_t jj, const size_t lat); void set_loss(const size_t ii, const size_t jj, const double loss); void split(const size_t ii, const size_t jj); void merge(const size_t ii, const size_t jj, const double loss = 1.0); void propagate_n(size_t n); void propagate_until_empty(); void propagate_until_cvi(bool handle_timers); friend std::ostream& operator<<(std::ostream&,const PropagationMatrix&); private: void expire_timers(); size_t count_channel_msgs() const; bool all_in_cvi() const; NodeMap tp_; ChannelMap prop_; }; std::ostream& operator<<(std::ostream& os, const PropagationMatrix& prop); // Cross check traces from vector of dummy nodes void check_trace(const std::vector& nvec); } // namespace gcomm galera-3-25.3.20/gcomm/test/check_types.cpp0000644000015300001660000000447113042054732020232 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy */ #include "check_gcomm.hpp" #include "gcomm/view.hpp" #include "gcomm/types.hpp" #include "gcomm/map.hpp" #include #include #include using std::pair; using std::make_pair; using std::string; #include "check_templ.hpp" #include using namespace gcomm; START_TEST(test_uuid) { UUID uuid; fail_unless(uuid.full_str() == "00000000-0000-0000-0000-000000000000", "%s", uuid.full_str().c_str()); for (size_t i = 0; i < 159; ++i) { UUID uuidrnd(0, 0); log_debug << uuidrnd; } UUID uuid1(0, 0); UUID uuid2(0, 0); fail_unless(uuid1 < uuid2); // Verify that short UUID notation matches with first 8 chars // of full uuid string. 
std::string full(uuid1.full_str()); std::ostringstream os; os << uuid1; fail_unless(full.compare(0, 8, os.str()) == 0, "%s != %s", full.c_str(), os.str().c_str()); } END_TEST START_TEST(test_view) { const UUID uuid1(1); const UUID uuid2(2); const UUID uuid3(3); // View id ordering: // 1) view seq less than // 2) uuid newer than (higher timestamp, greater leading bytes) // 3) view type (reg, trans, non-prim, prim) ViewId v1(V_REG, uuid2, 1); ViewId v2(V_REG, uuid1, 1); ViewId v3(V_TRANS, uuid1, 1); ViewId v4(V_TRANS, uuid3, 2); ViewId v5(V_REG, uuid2, 2); ViewId v6(V_REG, uuid1, 2); ViewId v7(V_TRANS, uuid1, 2); fail_unless(v1 < v2); fail_unless(v2 < v3); fail_unless(v3 < v4); fail_unless(v4 < v5); fail_unless(v5 < v6); fail_unless(v6 < v7); ViewId vid; fail_unless(vid.uuid() == UUID()); fail_unless(vid.seq() == 0); UUID uuid(0, 0); vid = ViewId(V_REG, uuid, 7); fail_unless(vid.uuid() == uuid); fail_unless(vid.seq() == 7); NodeList nl; for (size_t i = 0; i < 7; ++i) { nl.insert(make_pair(UUID(0, 0), Node(0))); } fail_unless(nl.size() == 7); } END_TEST Suite* types_suite() { Suite* s = suite_create("types"); TCase* tc; tc = tcase_create("test_uuid"); tcase_add_test(tc, test_uuid); suite_add_tcase(s, tc); tc = tcase_create("test_view"); tcase_add_test(tc, test_view); suite_add_tcase(s, tc); return s; } galera-3-25.3.20/gcomm/test/check_gcomm.cpp0000644000015300001660000000417213042054732020166 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy */ #include "check_gcomm.hpp" #include "gu_string_utils.hpp" // strsplit() #include "gu_exception.hpp" #include "gu_logger.hpp" #include #include #include #include #include #include #include // * suits = 0; if (argc > 1 && !strcmp(argv[1],"nofork")) { srunner_set_fork_status(sr, CK_NOFORK); } else if (argc > 1 && strcmp(argv[1], "nolog") == 0) { /* no log redirection */} else { // running in the background, loggin' to file FILE* log_file = fopen ("check_gcomm.log", "w"); if (!log_file) return EXIT_FAILURE; gu_conf_set_log_file (log_file); // redirect occasional stderr there as well if (dup2(fileno(log_file), 2) < 0) { perror("dup2() failed: "); return EXIT_FAILURE; } } if (::getenv("CHECK_GCOMM_DEBUG")) { gu_log_max_level = GU_LOG_DEBUG; //gu::Logger::enable_debug(true); } log_info << "check_gcomm, start tests"; if (::getenv("CHECK_GCOMM_SUITES")) { suits = new vector(gu::strsplit(::getenv("CHECK_GCOMM_SUITES"), ',')); } for (size_t i = 0; suites[i].suite != 0; ++i) { if (suits == 0 || find(suits->begin(), suits->end(), suites[i].name) != suits->end()) { srunner_add_suite(sr, suites[i].suite()); } } delete suits; suits = 0; srunner_run_all(sr, CK_NORMAL); log_info << "check_gcomm, run all tests"; int n_fail = srunner_ntests_failed(sr); srunner_free(sr); return n_fail == 0 ? 
EXIT_SUCCESS : EXIT_FAILURE; } galera-3-25.3.20/gcomm/test/check_templ.hpp0000644000015300001660000000461713042054732020216 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy */ #ifndef GCOMM_CHECK_TEMPL_HPP #define GCOMM_CHECK_TEMPL_HPP #include "gcomm/types.hpp" #include "gcomm/transport.hpp" #include #include #include namespace gcomm { template void check_serialization(const T& c, const size_t expected_size, const T& default_c) { fail_unless(c.serial_size() == expected_size, "size = %lu expected = %lu", c.serial_size(), expected_size); gu::byte_t* buf = new gu::byte_t[expected_size + 7]; size_t ret; // Check that what is written gets also read try { (void)c.serialize(buf, expected_size, 1); std::ostringstream os; os << c; fail("exception not thrown for %s", os.str().c_str()); } catch (gu::Exception& e) { // OK } fail_unless(c.serialize(buf, expected_size, 0) == expected_size); T c2(default_c); // Commented out. This test happened to work because default // protocol version for messages was zero and if the second // byte of the buffer contained something else, exception was // thrown. Now that the version can be different from zero, // the outcome of this check depends on message structure. // try // { // size_t res(c2.unserialize(buf, expected_size, 1)); // std::ostringstream os; // os << c; // fail("exception not thrown for %s, result %zu expected %zu", // os.str().c_str(), res, expected_size); // } // catch (gu::Exception& e) // { // // OK // } ret = c2.unserialize(buf, expected_size, 0); fail_unless(ret == expected_size, "expected %zu ret %zu", expected_size, ret); if ((c == c2) == false) { log_warn << "\n\t" << c << " !=\n\t" << c2; } fail_unless(c == c2); // Check that read/write return offset properly fail_unless(c.serialize(buf, expected_size + 7, 5) == expected_size + 5); fail_unless(c2.unserialize(buf, expected_size + 7, 5) == expected_size + 5); fail_unless(c == c2); delete[] buf; } } // namespace gcomm #endif // CHECK_TEMPL_HPP galera-3-25.3.20/gcomm/test/SConscript0000644000015300001660000000204313042054732017230 0ustar jenkinsjenkins Import('check_env') env = check_env.Clone() # Include paths env.Append(CPPPATH = Split(''' #/common #/galerautils/src #/gcomm/src ''')) env.Prepend(LIBS=File('#/galerautils/src/libgalerautils.a')) env.Prepend(LIBS=File('#/galerautils/src/libgalerautils++.a')) env.Prepend(LIBS=File('#/gcomm/src/libgcomm.a')) gcomm_check = env.Program(target = 'check_gcomm', source = Split(''' check_gcomm.cpp check_trace.cpp check_types.cpp check_util.cpp check_gmcast.cpp check_evs2.cpp check_pc.cpp ''')) env.Test("gcomm_check.passed", gcomm_check) Clean(gcomm_check, '#/check_gcomm.log') ssl_test = env.Program(target = 'ssl_test', source = ['ssl_test.cpp']) galera-3-25.3.20/gcomm/test/check_util.cpp0000644000015300001660000001342213042054732020037 0ustar jenkinsjenkins/* * Copyright (C) 2009-2014 Codership Oy */ #include "gcomm/util.hpp" #include "gcomm/protonet.hpp" #include "gcomm/datagram.hpp" #include "gcomm/conf.hpp" #ifdef HAVE_ASIO_HPP #include "asio_protonet.hpp" #endif // HAVE_ASIO_HPP #include "check_gcomm.hpp" #include "gu_logger.hpp" #include #include #include #include #include using std::vector; using std::numeric_limits; using std::string; using namespace gcomm; using gu::Exception; using gu::byte_t; using gu::Buffer; START_TEST(test_datagram) { // Header check gcomm::NetHeader hdr(42, 0); fail_unless(hdr.len() == 42); fail_unless(hdr.has_crc32() == false); fail_unless(hdr.version() == 0); hdr.set_crc32(1234, NetHeader::CS_CRC32); 
fail_unless(hdr.has_crc32() == true); fail_unless(hdr.len() == 42); gcomm::NetHeader hdr1(42, 1); fail_unless(hdr1.len() == 42); fail_unless(hdr1.has_crc32() == false); fail_unless(hdr1.version() == 1); gu::byte_t hdrbuf[NetHeader::serial_size_]; fail_unless(serialize(hdr1, hdrbuf, sizeof(hdrbuf), 0) == NetHeader::serial_size_); try { unserialize(hdrbuf, sizeof(hdrbuf), 0, hdr); fail(""); } catch (Exception& e) { // ok } gu::byte_t b[128]; for (gu::byte_t i = 0; i < sizeof(b); ++i) { b[i] = i; } gu::Buffer buf(b, b + sizeof(b)); gcomm::Datagram dg(buf); fail_unless(dg.len() == sizeof(b)); // Normal copy construction gcomm::Datagram dgcopy(buf); fail_unless(dgcopy.len() == sizeof(b)); fail_unless(memcmp(dgcopy.header() + dgcopy.header_offset(), dg.header() + dg.header_offset(), dg.header_len()) == 0); fail_unless(dgcopy.payload() == dg.payload()); // Copy construction from offset of 16 gcomm::Datagram dg16(dg, 16); log_info << dg16.len(); fail_unless(dg16.len() - dg16.offset() == sizeof(b) - 16); for (gu::byte_t i = 0; i < sizeof(b) - 16; ++i) { fail_unless(dg16.payload()[i + dg16.offset()] == i + 16); } #if 0 // Normalize datagram, all data is moved into payload, data from // beginning to offset is discarded. Normalization must not change // dg dg16.normalize(); fail_unless(dg16.len() == sizeof(b) - 16); for (byte_t i = 0; i < sizeof(b) - 16; ++i) { fail_unless(dg16.payload()[i] == i + 16); } fail_unless(dg.len() == sizeof(b)); for (byte_t i = 0; i < sizeof(b); ++i) { fail_unless(dg.payload()[i] == i); } Datagram dgoff(buf, 16); dgoff.header().resize(8); dgoff.set_header_offset(4); fail_unless(dgoff.len() == buf.size() + 4); fail_unless(dgoff.header_offset() == 4); fail_unless(dgoff.header().size() == 8); for (byte_t i = 0; i < 4; ++i) { *(&dgoff.header()[0] + i) = i; } dgoff.normalize(); fail_unless(dgoff.len() == sizeof(b) - 16 + 4); fail_unless(dgoff.header_offset() == 0); fail_unless(dgoff.header().size() == 0); #endif // 0 } END_TEST #if defined(HAVE_ASIO_HPP) START_TEST(test_asio) { gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); AsioProtonet pn(conf); string uri_str("tcp://127.0.0.1:0"); Acceptor* acc = pn.acceptor(uri_str); acc->listen(uri_str); uri_str = acc->listen_addr(); SocketPtr cl = pn.socket(uri_str); cl->connect(uri_str); pn.event_loop(gu::datetime::Sec); SocketPtr sr = acc->accept(); fail_unless(sr->state() == Socket::S_CONNECTED); vector buf(cl->mtu()); for (size_t i = 0; i < buf.size(); ++i) { buf[i] = static_cast(i & 0xff); } for (size_t i = 0; i < 13; ++i) { Datagram dg(Buffer(&buf[0], &buf[0] + buf.size())); cl->send(dg); } pn.event_loop(gu::datetime::Sec); delete acc; } END_TEST #endif // HAVE_ASIO_HPP START_TEST(test_protonet) { gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); Protonet* pn(Protonet::create(conf)); pn->event_loop(1); } END_TEST START_TEST(test_view_state) { // compare view. UUID view_uuid(NULL, 0); ViewId view_id(V_TRANS, view_uuid, 789); UUID m1(NULL, 0); UUID m2(NULL, 0); View view(0, view_id, true); view.add_member(m1, 0); view.add_member(m2, 1); View view2; { std::ostringstream os; view.write_stream(os); std::istringstream is(os.str()); view2.read_stream(is); fail_unless(view == view2); } // Create configuration to set file name. gu::Config conf; // compare view state. 
UUID my_uuid(NULL, 0); ViewState vst(my_uuid, view, conf); UUID my_uuid_2; View view_2; ViewState vst2(my_uuid_2, view_2, conf); { std::ostringstream os; vst.write_stream(os); std::istringstream is(os.str()); vst2.read_stream(is); fail_unless(vst == vst2); } // test write file and read file. vst.write_file(); UUID my_uuid_3; View view_3; ViewState vst3(my_uuid_3, view_3, conf); vst3.read_file(); fail_unless(vst == vst3); ViewState::remove_file(conf); } END_TEST Suite* util_suite() { Suite* s = suite_create("util"); TCase* tc; tc = tcase_create("test_datagram"); tcase_add_test(tc, test_datagram); suite_add_tcase(s, tc); #ifdef HAVE_ASIO_HPP tc = tcase_create("test_asio"); tcase_add_test(tc, test_asio); suite_add_tcase(s, tc); #endif // HAVE_ASIO_HPP tc = tcase_create("test_protonet"); tcase_add_test(tc, test_protonet); suite_add_tcase(s, tc); tc = tcase_create("test_view_state"); tcase_add_test(tc, test_view_state); suite_add_tcase(s, tc); return s; } galera-3-25.3.20/gcomm/test/check_trace.cpp0000644000015300001660000003171613042054732020166 0ustar jenkinsjenkins/* * Copyright (C) 2009-2014 Codership Oy * * $Id$ */ /*! * @brief Check trace implementation */ #include "check_trace.hpp" #include "gcomm/conf.hpp" #include "gu_asio.hpp" // gu::ssl_register_params() using namespace std; using namespace gu; using namespace gcomm; struct CheckTraceConfInit { explicit CheckTraceConfInit(gu::Config& conf) { gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); } }; // This is to avoid static initialization fiasco with gcomm::Conf static members // Ideally it is the latter which should be wrapped in a function, but, unless // this is used to initialize another static object, it should be fine. gu::Config& check_trace_conf() { static gu::Config conf; static CheckTraceConfInit const check_trace_conf_init(conf); return conf; } ostream& gcomm::operator<<(ostream& os, const TraceMsg& msg) { return (os << "(" << msg.source() << "," << msg.source_view_id() << "," << msg.seq() << ")"); } ostream& gcomm::operator<<(ostream& os, const ViewTrace& vtr) { os << vtr.view() << ": "; copy(vtr.msgs().begin(), vtr.msgs().end(), ostream_iterator(os, " ")); return os; } ostream& gcomm::operator<<(ostream& os, const Trace& tr) { os << "trace: \n"; os << tr.view_traces(); return os; } ostream& gcomm::operator<<(ostream& os, const Channel& ch) { return (os << "(" << ch.latency() << "," << ch.loss() << ")"); } ostream& gcomm::operator<<(ostream& os, const Channel* chp) { return (os << *chp); } ostream& gcomm::operator<<(ostream& os, const MatrixElem& me) { return (os << "(" << me.ii() << "," << me.jj() << ")"); } ostream& gcomm::operator<<(ostream& os, const PropagationMatrix& prop) { os << "("; copy(prop.prop_.begin(), prop.prop_.end(), ostream_iterator(os, ",")); os << ")"; return os; } class LinkOp { public: LinkOp(DummyNode& node, ChannelMap& prop) : node_(node), prop_(prop) { } void operator()(NodeMap::value_type& l) { if (NodeMap::key(l) != node_.index()) { ChannelMap::iterator ii; gu_trace(ii = prop_.insert_unique( make_pair(MatrixElem(node_.index(), NodeMap::key(l)), new Channel(check_trace_conf())))); gcomm::connect(ChannelMap::value(ii), node_.protos().front()); gu_trace(ii = prop_.insert_unique( make_pair(MatrixElem(NodeMap::key(l), node_.index()), new Channel(check_trace_conf())))); gcomm::connect(ChannelMap::value(ii), NodeMap::value(l)->protos().front()); } } private: DummyNode& node_; ChannelMap& prop_; }; class PropagateOp { public: PropagateOp(NodeMap& tp) : tp_(tp) { } void 
operator()(ChannelMap::value_type& vt) { ChannelMsg cmsg(vt.second->get()); if (cmsg.rb().len() != 0) { NodeMap::iterator i(tp_.find(vt.first.jj())); gcomm_assert(i != tp_.end()); gu_trace(NodeMap::value(i)->protos().front()->handle_up( &tp_, cmsg.rb(), ProtoUpMeta(cmsg.source()))); } } private: NodeMap& tp_; }; class ExpireTimersOp { public: ExpireTimersOp() { } void operator()(NodeMap::value_type& vt) { NodeMap::value(vt)->handle_timers(); } }; void gcomm::Channel::put(const Datagram& rb, const UUID& source) { Datagram dg(rb); // if (dg.is_normalized() == false) // { // dg.normalize(); // } queue_.push_back(make_pair(latency_, ChannelMsg(dg, source))); } ChannelMsg gcomm::Channel::get() { while (queue_.empty() == false) { pair& p(queue_.front()); if (p.first == 0) { // todo: packet loss goes here if (loss() < 1.) { double rnd(double(rand())/double(RAND_MAX)); if (loss() < rnd) { queue_.pop_front(); return ChannelMsg(Datagram(), UUID::nil()); } } ChannelMsg ret(p.second); queue_.pop_front(); return ret; } else { --p.first; return ChannelMsg(Datagram(), UUID::nil()); } } return ChannelMsg(Datagram(), UUID::nil()); } gcomm::PropagationMatrix::~PropagationMatrix() { for_each(prop_.begin(), prop_.end(), ChannelMap::DeleteObject()); } void gcomm::PropagationMatrix::insert_tp(DummyNode* t) { gu_trace(tp_.insert_unique(make_pair(t->index(), t))); for_each(tp_.begin(), tp_.end(), LinkOp(*t, prop_)); } void gcomm::PropagationMatrix::set_latency(const size_t ii, const size_t jj, const size_t lat) { ChannelMap::iterator i; gu_trace(i = prop_.find_checked(MatrixElem(ii, jj))); ChannelMap::value(i)->set_latency(lat); } void gcomm::PropagationMatrix::set_loss(const size_t ii, const size_t jj, const double loss) { ChannelMap::iterator i; gu_trace(i = prop_.find_checked(MatrixElem(ii, jj))); ChannelMap::value(i)->set_loss(loss); } void gcomm::PropagationMatrix::split(const size_t ii, const size_t jj) { set_loss(ii, jj, 0.); set_loss(jj, ii, 0.); } void gcomm::PropagationMatrix::merge(const size_t ii, const size_t jj, const double loss) { set_loss(ii, jj, loss); set_loss(jj, ii, loss); } void gcomm::PropagationMatrix::expire_timers() { for_each(tp_.begin(), tp_.end(), ExpireTimersOp()); } void gcomm::PropagationMatrix::propagate_n(size_t n) { while (n-- > 0) { for_each(prop_.begin(), prop_.end(), PropagateOp(tp_)); } } void gcomm::PropagationMatrix::propagate_until_empty() { do { for_each(prop_.begin(), prop_.end(), PropagateOp(tp_)); } while (count_channel_msgs() > 0); } void gcomm::PropagationMatrix::propagate_until_cvi(bool handle_timers) { bool all_in = false; do { propagate_n(10); all_in = all_in_cvi(); if (all_in == false && handle_timers == true) { expire_timers(); } } while (all_in == false); } size_t gcomm::PropagationMatrix::count_channel_msgs() const { size_t ret = 0; for (ChannelMap::const_iterator i = prop_.begin(); i != prop_.end(); ++i) { ret += ChannelMap::value(i)->n_msgs(); } return ret; } bool gcomm::PropagationMatrix::all_in_cvi() const { for (std::map::const_iterator i = tp_.begin(); i != tp_.end(); ++i) { if (i->second->in_cvi() == false) { return false; } } return true; } static void check_traces(const Trace& t1, const Trace& t2) { for (Trace::ViewTraceMap::const_iterator i = t1.view_traces().begin(); i != t1.view_traces().end(); ++i) { Trace::ViewTraceMap::const_iterator j = t2.view_traces().find(Trace::ViewTraceMap::key(i)); if (j == t2.view_traces().end()) continue; ViewType type = i->first.type(); // @todo Proper checks for PRIM and NON_PRIM if (type == V_TRANS || type == V_REG) 
{ Trace::ViewTraceMap::const_iterator i_next(i); ++i_next; Trace::ViewTraceMap::const_iterator j_next(j); ++j_next; if (type == V_TRANS) { // if next reg view is same, then views and msgs are the same. if (i_next != t1.view_traces().end() && j_next != t2.view_traces().end() && i_next->first == j_next->first) { gcomm_assert(*i == *j) << "trace differ: \n\n" << *i << "\n\n" << *j << "\n\n" "next views: \n\n" << *i_next << "\n\n" << *j_next; } } if (type == V_REG) { // members are same all the times. gcomm_assert(i->second.view().members() == j->second.view().members()) << "trace differ: \n\n" << *i << "\n\n" << *j; // if next trans view has same members, then msgs are the same. if (i_next != t1.view_traces().end() && j_next != t2.view_traces().end()) { if (i_next->second.view().members() == j_next->second.view().members()) { gcomm_assert(i->second.msgs() == j->second.msgs()) << "trace differ: \n\n" << *i << "\n\n" << *j << "\n\n" "next views: \n\n" << *i_next << "\n\n" << *j_next; } else { // if not, then members should be disjoint. std::map output; std::set_intersection(i_next->second.view().members().begin(), i_next->second.view().members().end(), j_next->second.view().members().begin(), j_next->second.view().members().end(), std::inserter(output,output.begin())); gcomm_assert(output.size() == 0) << "trace differ: \n\n" << *i << "\n\n" << *j << "\n\n" "next views: \n\n" << *i_next << "\n\n" << *j_next; } } // if previous trans view id is the same. // the reg view should be the same. // if previous trans view id is not same. // intersections of joined, left, partitioned sets are empty. if (i == t1.view_traces().begin() || j == t2.view_traces().begin()) continue; Trace::ViewTraceMap::const_iterator i_prev(i); --i_prev; Trace::ViewTraceMap::const_iterator j_prev(j); --j_prev; if (i_prev->first == j_prev->first) { gcomm_assert(i->second.view() == j->second.view()) << "trace differ: \n\n" << *i << "\n\n" << *j << "\n\n" "previous views: \n\n" << *i_prev << "\n\n" << *j_prev; } else { std::map output; int joined_size = 0, left_size = 0, part_size = 0; std::set_intersection(i->second.view().joined().begin(), i->second.view().joined().end(), j->second.view().joined().begin(), j->second.view().joined().end(), std::inserter(output, output.begin())); joined_size = output.size(); output.clear(); std::set_intersection(i->second.view().left().begin(), i->second.view().left().end(), j->second.view().left().begin(), j->second.view().left().end(), std::inserter(output, output.begin())); left_size = output.size(); output.clear(); std::set_intersection(i->second.view().partitioned().begin(), i->second.view().partitioned().end(), j->second.view().partitioned().begin(), j->second.view().partitioned().end(), std::inserter(output, output.begin())); part_size = output.size(); output.clear(); gcomm_assert(i->second.view().members() == j->second.view().members() && joined_size == 0 && left_size == 0 && part_size == 0) << "trace differ: \n\n" << *i << "\n\n" << *j << "\n\n" "previous views: \n\n" << *i_prev << "\n\n" << *j_prev; } } } } } class CheckTraceOp { public: CheckTraceOp(const vector& nvec) : nvec_(nvec) { } void operator()(const DummyNode* n) const { for (vector::const_iterator i = nvec_.begin(); i != nvec_.end(); ++i) { if ((*i)->index() != n->index()) { gu_trace(check_traces((*i)->trace(), n->trace())); } } } private: const vector& nvec_; }; void gcomm::check_trace(const vector& nvec) { for_each(nvec.begin(), nvec.end(), CheckTraceOp(nvec)); } 
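// -----------------------------------------------------------------------------
// Illustrative, standalone sketch (not part of the gcomm build or test suite):
// the latency/loss behaviour implemented by Channel::put()/Channel::get()
// above, reduced to the standard library so it can be compiled and run on its
// own.  All names below (demo::LossyDelayLine, etc.) are hypothetical and do
// not exist in gcomm.  The delivery semantics mirror Channel::get(): at most
// one head message per call, a per-message latency countdown, and a drop
// probability of (1 - loss), where loss is the delivery probability as in
// Channel.
// -----------------------------------------------------------------------------

#include <cstdlib>
#include <deque>
#include <iostream>
#include <string>
#include <utility>

namespace demo
{
    class LossyDelayLine
    {
    public:
        LossyDelayLine(size_t latency, double loss)
            : latency_(latency), loss_(loss), queue_() { }

        // Queue a message with the configured latency, like Channel::put().
        void put(const std::string& msg)
        {
            queue_.push_back(std::make_pair(latency_, msg));
        }

        // Deliver at most one message per call.  An empty string means
        // "nothing deliverable on this tick", the way Channel::get() returns
        // an empty ChannelMsg.
        std::string get()
        {
            if (queue_.empty()) return "";
            std::pair<size_t, std::string>& head(queue_.front());
            if (head.first > 0)           // still in flight: count down latency
            {
                --head.first;
                return "";
            }
            std::string ret(head.second);
            queue_.pop_front();
            double rnd(double(std::rand()) / double(RAND_MAX));
            if (loss_ < rnd) return "";   // dropped with probability (1 - loss_)
            return ret;
        }

    private:
        size_t latency_;
        double loss_;
        std::deque<std::pair<size_t, std::string> > queue_;
    };
}

int main()
{
    demo::LossyDelayLine ch(2, 0.9);      // 2-tick latency, 90% delivery
    ch.put("hello");
    for (int tick = 0; tick < 5; ++tick)
    {
        std::string m(ch.get());
        std::cout << "tick " << tick << ": "
                  << (m.empty() ? "<nothing>" : m) << std::endl;
    }
    return 0;
}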
galera-3-25.3.20/gcomm/test/check_gmcast.cpp0000644000015300001660000002657113042054732020351 0ustar jenkinsjenkins/* * Copyright (C) 2009-2014 Codership Oy */ #include "check_gcomm.hpp" #include "gcomm/protostack.hpp" #include "gcomm/conf.hpp" #include "gmcast.hpp" #include "gmcast_message.hpp" #include "gu_asio.hpp" // gu::ssl_register_params() using namespace std; using namespace gcomm; using namespace gcomm::gmcast; using namespace gu::datetime; using gu::byte_t; using gu::Buffer; #include // // run_all_tests is set tuo true by default. To disable gmcast tests // which use real TCP transport, set GALERA_TEST_DETERMINISTIC env // variable before running gmcast test suite. // static bool run_all_tests(true); static struct run_all_gmcast_tests { public: run_all_gmcast_tests() { if (::getenv("GALERA_TEST_DETERMINISTIC")) { run_all_tests = false; } else { run_all_tests = true; } } } run_all_gmcast_tests; // Note: Multicast test(s) not run by default. static bool test_multicast(false); string mcast_param("gmcast.mcast_addr=239.192.0.11&gmcast.mcast_port=4567"); START_TEST(test_gmcast_multicast) { string uri1("gmcast://?gmcast.group=test&gmcast.mcast_addr=239.192.0.11"); gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); auto_ptr pnet(Protonet::create(conf)); Transport* gm1(Transport::create(*pnet, uri1)); gm1->connect(); gm1->close(); delete gm1; } END_TEST START_TEST(test_gmcast_w_user_messages) { class User : public Toplay { Transport* tp_; size_t recvd_; Protostack pstack_; explicit User(const User&); void operator=(User&); public: User(Protonet& pnet, const std::string& listen_addr, const std::string& remote_addr) : Toplay(pnet.conf()), tp_(0), recvd_(0), pstack_() { string uri("gmcast://"); uri += remote_addr; // != 0 ? 
remote_addr : ""; uri += "?"; uri += "tcp.non_blocking=1"; uri += "&"; uri += "gmcast.group=testgrp"; uri += "&gmcast.time_wait=PT0.5S"; if (test_multicast == true) { uri += "&" + mcast_param; } uri += "&gmcast.listen_addr=tcp://"; uri += listen_addr; tp_ = Transport::create(pnet, uri); } ~User() { delete tp_; } void start(const std::string& peer = "") { if (peer == "") { tp_->connect(); } else { tp_->connect(peer); } pstack_.push_proto(tp_); pstack_.push_proto(this); } void stop() { pstack_.pop_proto(this); pstack_.pop_proto(tp_); tp_->close(); } void handle_timer() { byte_t buf[16]; memset(buf, 0xa5, sizeof(buf)); Datagram dg(Buffer(buf, buf + sizeof(buf))); send_down(dg, ProtoDownMeta()); } void handle_up(const void* cid, const Datagram& rb, const ProtoUpMeta& um) { if (rb.len() < rb.offset() + 16) { gu_throw_fatal << "offset error"; } char buf[16]; memset(buf, 0xa5, sizeof(buf)); // cppcheck-suppress uninitstring if (memcmp(buf, &rb.payload()[0] + rb.offset(), 16) != 0) { gu_throw_fatal << "content mismatch"; } recvd_++; } size_t recvd() const { return recvd_; } void set_recvd(size_t val) { recvd_ = val; } Protostack& pstack() { return pstack_; } std::string listen_addr() const { return tp_->listen_addr(); } }; log_info << "START"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); mark_point(); auto_ptr pnet(Protonet::create(conf)); mark_point(); User u1(*pnet, "127.0.0.1:0", ""); pnet->insert(&u1.pstack()); log_info << "u1 start"; u1.start(); pnet->event_loop(Sec/10); fail_unless(u1.recvd() == 0); log_info << "u2 start"; User u2(*pnet, "127.0.0.1:0", u1.listen_addr().erase(0, strlen("tcp://"))); pnet->insert(&u2.pstack()); u2.start(); while (u1.recvd() <= 50 || u2.recvd() <= 50) { u1.handle_timer(); u2.handle_timer(); pnet->event_loop(Sec/10); } log_info << "u3 start"; User u3(*pnet, "127.0.0.1:0", u2.listen_addr().erase(0, strlen("tcp://"))); pnet->insert(&u3.pstack()); u3.start(); while (u3.recvd() <= 50) { u1.handle_timer(); u2.handle_timer(); pnet->event_loop(Sec/10); } log_info << "u4 start"; User u4(*pnet, "127.0.0.1:0", u2.listen_addr().erase(0, strlen("tcp://"))); pnet->insert(&u4.pstack()); u4.start(); while (u4.recvd() <= 50) { u1.handle_timer(); u2.handle_timer(); pnet->event_loop(Sec/10); } log_info << "u1 stop"; u1.stop(); pnet->erase(&u1.pstack()); pnet->event_loop(3*Sec); log_info << "u1 start"; pnet->insert(&u1.pstack()); u1.start(u2.listen_addr()); u1.set_recvd(0); u2.set_recvd(0); u3.set_recvd(0); u4.set_recvd(0); for (size_t i(0); i < 30; ++i) { u1.handle_timer(); u2.handle_timer(); pnet->event_loop(Sec/10); } fail_unless(u1.recvd() != 0); fail_unless(u2.recvd() != 0); fail_unless(u3.recvd() != 0); fail_unless(u4.recvd() != 0); pnet->erase(&u4.pstack()); pnet->erase(&u3.pstack()); pnet->erase(&u2.pstack()); pnet->erase(&u1.pstack()); u1.stop(); u2.stop(); u3.stop(); u4.stop(); pnet->event_loop(0); } END_TEST // not run by default, hard coded port START_TEST(test_gmcast_auto_addr) { log_info << "START"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); auto_ptr pnet(Protonet::create(conf)); Transport* tp1 = Transport::create(*pnet, "gmcast://?gmcast.group=test"); Transport* tp2 = Transport::create(*pnet, "gmcast://127.0.0.1:4567" "?gmcast.group=test&gmcast.listen_addr=tcp://127.0.0.1:10002"); pnet->insert(&tp1->pstack()); pnet->insert(&tp2->pstack()); tp1->connect(); tp2->connect(); pnet->event_loop(Sec); pnet->erase(&tp2->pstack()); pnet->erase(&tp1->pstack()); tp1->close(); tp2->close(); delete 
tp1; delete tp2; pnet->event_loop(0); } END_TEST START_TEST(test_gmcast_forget) { gu_conf_self_tstamp_on(); log_info << "START"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); auto_ptr pnet(Protonet::create(conf)); Transport* tp1 = Transport::create(*pnet, "gmcast://" "?gmcast.group=test&gmcast.listen_addr=tcp://127.0.0.1:0"); pnet->insert(&tp1->pstack()); tp1->connect(); Transport* tp2 = Transport::create(*pnet, std::string("gmcast://") + tp1->listen_addr().erase( 0, strlen("tcp://")) + "?gmcast.group=test&gmcast.listen_addr=tcp://127.0.0.1:0"); Transport* tp3 = Transport::create(*pnet, std::string("gmcast://") + tp1->listen_addr().erase( 0, strlen("tcp://")) + "?gmcast.group=test&gmcast.listen_addr=tcp://127.0.0.1:0"); pnet->insert(&tp2->pstack()); pnet->insert(&tp3->pstack()); tp2->connect(); tp3->connect(); pnet->event_loop(Sec); UUID uuid1 = tp1->uuid(); tp1->close(); tp2->close(uuid1); tp3->close(uuid1); pnet->event_loop(10*Sec); tp1->connect(); // @todo Implement this using User class above and verify that // tp2 and tp3 communicate with each other but not with tp1 log_info << "####"; pnet->event_loop(Sec); pnet->erase(&tp3->pstack()); pnet->erase(&tp2->pstack()); pnet->erase(&tp1->pstack()); tp1->close(); tp2->close(); tp3->close(); delete tp1; delete tp2; delete tp3; pnet->event_loop(0); } END_TEST // not run by default, hard coded port START_TEST(test_trac_380) { gu_conf_self_tstamp_on(); log_info << "START"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); std::auto_ptr pnet(gcomm::Protonet::create(conf)); // caused either assertion or exception gcomm::Transport* tp1(gcomm::Transport::create( *pnet, "gmcast://127.0.0.1:4567?" "gmcast.group=test")); pnet->insert(&tp1->pstack()); tp1->connect(); try { pnet->event_loop(Sec); } catch (gu::Exception& e) { fail_unless(e.get_errno() == EINVAL, "unexpected errno: %d, cause %s", e.get_errno(), e.what()); } pnet->erase(&tp1->pstack()); tp1->close(); delete tp1; pnet->event_loop(0); } END_TEST START_TEST(test_trac_828) { gu_conf_self_tstamp_on(); log_info << "START (test_trac_828)"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); std::auto_ptr pnet(gcomm::Protonet::create(conf)); // If the bug is present, this will throw because of own address being // in address list. try { Transport* tp(gcomm::Transport::create( *pnet, "gmcast://127.0.0.1:4567?" 
"gmcast.group=test&" "gmcast.listen_addr=tcp://127.0.0.1:4567")); delete tp; } catch (gu::Exception& e) { fail("test_trac_828, exception thrown because of having own address " "in address list"); } } END_TEST Suite* gmcast_suite() { Suite* s = suite_create("gmcast"); TCase* tc; if (run_all_tests == true) { if (test_multicast == true) { tc = tcase_create("test_gmcast_multicast"); tcase_add_test(tc, test_gmcast_multicast); suite_add_tcase(s, tc); } tc = tcase_create("test_gmcast_w_user_messages"); tcase_add_test(tc, test_gmcast_w_user_messages); tcase_set_timeout(tc, 30); suite_add_tcase(s, tc); // not run by default, hard coded port tc = tcase_create("test_gmcast_auto_addr"); tcase_add_test(tc, test_gmcast_auto_addr); suite_add_tcase(s, tc); tc = tcase_create("test_gmcast_forget"); tcase_add_test(tc, test_gmcast_forget); tcase_set_timeout(tc, 20); suite_add_tcase(s, tc); // not run by default, hard coded port tc = tcase_create("test_trac_380"); tcase_add_test(tc, test_trac_380); suite_add_tcase(s, tc); tc = tcase_create("test_trac_828"); tcase_add_test(tc, test_trac_828); suite_add_tcase(s, tc); } return s; } galera-3-25.3.20/gcomm/test/check_evs2.cpp0000644000015300001660000017341013042054732017745 0ustar jenkinsjenkins/* * Copyright (C) 2009-2014 Codership Oy * * $Id$ */ /*! * @file Unit tests for refactored EVS */ #include "evs_proto.hpp" #include "evs_input_map2.hpp" #include "evs_message2.hpp" #include "evs_seqno.hpp" #include "check_gcomm.hpp" #include "check_templ.hpp" #include "check_trace.hpp" #include "gcomm/conf.hpp" #include "gu_asio.hpp" // gu::ssl_register_params() #include #include #include #include "check.h" // // run_all_evs_tests is set to true by default. To disable evs tests // which use real TCP transport or depend on wall clock, // set GALERA_TEST_DETERMINISTIC env // variable before running evs test suite. 
// static class run_all_evs_tests { public: run_all_evs_tests() : run_all_tests_() { if (::getenv("GALERA_TEST_DETERMINISTIC")) { run_all_tests_ = false; } else { run_all_tests_ = true; } } bool operator()() const { return run_all_tests_; } private: bool run_all_tests_; } run_all_evs_tests; using namespace std; using namespace std::rel_ops; using namespace gu::datetime; using namespace gcomm; using namespace gcomm::evs; using gu::DeleteObject; void init_rand() { unsigned int seed(static_cast(time(0))); log_info << "rand seed " << seed; srand(seed); } void init_rand(unsigned int seed) { log_info << "rand seed " << seed; srand(seed); } START_TEST(test_range) { log_info << "START"; Range r(3, 6); check_serialization(r, 2 * sizeof(seqno_t), Range()); } END_TEST START_TEST(test_message) { log_info << "START"; UUID uuid1(0, 0); ViewId view_id(V_TRANS, uuid1, 4567); seqno_t seq(478), aru_seq(456), seq_range(7); UserMessage um(0, uuid1, view_id, seq, aru_seq, seq_range, O_SAFE, 75433, 0xab, Message::F_SOURCE); fail_unless(um.serial_size() % 4 == 0); check_serialization(um, um.serial_size(), UserMessage()); AggregateMessage am(0xab, 17457, 0x79); check_serialization(am, 4, AggregateMessage()); DelegateMessage dm(0, uuid1, view_id); dm.set_source(uuid1); check_serialization(dm, dm.serial_size(), DelegateMessage()); MessageNodeList node_list; node_list.insert(make_pair(uuid1, MessageNode())); node_list.insert(make_pair(UUID(2), MessageNode(true, false, 254, true, 1, ViewId(V_REG), 5, Range(7, 8)))); JoinMessage jm(0, uuid1, view_id, 8, 5, 27, node_list); jm.set_source(uuid1); check_serialization(jm, jm.serial_size(), JoinMessage()); InstallMessage im(0, uuid1, view_id, ViewId(V_REG, view_id.uuid(), view_id.seq()), 8, 5, 27, node_list); im.set_source(uuid1); check_serialization(im, im.serial_size(), InstallMessage()); LeaveMessage lm(0, uuid1, view_id, 45, 88, 3456); lm.set_source(uuid1); check_serialization(lm, lm.serial_size(), LeaveMessage()); DelayedListMessage dlm(0, uuid1, view_id, 4576); dlm.add(UUID(2), 23); dlm.add(UUID(3), 45); dlm.add(UUID(5), 255); check_serialization(dlm, dlm.serial_size(), DelayedListMessage()); } END_TEST START_TEST(test_input_map_insert) { log_info << "START"; UUID uuid1(1), uuid2(2); InputMap im; ViewId view(V_REG, uuid1, 0); try { im.insert(0, UserMessage(0, uuid1, view, 0)); fail(""); } catch (...) 
{ } im.reset(1); im.insert(0, UserMessage(0, uuid1, view, 0)); im.clear(); im.reset(2); for (seqno_t s = 0; s < 10; ++s) { im.insert(0, UserMessage(0, uuid1, view, s)); im.insert(1, UserMessage(0, uuid2, view, s)); } for (seqno_t s = 0; s < 10; ++s) { InputMap::iterator i = im.find(0, s); fail_if(i == im.end()); fail_unless(InputMapMsgIndex::value(i).msg().source() == uuid1); fail_unless(InputMapMsgIndex::value(i).msg().seq() == s); i = im.find(1, s); fail_if(i == im.end()); fail_unless(InputMapMsgIndex::value(i).msg().source() == uuid2); fail_unless(InputMapMsgIndex::value(i).msg().seq() == s); } } END_TEST START_TEST(test_input_map_find) { log_info << "START"; InputMap im; UUID uuid1(1); ViewId view(V_REG, uuid1, 0); im.reset(1); im.insert(0, UserMessage(0, uuid1, view, 0)); fail_if(im.find(0, 0) == im.end()); im.insert(0, UserMessage(0, uuid1, view, 2)); im.insert(0, UserMessage(0, uuid1, view, 4)); im.insert(0, UserMessage(0, uuid1, view, 7)); fail_if(im.find(0, 2) == im.end()); fail_if(im.find(0, 4) == im.end()); fail_if(im.find(0, 7) == im.end()); fail_unless(im.find(0, 3) == im.end()); fail_unless(im.find(0, 5) == im.end()); fail_unless(im.find(0, 6) == im.end()); fail_unless(im.find(0, 8) == im.end()); } END_TEST START_TEST(test_input_map_safety) { log_info << "START"; InputMap im; UUID uuid1(1); size_t index1(0); ViewId view(V_REG, uuid1, 0); im.reset(1); im.insert(index1, UserMessage(0, uuid1, view, 0)); fail_unless(im.aru_seq() == 0); im.insert(index1, UserMessage(0, uuid1, view, 1)); fail_unless(im.aru_seq() == 1); im.insert(index1, UserMessage(0, uuid1, view, 2)); fail_unless(im.aru_seq() == 2); im.insert(index1, UserMessage(0, uuid1, view, 3)); fail_unless(im.aru_seq() == 3); im.insert(index1, UserMessage(0, uuid1, view, 5)); fail_unless(im.aru_seq() == 3); im.insert(index1, UserMessage(0, uuid1, view, 4)); fail_unless(im.aru_seq() == 5); InputMap::iterator i = im.find(index1, 0); fail_unless(im.is_fifo(i) == true); fail_unless(im.is_agreed(i) == true); fail_if(im.is_safe(i) == true); im.set_safe_seq(index1, 0); fail_unless(im.is_safe(i) == true); im.set_safe_seq(index1, 5); i = im.find(index1, 5); fail_unless(im.is_safe(i) == true); im.insert(index1, UserMessage(0, uuid1, view, 7)); im.set_safe_seq(index1, im.aru_seq()); i = im.find(index1, 7); fail_if(im.is_safe(i) == true); } END_TEST START_TEST(test_input_map_erase) { log_info << "START"; InputMap im; size_t index1(0); UUID uuid1(1); ViewId view(V_REG, uuid1, 1); im.reset(1); for (seqno_t s = 0; s < 10; ++s) { im.insert(index1, UserMessage(0, uuid1, view, s)); } for (seqno_t s = 0; s < 10; ++s) { InputMap::iterator i = im.find(index1, s); fail_unless(i != im.end()); im.erase(i); i = im.find(index1, s); fail_unless(i == im.end()); (void)im.recover(index1, s); } im.set_safe_seq(index1, 9); try { im.recover(index1, 9); fail(""); } catch (...) 
{ } } END_TEST START_TEST(test_input_map_overwrap) { log_info << "START"; InputMap im; const size_t n_nodes(5); ViewId view(V_REG, UUID(1), 1); vector uuids; for (size_t n = 0; n < n_nodes; ++n) { uuids.push_back(UUID(static_cast(n + 1))); } im.reset(n_nodes); Date start(Date::now()); size_t cnt(0); seqno_t last_safe(-1); for (seqno_t seq = 0; seq < 100000; ++seq) { for (size_t i = 0; i < n_nodes; ++i) { UserMessage um(0, uuids[i], view, seq); (void)im.insert(i, um); if ((seq + 5) % 10 == 0) { last_safe = um.seq() - 3; im.set_safe_seq(i, last_safe); for (InputMap::iterator ii = im.begin(); ii != im.end() && im.is_safe(ii) == true; ii = im.begin()) { im.erase(ii); } } cnt++; } gcomm_assert(im.aru_seq() == seq); gcomm_assert(im.safe_seq() == last_safe); } Date stop(Date::now()); double div(double(stop.get_utc() - start.get_utc())/gu::datetime::Sec); log_info << "input map msg rate " << double(cnt)/div; } END_TEST class InputMapInserter { public: InputMapInserter(InputMap& im) : im_(im) { } void operator()(const pair& p) const { im_.insert(p.first, p.second); } private: InputMap& im_; }; START_TEST(test_input_map_random_insert) { log_info << "START"; init_rand(); seqno_t window(1024); seqno_t n_seqnos(1024); size_t n_uuids(4); vector uuids(n_uuids); vector > msgs(static_cast(n_uuids*n_seqnos)); ViewId view_id(V_REG, UUID(1), 1); InputMap im; for (size_t i = 0; i < n_uuids; ++i) { uuids[i] = (static_cast(i + 1)); } im.reset(n_uuids, window); for (seqno_t j = 0; j < n_seqnos; ++j) { for (size_t i = 0; i < n_uuids; ++i) { msgs[static_cast(j*n_uuids) + i] = make_pair(i, UserMessage(0, uuids[i], view_id, j)); } } vector > random_msgs(msgs); random_shuffle(random_msgs.begin(), random_msgs.end()); for_each(random_msgs.begin(), random_msgs.end(), InputMapInserter(im)); size_t n = 0; for (InputMap::iterator i = im.begin(); i != im.end(); ++i) { const InputMapMsg& msg(InputMapMsgIndex::value(i)); fail_unless(msg.msg() == msgs[n].second); fail_if(im.is_safe(i) == true); ++n; } fail_unless(im.aru_seq() == n_seqnos - 1); fail_unless(im.safe_seq() == -1); for (size_t i = 0; i < n_uuids; ++i) { fail_unless(im.range(i) == Range(n_seqnos, n_seqnos - 1)); im.set_safe_seq(i, n_seqnos - 1); } fail_unless(im.safe_seq() == n_seqnos - 1); } END_TEST static Datagram* get_msg(DummyTransport* tp, Message* msg, bool release = true) { Datagram* rb = tp->out(); if (rb != 0) { gu_trace(Proto::unserialize_message(tp->uuid(), *rb, msg)); if (release == true) { delete rb; } } return rb; } static void single_join(DummyTransport* t, Proto* p) { Message jm, im, gm; // Initial state is joining p->shift_to(Proto::S_JOINING); // Send join must produce emitted join message p->send_join(); Datagram* rb = get_msg(t, &jm); fail_unless(rb != 0); fail_unless(jm.type() == Message::T_JOIN); // Install message is emitted at the end of JOIN handling // 'cause this is the only instance and is always consistent // with itself rb = get_msg(t, &im); fail_unless(rb != 0); fail_unless(im.type() == Message::T_INSTALL); // Handling INSTALL message emits three gap messages, // one for receiving install message (commit gap), one for // shift to install and one for shift to operational rb = get_msg(t, &gm); fail_unless(rb != 0); fail_unless(gm.type() == Message::T_GAP); fail_unless((gm.flags() & Message::F_COMMIT) != 0); rb = get_msg(t, &gm); fail_unless(rb != 0); fail_unless(gm.type() == Message::T_GAP); fail_unless((gm.flags() & Message::F_COMMIT) == 0); rb = get_msg(t, &gm); fail_unless(rb != 0); fail_unless(gm.type() == Message::T_GAP); 
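// Third gap message corresponds to the shift to operational; like the second one, it should not carry the F_COMMIT flag.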
fail_unless((gm.flags() & Message::F_COMMIT) == 0); // State must have evolved JOIN -> S_GATHER -> S_INSTALL -> S_OPERATIONAL fail_unless(p->state() == Proto::S_OPERATIONAL); // Handle join message again, must stay in S_OPERATIONAL, must not // emit anything p->handle_msg(jm); rb = get_msg(t, &gm); fail_unless(rb == 0); fail_unless(p->state() == Proto::S_OPERATIONAL); } class DummyUser : public Toplay { public: DummyUser(gu::Config& conf) : Toplay(conf) { } void handle_up(const void*, const Datagram&, const ProtoUpMeta&) { } private: }; START_TEST(test_proto_single_join) { log_info << "START"; gu::Config conf; mark_point(); gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); UUID uuid(1); DummyTransport t(uuid); mark_point(); DummyUser u(conf); mark_point(); Proto p(conf, uuid, 0); mark_point(); gcomm::connect(&t, &p); gcomm::connect(&p, &u); single_join(&t, &p); } END_TEST static void double_join(DummyTransport* t1, Proto* p1, DummyTransport* t2, Proto* p2) { Message jm; Message im; Message gm; Message gm2; Message msg; Datagram* rb; // Initial states check p2->shift_to(Proto::S_JOINING); fail_unless(p1->state() == Proto::S_OPERATIONAL); fail_unless(p2->state() == Proto::S_JOINING); // Send join message, don't self handle immediately // Expected output: one join message p2->send_join(false); fail_unless(p2->state() == Proto::S_JOINING); rb = get_msg(t2, &jm); fail_unless(rb != 0); fail_unless(jm.type() == Message::T_JOIN); rb = get_msg(t2, &msg); fail_unless(rb == 0); // Handle node 2's join on node 1 // Expected output: shift to S_GATHER and one join message p1->handle_msg(jm); fail_unless(p1->state() == Proto::S_GATHER); rb = get_msg(t1, &jm); fail_unless(rb != 0); fail_unless(jm.type() == Message::T_JOIN); rb = get_msg(t1, &msg); fail_unless(rb == 0); // Handle node 1's join on node 2 // Expected output: shift to S_GATHER and one join message p2->handle_msg(jm); fail_unless(p2->state() == Proto::S_GATHER); rb = get_msg(t2, &jm); fail_unless(rb != 0); fail_unless(jm.type() == Message::T_JOIN); rb = get_msg(t2, &msg); fail_unless(rb == 0); // Handle node 2's join on node 1 // Expected output: Install and commit gap messages, state stays in S_GATHER p1->handle_msg(jm); fail_unless(p1->state() == Proto::S_GATHER); rb = get_msg(t1, &im); fail_unless(rb != 0); fail_unless(im.type() == Message::T_INSTALL); rb = get_msg(t1, &gm); fail_unless(rb != 0); fail_unless(gm.type() == Message::T_GAP); fail_unless((gm.flags() & Message::F_COMMIT) != 0); rb = get_msg(t1, &msg); fail_unless(rb == 0); // Handle install message on node 2 // Expected output: commit gap message and state stays in S_GATHER p2->handle_msg(im); fail_unless(p2->state() == Proto::S_GATHER); rb = get_msg(t2, &gm2); fail_unless(rb != 0); fail_unless(gm2.type() == Message::T_GAP); fail_unless((gm2.flags() & Message::F_COMMIT) != 0); rb = get_msg(t2, &msg); fail_unless(rb == 0); // Handle gap messages // Expected output: Both nodes shift to S_INSTALL, // both send gap messages p1->handle_msg(gm2); fail_unless(p1->state() == Proto::S_INSTALL); Message gm12; rb = get_msg(t1, &gm12); fail_unless(rb != 0); fail_unless(gm12.type() == Message::T_GAP); fail_unless((gm12.flags() & Message::F_COMMIT) == 0); rb = get_msg(t1, &msg); fail_unless(rb == 0); p2->handle_msg(gm); fail_unless(p2->state() == Proto::S_INSTALL); Message gm22; rb = get_msg(t2, &gm22); fail_unless(rb != 0); fail_unless(gm22.type() == Message::T_GAP); fail_unless((gm22.flags() & Message::F_COMMIT) == 0); rb = get_msg(t2, &msg); fail_unless(rb == 0); // 
Handle final gap messages, expected output shift to operational // and gap message p1->handle_msg(gm22); fail_unless(p1->state() == Proto::S_OPERATIONAL); rb = get_msg(t1, &msg); fail_unless(rb != 0); fail_unless(msg.type() == Message::T_GAP); fail_unless((msg.flags() & Message::F_COMMIT) == 0); rb = get_msg(t1, &msg); fail_unless(rb == 0); p2->handle_msg(gm12); fail_unless(p2->state() == Proto::S_OPERATIONAL); rb = get_msg(t2, &msg); fail_unless(rb != 0); fail_unless(msg.type() == Message::T_GAP); fail_unless((msg.flags() & Message::F_COMMIT) == 0); rb = get_msg(t2, &msg); fail_unless(rb == 0); } START_TEST(test_proto_double_join) { log_info << "START"; gu::Config conf; mark_point(); gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); UUID uuid1(1), uuid2(2); DummyTransport t1(uuid1), t2(uuid2); mark_point(); DummyUser u1(conf), u2(conf); mark_point(); Proto p1(conf, uuid1, 0), p2(conf, uuid2, 0); gcomm::connect(&t1, &p1); gcomm::connect(&p1, &u1); gcomm::connect(&t2, &p2); gcomm::connect(&p2, &u2); single_join(&t1, &p1); double_join(&t1, &p1, &t2, &p2); } END_TEST static gu::Config gu_conf; static DummyNode* create_dummy_node(size_t idx, int version, const string& suspect_timeout = "PT1H", const string& inactive_timeout = "PT1H", const string& retrans_period = "PT10M") { // reset conf to avoid stale config in case of nofork gu_conf = gu::Config(); gu::ssl_register_params(gu_conf); gcomm::Conf::register_params(gu_conf); string conf = "evs://?" + Conf::EvsViewForgetTimeout + "=PT1H&" + Conf::EvsInactiveCheckPeriod + "=" + to_string(Period(suspect_timeout)/3) + "&" + Conf::EvsSuspectTimeout + "=" + suspect_timeout + "&" + Conf::EvsInactiveTimeout + "=" + inactive_timeout + "&" + Conf::EvsKeepalivePeriod + "=" + retrans_period + "&" + Conf::EvsJoinRetransPeriod + "=" + retrans_period + "&" + Conf::EvsInfoLogMask + "=0x7" + "&" + Conf::EvsVersion + "=" + gu::to_string(version); if (::getenv("EVS_DEBUG_MASK") != 0) { conf += "&" + Conf::EvsDebugLogMask + "=" + ::getenv("EVS_DEBUG_MASK"); } list protos; UUID uuid(static_cast(idx)); protos.push_back(new DummyTransport(uuid, false)); protos.push_back(new Proto(gu_conf, uuid, 0, conf)); return new DummyNode(gu_conf, idx, protos); } namespace { gcomm::evs::Proto* evs_from_dummy(DummyNode* dn) { return reinterpret_cast(dn->protos().back()); } } static void join_node(PropagationMatrix* p, DummyNode* n, bool first = false) { gu_trace(p->insert_tp(n)); gu_trace(n->connect(first)); } static void send_n(DummyNode* node, const size_t n) { for (size_t i = 0; i < n; ++i) { gu_trace(node->send()); } } static void set_cvi(vector& nvec, size_t i_begin, size_t i_end, size_t seq) { for (size_t i = i_begin; i <= i_end; ++i) { nvec[i]->set_cvi(ViewId(V_REG, nvec[i_begin]->uuid(), static_cast(seq))); } } template class ViewSeq { public: ViewSeq() { } bool operator()(const C& a, const C& b) const { return (a->trace().current_view_trace().view().id().seq() < b->trace().current_view_trace().view().id().seq()); } }; static uint32_t get_max_view_seq(const std::vector& dnv, size_t i, size_t j) { if (i == dnv.size()) return static_cast(-1); return (*std::max_element(dnv.begin() + i, dnv.begin() + j, ViewSeq()))->trace().current_view_trace().view().id().seq(); } START_TEST(test_proto_join_n) { log_info << "START (join_n)"; init_rand(); const size_t n_nodes(4); PropagationMatrix prop; vector dn; for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back(create_dummy_node(i, 0))); } uint32_t max_view_seq(0); for (size_t i = 0; i < n_nodes; ++i) { 
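// Join one node at a time; after each join, propagate until every joined node has delivered the expected view.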
gu_trace(join_node(&prop, dn[i], i == 0 ? true : false)); set_cvi(dn, 0, i, max_view_seq + 1); gu_trace(prop.propagate_until_cvi(false)); max_view_seq = get_max_view_seq(dn, 0, i); } gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_proto_join_n_w_user_msg) { gu_conf_self_tstamp_on(); log_info << "START (join_n_w_user_msg)"; init_rand(); const size_t n_nodes(4); PropagationMatrix prop; vector dn; // @todo This test should terminate without these timeouts const string suspect_timeout("PT1H"); const string inactive_timeout("PT1H"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } uint32_t max_view_seq(0); for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? true : false)); set_cvi(dn, 0, i, max_view_seq + 1); gu_trace(prop.propagate_until_cvi(true)); for (size_t j = 0; j <= i; ++j) { gu_trace(send_n(dn[j], 5 + ::rand() % 4)); } gu_trace(prop.propagate_until_empty()); for (size_t j = 0; j <= i; ++j) { gu_trace(send_n(dn[j], 5 + ::rand() % 4)); } max_view_seq = get_max_view_seq(dn, 0, i); } gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_proto_join_n_lossy) { gu_conf_self_tstamp_on(); log_info << "START (join_n_lossy)"; init_rand(); const size_t n_nodes(4); PropagationMatrix prop; vector dn; const string suspect_timeout("PT1H"); const string inactive_timeout("PT1H"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } uint32_t max_view_seq(0); for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? true : false)); set_cvi(dn, 0, i, max_view_seq + 1); for (size_t j = 1; j < i + 1; ++j) { prop.set_loss(i + 1, j, 0.9); prop.set_loss(j, i + 1, 0.9); } gu_trace(prop.propagate_until_cvi(true)); max_view_seq = get_max_view_seq(dn, 0, i); } gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_proto_join_n_lossy_w_user_msg) { gu_conf_self_tstamp_on(); log_info << "START (join_n_lossy_w_user_msg)"; init_rand(); const size_t n_nodes(4); PropagationMatrix prop; vector dn; const string suspect_timeout("PT1H"); const string inactive_timeout("PT1H"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } uint32_t max_view_seq(0); for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? true : false)); set_cvi(dn, 0, i, max_view_seq + 1); for (size_t j = 1; j < i + 1; ++j) { prop.set_loss(i + 1, j, 0.9); prop.set_loss(j, i + 1, 0.9); } gu_trace(prop.propagate_until_cvi(true)); for (size_t j = 0; j < i; ++j) { gu_trace(send_n(dn[j], 5 + ::rand() % 4)); } max_view_seq = get_max_view_seq(dn, 0, i); } gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_proto_leave_n) { gu_conf_self_tstamp_on(); log_info << "START (leave_n)"; init_rand(); const size_t n_nodes(4); PropagationMatrix prop; vector dn; for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back(create_dummy_node(i, 0))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? 
true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(true)); } uint32_t max_view_seq(get_max_view_seq(dn, 0, n_nodes)); for (size_t i = 0; i < n_nodes; ++i) { dn[i]->close(); dn[i]->set_cvi(V_REG); set_cvi(dn, i + 1, n_nodes - 1, max_view_seq + 1); gu_trace(prop.propagate_until_cvi(true)); max_view_seq = get_max_view_seq(dn, i + 1, n_nodes); } gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_proto_leave_n_w_user_msg) { gu_conf_self_tstamp_on(); log_info << "START (leave_n_w_user_msg)"; init_rand(); const size_t n_nodes(4); PropagationMatrix prop; vector dn; const string suspect_timeout("PT1H"); const string inactive_timeout("PT1H"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } uint32_t max_view_seq(get_max_view_seq(dn, 0, n_nodes)); for (size_t i = 0; i < n_nodes; ++i) { for (size_t j = i; j < n_nodes; ++j) { gu_trace(send_n(dn[j], 5 + ::rand() % 4)); } dn[i]->close(); dn[i]->set_cvi(V_REG); set_cvi(dn, i + 1, n_nodes - 1, max_view_seq + 1); gu_trace(prop.propagate_until_cvi(true)); max_view_seq = get_max_view_seq(dn, i + 1, n_nodes); } gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_proto_leave_n_lossy) { gu_conf_self_tstamp_on(); log_info << "START (leave_n_lossy)"; init_rand(); const size_t n_nodes(4); PropagationMatrix prop; vector dn; const string suspect_timeout("PT0.5S"); const string inactive_timeout("PT1S"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } uint32_t max_view_seq(get_max_view_seq(dn, 0, n_nodes)); for (size_t i = 0; i < n_nodes; ++i) { for (size_t j = 1; j < i + 1; ++j) { prop.set_loss(i + 1, j, 0.9); prop.set_loss(j, i + 1, 0.9); } } for (size_t i = 0; i < n_nodes; ++i) { dn[i]->set_cvi(V_REG); set_cvi(dn, i + 1, n_nodes - 1, max_view_seq + 1); dn[i]->close(); gu_trace(prop.propagate_until_cvi(true)); max_view_seq = get_max_view_seq(dn, i + 1, n_nodes); } gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_proto_leave_n_lossy_w_user_msg) { gu_conf_self_tstamp_on(); log_info << "START (leave_n_lossy_w_user_msg)"; init_rand(); const size_t n_nodes(4); PropagationMatrix prop; vector dn; const string suspect_timeout("PT0.5S"); const string inactive_timeout("PT1S"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? 
true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } for (size_t i = 0; i < n_nodes; ++i) { for (size_t j = 1; j < i + 1; ++j) { prop.set_loss(i + 1, j, 0.9); prop.set_loss(j, i + 1, 0.9); } } uint32_t max_view_seq(get_max_view_seq(dn, 0, n_nodes)); for (size_t i = 0; i < n_nodes; ++i) { for (size_t j = i; j < n_nodes; ++j) { gu_trace(send_n(dn[j], 5 + ::rand() % 4)); } dn[i]->set_cvi(V_REG); set_cvi(dn, i + 1, n_nodes - 1, max_view_seq + 1); dn[i]->close(); gu_trace(prop.propagate_until_cvi(true)); max_view_seq = get_max_view_seq(dn, i + 1, n_nodes); } gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST // Generic test code for split/merge cases static void test_proto_split_merge_gen(const size_t n_nodes, const bool send_msgs, const double loss) { PropagationMatrix prop; vector dn; const string suspect_timeout("PT1.2S"); const string inactive_timeout("PT1.2S"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } for (size_t i = 0; i < n_nodes; ++i) { for (size_t j = 1; j < i + 1; ++j) { prop.set_loss(i + 1, j, loss); prop.set_loss(j, i + 1, loss); } } vector split; for (size_t i = 0; i < n_nodes; ++i) { split.push_back(static_cast(i + 1)); } uint32_t max_view_seq(get_max_view_seq(dn, 0, n_nodes)); for (size_t i = 1; i < n_nodes; ++i) { if (send_msgs == true) { for (size_t k = 0; k < 5; ++k) { for (size_t j = 0; j < n_nodes; ++j) { gu_trace(send_n(dn[j], 1 + j)); } gu_trace(prop.propagate_n(7)); } } log_info << "split " << i; for (size_t j = 0; j < i; ++j) { for (size_t k = i; k < n_nodes; ++k) { gu_trace(prop.set_loss(split[j], split[k], 0.)); gu_trace(prop.set_loss(split[k], split[j], 0.)); } } set_cvi(dn, 0, i - 1, max_view_seq + 1); set_cvi(dn, i, n_nodes - 1, max_view_seq + 1); if (send_msgs == true) { for (size_t j = 0; j < n_nodes; ++j) { gu_trace(send_n(dn[j], 5 + rand() % 4)); } } gu_trace(prop.propagate_until_cvi(true)); max_view_seq = get_max_view_seq(dn, 0, n_nodes); log_info << "merge " << i; for (size_t j = 0; j < i; ++j) { for (size_t k = i; k < n_nodes; ++k) { gu_trace(prop.set_loss(split[j], split[k], loss)); gu_trace(prop.set_loss(split[k], split[j], loss)); } } set_cvi(dn, 0, n_nodes - 1, max_view_seq + 1); if (send_msgs == true) { for (size_t j = 0; j < n_nodes; ++j) { gu_trace(send_n(dn[j], 5 + rand() % 4)); } } gu_trace(prop.propagate_until_cvi(true)); max_view_seq = get_max_view_seq(dn, 0, n_nodes); } gu_trace(prop.propagate_until_empty()); gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } START_TEST(test_proto_split_merge) { gu_conf_self_tstamp_on(); log_info << "START (split_merge)"; init_rand(); test_proto_split_merge_gen(4, false, 1.); } END_TEST START_TEST(test_proto_split_merge_lossy) { gu_conf_self_tstamp_on(); log_info << "START (split_merge_lossy)"; init_rand(); test_proto_split_merge_gen(4, false, .9); } END_TEST START_TEST(test_proto_split_merge_w_user_msg) { gu_conf_self_tstamp_on(); log_info << "START (split_merge_w_user_msg)"; init_rand(); test_proto_split_merge_gen(4, true, 1.); } END_TEST START_TEST(test_proto_split_merge_lossy_w_user_msg) { gu_conf_self_tstamp_on(); log_info << "START (split_merge_lossy_w_user_msg)"; init_rand(); test_proto_split_merge_gen(4, true, 
.9); } END_TEST START_TEST(test_proto_stop_cont) { log_info << "START"; init_rand(); const size_t n_nodes(4); PropagationMatrix prop; vector dn; const string suspect_timeout("PT0.31S"); const string inactive_timeout("PT0.31S"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } uint32_t view_seq = n_nodes + 1; for (size_t i = 0; i < n_nodes; ++i) { for (size_t j = 0; j < n_nodes; ++j) { if (j != i) { dn[j]->close(dn[i]->uuid()); } } set_cvi(dn, 0, n_nodes - 1, view_seq + 1); gu_trace(prop.propagate_until_cvi(true)); view_seq += 2; } gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_proto_arbitrate) { log_info << "START"; const size_t n_nodes(3); PropagationMatrix prop; vector dn; const string suspect_timeout("PT0.5S"); const string inactive_timeout("PT0.5S"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } uint32_t view_seq = n_nodes + 1; dn[0]->close(dn[1]->uuid()); dn[1]->close(dn[0]->uuid()); dn[0]->set_cvi(ViewId(V_REG, dn[0]->uuid(), view_seq)); dn[2]->set_cvi(ViewId(V_REG, dn[0]->uuid(), view_seq)); dn[1]->set_cvi(ViewId(V_REG, dn[1]->uuid(), view_seq)); gu_trace(prop.propagate_until_cvi(true)); dn[0]->set_cvi(ViewId(V_REG, dn[0]->uuid(), view_seq + 1)); dn[1]->set_cvi(ViewId(V_REG, dn[0]->uuid(), view_seq + 1)); dn[2]->set_cvi(ViewId(V_REG, dn[0]->uuid(), view_seq + 1)); gu_trace(prop.propagate_until_cvi(true)); gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_proto_split_two) { log_info << "START"; const size_t n_nodes(2); PropagationMatrix prop; vector dn; const string suspect_timeout("PT0.31S"); const string inactive_timeout("PT0.31S"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? 
true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } uint32_t view_seq = n_nodes + 1; dn[0]->close(dn[1]->uuid()); dn[1]->close(dn[0]->uuid()); dn[0]->set_cvi(ViewId(V_REG, dn[0]->uuid(), view_seq)); dn[1]->set_cvi(ViewId(V_REG, dn[1]->uuid(), view_seq)); gu_trace(prop.propagate_until_cvi(true)); dn[0]->set_cvi(ViewId(V_REG, dn[0]->uuid(), view_seq + 1)); dn[1]->set_cvi(ViewId(V_REG, dn[0]->uuid(), view_seq + 1)); gu_trace(prop.propagate_until_cvi(true)); gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_aggreg) { log_info << "START"; const size_t n_nodes(2); PropagationMatrix prop; vector dn; const string suspect_timeout("PT0.31S"); const string inactive_timeout("PT0.31S"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(send_n(dn[i], 8)); } gu_trace(prop.propagate_until_empty()); gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_trac_538) { gu_conf_self_tstamp_on(); log_info << "START (test_trac_538)"; init_rand(); const size_t n_nodes(5); PropagationMatrix prop; vector dn; const string suspect_timeout("PT0.5S"); const string inactive_timeout("PT2S"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes - 1; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } uint32_t max_view_seq(get_max_view_seq(dn, 0, n_nodes - 1)); gu_trace(join_node(&prop, dn[n_nodes - 1], false)); for (size_t i = 1; i <= n_nodes; ++i) { if (i != n_nodes - 1) { prop.set_loss(i, n_nodes - 1, 0); prop.set_loss(n_nodes - 1, i, 0); } } set_cvi(dn, 0, n_nodes - 1, max_view_seq + 1); dn[n_nodes - 2]->set_cvi(ViewId(V_REG, n_nodes - 1, max_view_seq + 1)); gu_trace(prop.propagate_until_cvi(true)); gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_trac_552) { gu_conf_self_tstamp_on(); log_info << "START (trac_552)"; init_rand(); const size_t n_nodes(3); PropagationMatrix prop; vector dn; const string suspect_timeout("PT0.5S"); const string inactive_timeout("PT1S"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? 
true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } for (size_t i = 0; i < n_nodes; ++i) { for (size_t j = 1; j < i + 1; ++j) { prop.set_loss(i + 1, j, 0.9); prop.set_loss(j, i + 1, 0.9); } } uint32_t max_view_seq(get_max_view_seq(dn, 0, n_nodes)); for (size_t j = 0; j < n_nodes; ++j) { gu_trace(send_n(dn[j], 5 + ::rand() % 4)); } dn[0]->set_cvi(V_REG); dn[1]->set_cvi(V_REG); set_cvi(dn, 2, n_nodes - 1, max_view_seq + 1); dn[0]->close(); dn[1]->close(); gu_trace(prop.propagate_until_cvi(true)); gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_trac_607) { gu_conf_self_tstamp_on(); log_info << "START (trac_607)"; const size_t n_nodes(3); PropagationMatrix prop; vector dn; const string suspect_timeout("PT0.5S"); const string inactive_timeout("PT1S"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } uint32_t max_view_seq(get_max_view_seq(dn, 0, n_nodes)); dn[0]->set_cvi(V_REG); dn[0]->close(); while (evs_from_dummy(dn[1])->state() != Proto::S_INSTALL) { prop.propagate_n(1); } // this used to cause exception: // Forbidden state transition: INSTALL -> LEAVING (FATAL) dn[1]->close(); // expected behavior: // dn[1], dn[2] reach S_OPERATIONAL and then dn[1] leaves gracefully set_cvi(dn, 1, n_nodes - 1, max_view_seq + 1); gu_trace(prop.propagate_until_cvi(true)); max_view_seq = get_max_view_seq(dn, 0, n_nodes); dn[1]->set_cvi(V_REG); set_cvi(dn, 2, 2, max_view_seq + 1); gu_trace(prop.propagate_until_cvi(true)); gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_trac_724) { gu_conf_self_tstamp_on(); log_info << "START (trac_724)"; init_rand(); const size_t n_nodes(2); PropagationMatrix prop; vector dn; const string suspect_timeout("PT0.5S"); const string inactive_timeout("PT1S"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } // Slightly asymmetric settings and evs.use_aggregate=false to // allow completion window to grow over 0xff. 
Proto* evs0(evs_from_dummy(dn[0])); bool ret(evs0->set_param("evs.use_aggregate", "false")); fail_unless(ret == true); ret = evs0->set_param("evs.send_window", "1024"); fail_unless(ret == true); ret = evs0->set_param("evs.user_send_window", "515"); Proto* evs1(evs_from_dummy(dn[1])); ret = evs1->set_param("evs.use_aggregate", "false"); fail_unless(ret == true); ret = evs1->set_param("evs.send_window", "1024"); fail_unless(ret == true); ret = evs1->set_param("evs.user_send_window", "512"); prop.set_loss(1, 2, 0.); for (size_t i(0); i < 256; ++i) { dn[0]->send(); dn[0]->send(); dn[1]->send(); gu_trace(prop.propagate_until_empty()); } dn[0]->send(); prop.set_loss(1, 2, 1.); dn[0]->send(); gu_trace(prop.propagate_until_empty()); gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_trac_760) { gu_conf_self_tstamp_on(); log_info << "START (trac_760)"; init_rand(); const size_t n_nodes(3); PropagationMatrix prop; vector dn; const string suspect_timeout("PT0.5S"); const string inactive_timeout("PT1S"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(send_n(dn[i], 2)); } gu_trace(prop.propagate_until_empty()); uint32_t max_view_seq(get_max_view_seq(dn, 0, n_nodes)); gu_trace(send_n(dn[0], 1)); gu_trace(send_n(dn[1], 1)); // gu_trace(send_n(dn[2], 1)); set_cvi(dn, 0, 1, max_view_seq + 1); dn[2]->set_cvi(V_REG); dn[2]->close(); Proto* evs0(evs_from_dummy(dn[0])); Proto* evs1(evs_from_dummy(dn[1])); while (evs1->state() != Proto::S_GATHER && evs0->state() != Proto::S_GATHER) { gu_trace(prop.propagate_n(1)); } dn[1]->close(); gu_trace(prop.propagate_until_cvi(true)); gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_gh_41) { gu_conf_self_tstamp_on(); log_info << "START (gh_41)"; const size_t n_nodes(3); PropagationMatrix prop; vector dn; const string suspect_timeout("PT0.5S"); const string inactive_timeout("PT1S"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } // Generate partitioning so that the node with smallest UUID // creates singleton view log_info << "partition"; prop.set_loss(1, 2, 0.); prop.set_loss(2, 1, 0.); prop.set_loss(1, 3, 0.); prop.set_loss(3, 1, 0.); uint32_t max_view_seq(get_max_view_seq(dn, 0, n_nodes)); dn[0]->set_cvi(ViewId(V_REG, dn[0]->uuid(), max_view_seq + 1)); dn[1]->set_cvi(ViewId(V_REG, dn[1]->uuid(), max_view_seq + 1)); dn[2]->set_cvi(ViewId(V_REG, dn[1]->uuid(), max_view_seq + 1)); prop.propagate_until_cvi(true); // Merge groups and make node 1 leave so that nodes 2 and 3 see // leave message from unknown origin log_info << "merge"; prop.set_loss(1, 2, 1.); prop.set_loss(2, 1, 1.); prop.set_loss(1, 3, 1.); prop.set_loss(3, 1, 1.); // Send message so that nodes 2 and 3 shift to GATHER. 
This must be done // because LEAVE message is ignored in handle_foreign() dn[0]->send(); dn[0]->close(); dn[0]->set_cvi(V_REG); dn[1]->set_cvi(ViewId(V_REG, dn[1]->uuid(), max_view_seq + 2)); dn[2]->set_cvi(ViewId(V_REG, dn[1]->uuid(), max_view_seq + 2)); prop.propagate_until_cvi(true); check_trace(dn); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_gh_37) { gu_conf_self_tstamp_on(); log_info << "START (gh_37)"; const size_t n_nodes(3); PropagationMatrix prop; vector dn; const string suspect_timeout("PT0.5S"); const string inactive_timeout("PT1S"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } uint32_t max_view_seq(get_max_view_seq(dn, 0, n_nodes)); // node 0 is gonna to leave for(size_t i = 2; i <= n_nodes; i++) { // leaving node(LN) is able to send messages to remaining nodes. // prop.set_loss(1, i, 0.); // but remaining nodes(RNS) won't be able to ack these messages. prop.set_loss(i, 1, 0.); // so RNS aru_seq are the same and higher than LN aru_seq. } // LN ss=-1, ir=[2,1] // RNS ss=1, ir=[2,1] dn[0]->send(); dn[0]->send(); dn[0]->close(); dn[0]->set_cvi(V_REG); dn[1]->set_cvi(ViewId(V_REG, dn[1]->uuid(), max_view_seq + 1)); dn[2]->set_cvi(ViewId(V_REG, dn[1]->uuid(), max_view_seq + 1)); prop.propagate_until_cvi(true); check_trace(dn); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_gh_40) { gu_conf_self_tstamp_on(); log_info << "START (gh_40)"; const size_t n_nodes(3); PropagationMatrix prop; vector dn; const string suspect_timeout("PT0.5S"); const string inactive_timeout("PT1S"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } uint32_t max_view_seq(get_max_view_seq(dn, 0, n_nodes)); // ss=0, ir=[1,0]; dn[0]->send(); gu_trace(prop.propagate_until_empty()); log_info << "gh_40 all got operational state"; // cut dn[0] from dn[1] and dn[2]. for (size_t i = 2; i <= n_nodes; ++i) { prop.set_loss(1, i, 0.); prop.set_loss(i, 1, 0.); } // ss=0, ir=[2,1]; // dn[1] send msg(seq=1) dn[1]->send(); Proto* evs1 = evs_from_dummy(dn[1]); Proto* evs2 = evs_from_dummy(dn[2]); fail_if(evs1->state() != Proto::S_OPERATIONAL); fail_if(evs2->state() != Proto::S_OPERATIONAL); evs1->set_inactive(dn[0]->uuid()); evs2->set_inactive(dn[0]->uuid()); evs1->check_inactive(); evs2->check_inactive(); fail_if(evs1->state() != Proto::S_GATHER); fail_if(evs2->state() != Proto::S_GATHER); while(!(evs1->state() == Proto::S_GATHER && evs1->is_install_message())) { gu_trace(prop.propagate_n(1)); } // dn[0] comes back. // here we have to set message F_RETRANS // otherwise handle_msg ignores this msg. // @todo:why? // dn[0] ack dn[1] msg(seq=1) with flags F_RETRANS. Datagram dg1 = dn[0]->create_datagram(); UserMessage msg1(0, dn[0]->uuid(), ViewId(V_REG, dn[0]->uuid(), max_view_seq), 1, 0, 0, O_DROP, 1, 0xff, Message::F_RETRANS); // dn[0] msg(seq=2) leak into dn[1] input_map. 
Datagram dg2 = dn[0]->create_datagram(); UserMessage msg2(0, dn[0]->uuid(), ViewId(V_REG, dn[0]->uuid(), max_view_seq), 2, 0, 0, O_SAFE, 2, 0xff, Message::F_RETRANS); // so for dn[1] // input_map: ss=0, ir=[3,2] // install message: ss=0, ir=[2,1] // seq 1 = O_SAFE message.(initiated by self) // seq 2 = O_DROP message.(complete_user) push_header(msg1, dg1); evs1->handle_up(0, dg1, ProtoUpMeta(dn[0]->uuid())); push_header(msg2, dg2); log_info << "evs1 handle msg " << msg2; log_info << "before handle msg: " << *evs1; evs1->handle_up(0, dg2, ProtoUpMeta(dn[0]->uuid())); log_info << "after handle msg: " << *evs1; dn[0]->set_cvi(ViewId(V_REG, dn[0]->uuid(), max_view_seq + 1)); dn[1]->set_cvi(ViewId(V_REG, dn[1]->uuid(), max_view_seq + 1)); dn[2]->set_cvi(ViewId(V_REG, dn[1]->uuid(), max_view_seq + 1)); prop.propagate_until_cvi(true); check_trace(dn); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_gh_100) { log_info << "START (test_gh_100)"; gu::Config conf; mark_point(); gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); conf.set("evs.info_log_mask", "0x3"); conf.set("evs.debug_log_mask", "0xa0"); UUID uuid1(1), uuid2(2); DummyTransport t1(uuid1), t2(uuid2); mark_point(); DummyUser u1(conf), u2(conf); mark_point(); Proto p1(conf, uuid1, 0, gu::URI("evs://"), 10000, 0); // Start p2 view seqno from higher value than p1 View p2_rst_view(0, ViewId(V_REG, uuid2, 3)); Proto p2(conf, uuid2, 0, gu::URI("evs://"), 10000, &p2_rst_view); gcomm::connect(&t1, &p1); gcomm::connect(&p1, &u1); gcomm::connect(&t2, &p2); gcomm::connect(&p2, &u2); single_join(&t1, &p1); // The following is from double_join(). Process messages until // install message is generated. After that handle install timer // on p1 and verify that the newly generated install message has // greater install view id seqno than the first one. 
Message jm; Message im; Message im2; Message gm; Message gm2; Message msg; Datagram* rb; // Initial states check p2.shift_to(Proto::S_JOINING); fail_unless(p1.state() == Proto::S_OPERATIONAL); fail_unless(p2.state() == Proto::S_JOINING); // Send join message, don't self handle immediately // Expected output: one join message p2.send_join(false); fail_unless(p2.state() == Proto::S_JOINING); rb = get_msg(&t2, &jm); fail_unless(rb != 0); fail_unless(jm.type() == Message::T_JOIN); rb = get_msg(&t2, &msg); fail_unless(rb == 0); // Handle node 2's join on node 1 // Expected output: shift to S_GATHER and one join message p1.handle_msg(jm); fail_unless(p1.state() == Proto::S_GATHER); rb = get_msg(&t1, &jm); fail_unless(rb != 0); fail_unless(jm.type() == Message::T_JOIN); rb = get_msg(&t1, &msg); fail_unless(rb == 0); // Handle node 1's join on node 2 // Expected output: shift to S_GATHER and one join message p2.handle_msg(jm); fail_unless(p2.state() == Proto::S_GATHER); rb = get_msg(&t2, &jm); fail_unless(rb != 0); fail_unless(jm.type() == Message::T_JOIN); rb = get_msg(&t2, &msg); fail_unless(rb == 0); // Handle node 2's join on node 1 // Expected output: Install and commit gap messages, state stays in S_GATHER p1.handle_msg(jm); fail_unless(p1.state() == Proto::S_GATHER); rb = get_msg(&t1, &im); fail_unless(rb != 0); fail_unless(im.type() == Message::T_INSTALL); rb = get_msg(&t1, &gm); fail_unless(rb != 0); fail_unless(gm.type() == Message::T_GAP); fail_unless((gm.flags() & Message::F_COMMIT) != 0); rb = get_msg(&t1, &msg); fail_unless(rb == 0); // usleep(1100000); // Handle timers to to generate shift to GATHER p1.handle_inactivity_timer(); p1.handle_install_timer(); rb = get_msg(&t1, &jm); fail_unless(rb != 0); fail_unless(jm.type() == Message::T_JOIN); rb = get_msg(&t1, &im2); fail_unless(rb != 0); fail_unless(im2.type() == Message::T_INSTALL); fail_unless(im2.install_view_id().seq() > im.install_view_id().seq()); } END_TEST START_TEST(test_evs_protocol_upgrade) { log_info << "START (test_evs_protocol_upgrade)"; PropagationMatrix prop; vector dn; uint32_t view_seq(0); for (int i(0); i <= GCOMM_PROTOCOL_MAX_VERSION; ++i) { gu_trace(dn.push_back(create_dummy_node(i + 1, i))); gu_trace(join_node(&prop, dn[i], i == 0 ? 
true : false)); set_cvi(dn, 0, i, view_seq + 1); gu_trace(prop.propagate_until_cvi(false)); ++view_seq; for (int j(0); j <= i; ++j) { fail_unless(evs_from_dummy(dn[j])->current_view().version() == 0); gu_trace(send_n(dn[j], 5 + ::rand() % 4)); } } for (int i(0); i < GCOMM_PROTOCOL_MAX_VERSION; ++i) { for (int j(i); j <= GCOMM_PROTOCOL_MAX_VERSION; ++j) { gu_trace(send_n(dn[j], 5 + ::rand() % 4)); } dn[i]->close(); dn[i]->set_cvi(V_REG); set_cvi(dn, i + 1, GCOMM_PROTOCOL_MAX_VERSION, view_seq); gu_trace(prop.propagate_until_cvi(true)); ++view_seq; for (int j(i + 1); j <= GCOMM_PROTOCOL_MAX_VERSION; ++j) { gu_trace(send_n(dn[j], 5 + ::rand() % 4)); } gu_trace(prop.propagate_until_empty()); } fail_unless(evs_from_dummy(dn[GCOMM_PROTOCOL_MAX_VERSION])->current_view().version() == GCOMM_PROTOCOL_MAX_VERSION); check_trace(dn); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST Suite* evs2_suite() { Suite* s = suite_create("gcomm::evs"); TCase* tc; bool skip(false); if (skip == false) { tc = tcase_create("test_range"); tcase_add_test(tc, test_range); suite_add_tcase(s, tc); tc = tcase_create("test_message"); tcase_add_test(tc, test_message); suite_add_tcase(s, tc); tc = tcase_create("test_input_map_insert"); tcase_add_test(tc, test_input_map_insert); suite_add_tcase(s, tc); tc = tcase_create("test_input_map_find"); tcase_add_test(tc, test_input_map_find); suite_add_tcase(s, tc); tc = tcase_create("test_input_map_safety"); tcase_add_test(tc, test_input_map_safety); suite_add_tcase(s, tc); tc = tcase_create("test_input_map_erase"); tcase_add_test(tc, test_input_map_erase); suite_add_tcase(s, tc); tc = tcase_create("test_input_map_overwrap"); tcase_add_test(tc, test_input_map_overwrap); tcase_set_timeout(tc, 15); suite_add_tcase(s, tc); tc = tcase_create("test_input_map_random_insert"); tcase_add_test(tc, test_input_map_random_insert); suite_add_tcase(s, tc); tc = tcase_create("test_proto_single_join"); tcase_add_test(tc, test_proto_single_join); suite_add_tcase(s, tc); tc = tcase_create("test_proto_double_join"); tcase_add_test(tc, test_proto_double_join); suite_add_tcase(s, tc); if (run_all_evs_tests() == true) { tc = tcase_create("test_proto_join_n"); tcase_add_test(tc, test_proto_join_n); suite_add_tcase(s, tc); tc = tcase_create("test_proto_join_n_w_user_msg"); tcase_add_test(tc, test_proto_join_n_w_user_msg); suite_add_tcase(s, tc); tc = tcase_create("test_proto_join_n_lossy"); tcase_add_test(tc, test_proto_join_n_lossy); suite_add_tcase(s, tc); tc = tcase_create("test_proto_join_n_lossy_w_user_msg"); tcase_add_test(tc, test_proto_join_n_lossy_w_user_msg); suite_add_tcase(s, tc); tc = tcase_create("test_proto_leave_n"); tcase_add_test(tc, test_proto_leave_n); tcase_set_timeout(tc, 20); suite_add_tcase(s, tc); tc = tcase_create("test_proto_leave_n_w_user_msg"); tcase_add_test(tc, test_proto_leave_n_w_user_msg); tcase_set_timeout(tc, 20); suite_add_tcase(s, tc); tc = tcase_create("test_proto_leave_n_lossy"); tcase_add_test(tc, test_proto_leave_n_lossy); tcase_set_timeout(tc, 25); suite_add_tcase(s, tc); tc = tcase_create("test_proto_leave_n_lossy_w_user_msg"); tcase_add_test(tc, test_proto_leave_n_lossy_w_user_msg); tcase_set_timeout(tc, 25); suite_add_tcase(s, tc); tc = tcase_create("test_proto_split_merge"); tcase_add_test(tc, test_proto_split_merge); tcase_set_timeout(tc, 20); suite_add_tcase(s, tc); tc = tcase_create("test_proto_split_merge_lossy"); tcase_add_test(tc, test_proto_split_merge_lossy); tcase_set_timeout(tc, 20); suite_add_tcase(s, tc); tc = 
tcase_create("test_proto_split_merge_w_user_msg"); tcase_add_test(tc, test_proto_split_merge_w_user_msg); tcase_set_timeout(tc, 60); suite_add_tcase(s, tc); tc = tcase_create("test_proto_split_merge_lossy_w_user_msg"); tcase_add_test(tc, test_proto_split_merge_lossy_w_user_msg); tcase_set_timeout(tc, 60); suite_add_tcase(s, tc); tc = tcase_create("test_proto_stop_cont"); tcase_add_test(tc, test_proto_stop_cont); tcase_set_timeout(tc, 10); suite_add_tcase(s, tc); } if (run_all_evs_tests() == true) { tc = tcase_create("test_proto_split_two"); tcase_add_test(tc, test_proto_split_two); suite_add_tcase(s, tc); tc = tcase_create("test_aggreg"); tcase_add_test(tc, test_aggreg); suite_add_tcase(s, tc); } if (run_all_evs_tests() == true) { tc = tcase_create("test_proto_arbitrate"); tcase_add_test(tc, test_proto_arbitrate); suite_add_tcase(s, tc); tc = tcase_create("test_trac_538"); tcase_add_test(tc, test_trac_538); tcase_set_timeout(tc, 15); suite_add_tcase(s, tc); tc = tcase_create("test_trac_552"); tcase_add_test(tc, test_trac_552); tcase_set_timeout(tc, 15); suite_add_tcase(s, tc); tc = tcase_create("test_trac_607"); tcase_add_test(tc, test_trac_607); tcase_set_timeout(tc, 15); suite_add_tcase(s, tc); tc = tcase_create("test_trac_724"); tcase_add_test(tc, test_trac_724); tcase_set_timeout(tc, 15); suite_add_tcase(s, tc); tc = tcase_create("test_trac_760"); tcase_add_test(tc, test_trac_760); tcase_set_timeout(tc, 15); suite_add_tcase(s, tc); tc = tcase_create("test_gh_41"); tcase_add_test(tc, test_gh_41); tcase_set_timeout(tc, 15); suite_add_tcase(s, tc); tc = tcase_create("test_gh_37"); tcase_add_test(tc, test_gh_37); tcase_set_timeout(tc, 15); suite_add_tcase(s, tc); tc = tcase_create("test_gh_40"); tcase_add_test(tc, test_gh_40); tcase_set_timeout(tc, 5); suite_add_tcase(s, tc); } tc = tcase_create("test_gh_100"); tcase_add_test(tc, test_gh_100); suite_add_tcase(s, tc); if (run_all_evs_tests() == true) { tc = tcase_create("test_evs_protocol_upgrade"); tcase_add_test(tc, test_evs_protocol_upgrade); suite_add_tcase(s, tc); } } return s; } galera-3-25.3.20/gcomm/test/check_pc.cpp0000644000015300001660000037734513042054732017505 0ustar jenkinsjenkins/* * Copyright (C) 2009-2014 Codership Oy */ #include "check_gcomm.hpp" #include "pc_message.hpp" #include "pc_proto.hpp" #include "evs_proto.hpp" #include "check_templ.hpp" #include "check_trace.hpp" #include "gcomm/conf.hpp" #include "gu_errno.h" #include "gu_asio.hpp" // gu::ssl_register_params() #include #include #include #include // // run_all_pc_tests is set to true by default. To disable pc tests // which use real TCP transport or depend on wall clock, // set GALERA_TEST_DETERMINISTIC env // variable before running pc test suite. 
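// For example, running the suite with GALERA_TEST_DETERMINISTIC=1 set in the environment disables those tests.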
// static class run_all_pc_tests { public: run_all_pc_tests() : run_all_tests_() { if (::getenv("GALERA_TEST_DETERMINISTIC")) { run_all_tests_ = false; } else { run_all_tests_ = true; } } bool operator()() const { return run_all_tests_; } private: bool run_all_tests_; } run_all_pc_tests; using namespace std; using namespace std::rel_ops; using namespace gu::datetime; using namespace gcomm; using namespace gcomm::pc; using gu::byte_t; using gu::Buffer; using gu::Exception; using gu::URI; using gu::DeleteObject; START_TEST(test_pc_messages) { StateMessage pcs(0); pc::NodeMap& sim(pcs.node_map()); sim.insert(std::make_pair(UUID(0,0), pc::Node(true, false, false, 6, ViewId(V_PRIM, UUID(0, 0), 9), 42, -1))); sim.insert(std::make_pair(UUID(0,0), pc::Node(false, true, false, 88, ViewId(V_PRIM, UUID(0, 0), 3), 472, 0))); sim.insert(std::make_pair(UUID(0,0), pc::Node(true, false, true, 78, ViewId(V_PRIM, UUID(0, 0), 87), 52, 1))); size_t expt_size = 4 // hdr + 4 // seq + 4 + 3*(UUID::serial_size() + sizeof(uint32_t) + 4 + 20 + 8); // NodeMap check_serialization(pcs, expt_size, StateMessage(-1)); InstallMessage pci(0); pc::NodeMap& iim = pci.node_map(); iim.insert(std::make_pair(UUID(0,0), pc::Node(true, true, true, 6, ViewId(V_PRIM, UUID(0, 0), 9), 42, -1))); iim.insert(std::make_pair(UUID(0,0), pc::Node(false, false, false, 88, ViewId(V_NON_PRIM, UUID(0, 0), 3), 472, 0))); iim.insert(std::make_pair(UUID(0,0), pc::Node(true, false, false, 78, ViewId(V_PRIM, UUID(0, 0), 87), 52, 1))); iim.insert(std::make_pair(UUID(0,0), pc::Node(false, true, true, 457, ViewId(V_NON_PRIM, UUID(0, 0), 37), 56, 0xff))); expt_size = 4 // hdr + 4 // seq + 4 + 4*(UUID::serial_size() + sizeof(uint32_t) + 4 + 20 + 8); // NodeMap check_serialization(pci, expt_size, InstallMessage(-1)); UserMessage pcu(0, 7); pcu.checksum(0xfefe, true); expt_size = 4 + 4; check_serialization(pcu, expt_size, UserMessage(-1, -1U)); fail_unless(pcu.serial_size() % 4 == 0); } END_TEST class PCUser : public Toplay { public: PCUser(gu::Config& conf, const UUID& uuid, DummyTransport *tp, Proto* pc) : Toplay(conf), views_(), uuid_(uuid), tp_(tp), pc_(pc) { gcomm::connect(tp_, pc_); gcomm::connect(pc_, this); } const UUID& uuid() const { return uuid_; } DummyTransport* tp() { return tp_; } Proto* pc() { return pc_; } void handle_up(const void* cid, const Datagram& rb, const ProtoUpMeta& um) { if (um.has_view() == true) { const View& view(um.view()); log_info << view; fail_unless(view.type() == V_PRIM || view.type() == V_NON_PRIM); views_.push_back(View(view)); } } void send() { byte_t pl[4] = {1, 2, 3, 4}; Buffer buf(pl, pl + sizeof(pl)); Datagram dg(buf); fail_unless(send_down(dg, ProtoDownMeta()) == 0); } private: PCUser(const PCUser&); void operator=(const PCUser&); list views_; UUID uuid_; DummyTransport* tp_; Proto* pc_; }; void get_msg(Datagram* rb, Message* msg, bool release = true) { assert(msg != 0); if (rb == 0) { log_info << "get_msg: (null)"; } else { // assert(rb->get_header().size() == 0 && rb->get_offset() == 0); const byte_t* begin(gcomm::begin(*rb)); const size_t available(gcomm::available(*rb)); fail_unless(msg->unserialize(begin, available, 0) != 0); log_info << "get_msg: " << msg->to_string(); if (release) delete rb; } } void single_boot(int version, PCUser* pu1) { ProtoUpMeta sum1(pu1->uuid()); View vt0(version, ViewId(V_TRANS, pu1->uuid(), 0)); vt0.add_member(pu1->uuid(), 0); ProtoUpMeta um1(UUID::nil(), ViewId(), &vt0); pu1->pc()->connect(true); // pu1->pc()->shift_to(Proto::S_JOINING); pu1->pc()->handle_up(0, Datagram(), um1); 
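// Delivering the transitional (V_TRANS) view should move pu1 to S_TRANS.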
fail_unless(pu1->pc()->state() == Proto::S_TRANS); View vr1(version, ViewId(V_REG, pu1->uuid(), 1)); vr1.add_member(pu1->uuid(), 0); ProtoUpMeta um2(UUID::nil(), ViewId(), &vr1); pu1->pc()->handle_up(0, Datagram(), um2); fail_unless(pu1->pc()->state() == Proto::S_STATES_EXCH); Datagram* rb = pu1->tp()->out(); fail_unless(rb != 0); Message sm1; get_msg(rb, &sm1); fail_unless(sm1.type() == Message::T_STATE); fail_unless(sm1.node_map().size() == 1); { const pc::Node& pi1 = pc::NodeMap::value(sm1.node_map().begin()); fail_unless(pi1.prim() == true); fail_unless(pi1.last_prim() == ViewId(V_PRIM, pu1->uuid(), 0)); } pu1->pc()->handle_msg(sm1, Datagram(), sum1); fail_unless(pu1->pc()->state() == Proto::S_INSTALL); rb = pu1->tp()->out(); fail_unless(rb != 0); Message im1; get_msg(rb, &im1); fail_unless(im1.type() == Message::T_INSTALL); fail_unless(im1.node_map().size() == 1); { const pc::Node& pi1 = pc::NodeMap::value(im1.node_map().begin()); fail_unless(pi1.prim() == true); fail_unless(pi1.last_prim() == ViewId(V_PRIM, pu1->uuid(), 0)); } pu1->pc()->handle_msg(im1, Datagram(), sum1); fail_unless(pu1->pc()->state() == Proto::S_PRIM); } START_TEST(test_pc_view_changes_single) { log_info << "START (test_pc_view_changes_single)"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); UUID uuid1(0, 0); Proto pc1(conf, uuid1, 0); DummyTransport tp1; PCUser pu1(conf, uuid1, &tp1, &pc1); single_boot(0, &pu1); } END_TEST static void double_boot(int version, PCUser* pu1, PCUser* pu2) { ProtoUpMeta pum1(pu1->uuid()); ProtoUpMeta pum2(pu2->uuid()); View t11(version, ViewId(V_TRANS, pu1->pc()->current_view().id())); t11.add_member(pu1->uuid(), 0); pu1->pc()->handle_view(t11); fail_unless(pu1->pc()->state() == Proto::S_TRANS); View t12(version, ViewId(V_TRANS, pu2->uuid(), 0)); t12.add_member(pu2->uuid(), 0); // pu2->pc()->shift_to(Proto::S_JOINING); pu2->pc()->connect(false); pu2->pc()->handle_view(t12); fail_unless(pu2->pc()->state() == Proto::S_TRANS); View r1(version, ViewId(V_REG, pu1->uuid(), pu1->pc()->current_view().id().seq() + 1)); r1.add_member(pu1->uuid(), 0); r1.add_member(pu2->uuid(), 0); pu1->pc()->handle_view(r1); fail_unless(pu1->pc()->state() == Proto::S_STATES_EXCH); pu2->pc()->handle_view(r1); fail_unless(pu2->pc()->state() == Proto::S_STATES_EXCH); Datagram* rb = pu1->tp()->out(); fail_unless(rb != 0); Message sm1; get_msg(rb, &sm1); fail_unless(sm1.type() == Message::T_STATE); rb = pu2->tp()->out(); fail_unless(rb != 0); Message sm2; get_msg(rb, &sm2); fail_unless(sm2.type() == Message::T_STATE); rb = pu1->tp()->out(); fail_unless(rb == 0); rb = pu2->tp()->out(); fail_unless(rb == 0); pu1->pc()->handle_msg(sm1, Datagram(), pum1); rb = pu1->tp()->out(); fail_unless(rb == 0); fail_unless(pu1->pc()->state() == Proto::S_STATES_EXCH); pu1->pc()->handle_msg(sm2, Datagram(), pum2); fail_unless(pu1->pc()->state() == Proto::S_INSTALL); pu2->pc()->handle_msg(sm1, Datagram(), pum1); rb = pu2->tp()->out(); fail_unless(rb == 0); fail_unless(pu2->pc()->state() == Proto::S_STATES_EXCH); pu2->pc()->handle_msg(sm2, Datagram(), pum2); fail_unless(pu2->pc()->state() == Proto::S_INSTALL); Message im1; UUID imsrc; if (pu1->uuid() < pu2->uuid()) { rb = pu1->tp()->out(); imsrc = pu1->uuid(); } else { rb = pu2->tp()->out(); imsrc = pu2->uuid(); } fail_unless(rb != 0); get_msg(rb, &im1); fail_unless(im1.type() == Message::T_INSTALL); fail_unless(pu1->tp()->out() == 0); fail_unless(pu2->tp()->out() == 0); ProtoUpMeta ipum(imsrc); pu1->pc()->handle_msg(im1, Datagram(), ipum); 
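// Note: in double_boot() the branch above reads the install message from the
// member with the smaller UUID (the representative in these tests); delivering
// that single message to both peers is expected to take them from S_INSTALL to
// S_PRIM, as asserted below.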
fail_unless(pu1->pc()->state() == Proto::S_PRIM); pu2->pc()->handle_msg(im1, Datagram(), ipum); fail_unless(pu2->pc()->state() == Proto::S_PRIM); }
// Form PC for three instances.
static void triple_boot(int version, PCUser* pu1, PCUser* pu2, PCUser* pu3) { fail_unless(pu1->uuid() < pu2->uuid() && pu2->uuid() < pu3->uuid());
// trans views
{ View tr12(version, ViewId(V_TRANS, pu1->pc()->current_view().id())); tr12.add_member(pu1->uuid(), 0); tr12.add_member(pu2->uuid(), 0); ProtoUpMeta trum12(UUID::nil(), ViewId(), &tr12); pu1->pc()->handle_up(0, Datagram(), trum12); pu2->pc()->handle_up(0, Datagram(), trum12); fail_unless(pu1->pc()->state() == Proto::S_TRANS); fail_unless(pu2->pc()->state() == Proto::S_TRANS); pu3->pc()->connect(false); View tr3(version, ViewId(V_TRANS, pu3->uuid(), 0)); tr3.add_member(pu3->uuid(), 0); ProtoUpMeta trum3(UUID::nil(), ViewId(), &tr3); pu3->pc()->handle_up(0, Datagram(), trum3); fail_unless(pu3->pc()->state() == Proto::S_TRANS); }
// reg view
{ View reg(version, ViewId(V_REG, pu1->uuid(), pu1->pc()->current_view().id().seq() + 1)); reg.add_member(pu1->uuid(), 0); reg.add_member(pu2->uuid(), 0); reg.add_member(pu3->uuid(), 0); ProtoUpMeta pum(UUID::nil(), ViewId(), &reg); pu1->pc()->handle_up(0, Datagram(), pum); pu2->pc()->handle_up(0, Datagram(), pum); pu3->pc()->handle_up(0, Datagram(), pum); fail_unless(pu1->pc()->state() == Proto::S_STATES_EXCH); fail_unless(pu2->pc()->state() == Proto::S_STATES_EXCH); fail_unless(pu3->pc()->state() == Proto::S_STATES_EXCH); }
// states exch
{ Datagram* dg(pu1->tp()->out()); fail_unless(dg != 0); pu1->pc()->handle_up(0, *dg, ProtoUpMeta(pu1->uuid())); pu2->pc()->handle_up(0, *dg, ProtoUpMeta(pu1->uuid())); pu3->pc()->handle_up(0, *dg, ProtoUpMeta(pu1->uuid())); delete dg; dg = pu2->tp()->out(); fail_unless(dg != 0); pu1->pc()->handle_up(0, *dg, ProtoUpMeta(pu2->uuid())); pu2->pc()->handle_up(0, *dg, ProtoUpMeta(pu2->uuid())); pu3->pc()->handle_up(0, *dg, ProtoUpMeta(pu2->uuid())); delete dg; dg = pu3->tp()->out(); fail_unless(dg != 0); pu1->pc()->handle_up(0, *dg, ProtoUpMeta(pu3->uuid())); pu2->pc()->handle_up(0, *dg, ProtoUpMeta(pu3->uuid())); pu3->pc()->handle_up(0, *dg, ProtoUpMeta(pu3->uuid())); delete dg; fail_unless(pu1->pc()->state() == Proto::S_INSTALL); fail_unless(pu2->pc()->state() == Proto::S_INSTALL); fail_unless(pu3->pc()->state() == Proto::S_INSTALL); }
// install
{ Datagram* dg(pu1->tp()->out()); fail_unless(dg != 0); pu1->pc()->handle_up(0, *dg, ProtoUpMeta(pu1->uuid())); pu2->pc()->handle_up(0, *dg, ProtoUpMeta(pu1->uuid())); pu3->pc()->handle_up(0, *dg, ProtoUpMeta(pu1->uuid())); delete dg; fail_unless(pu1->pc()->state() == Proto::S_PRIM); fail_unless(pu2->pc()->state() == Proto::S_PRIM); fail_unless(pu3->pc()->state() == Proto::S_PRIM); } }
START_TEST(test_pc_view_changes_double) { log_info << "START (test_pc_view_changes_double)"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); UUID uuid1(1); ProtoUpMeta pum1(uuid1); Proto pc1(conf, uuid1, 0); DummyTransport tp1; PCUser pu1(conf, uuid1, &tp1, &pc1); single_boot(0, &pu1); UUID uuid2(2); ProtoUpMeta pum2(uuid2); Proto pc2(conf, uuid2, 0); DummyTransport tp2; PCUser pu2(conf, uuid2, &tp2, &pc2); double_boot(0, &pu1, &pu2); Datagram* rb; View tnp(0, ViewId(V_TRANS, pu1.pc()->current_view().id())); tnp.add_member(uuid1, 0); pu1.pc()->handle_view(tnp); fail_unless(pu1.pc()->state() == Proto::S_TRANS); View reg(0, ViewId(V_REG, uuid1, pu1.pc()->current_view().id().seq() + 1)); reg.add_member(uuid1, 0);
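// Note: node 1 now receives a singleton REG view while its last primary view
// had two members, so after the states exchange below it is expected to land
// in S_NON_PRIM instead of re-forming a primary component on its own.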
pu1.pc()->handle_view(reg); fail_unless(pu1.pc()->state() == Proto::S_STATES_EXCH); rb = pu1.tp()->out(); fail_unless(rb != 0); pu1.pc()->handle_up(0, *rb, ProtoUpMeta(uuid1)); fail_unless(pu1.pc()->state() == Proto::S_NON_PRIM); delete rb; View tpv2(0, ViewId(V_TRANS, pu2.pc()->current_view().id())); tpv2.add_member(uuid2, 0); tpv2.add_left(uuid1, 0); pu2.pc()->handle_view(tpv2); fail_unless(pu2.pc()->state() == Proto::S_TRANS); fail_unless(pu2.tp()->out() == 0); View rp2(0, ViewId(V_REG, uuid2, pu1.pc()->current_view().id().seq() + 1)); rp2.add_member(uuid2, 0); rp2.add_left(uuid1, 0); pu2.pc()->handle_view(rp2); fail_unless(pu2.pc()->state() == Proto::S_STATES_EXCH); rb = pu2.tp()->out(); fail_unless(rb != 0); Message sm2; get_msg(rb, &sm2); fail_unless(sm2.type() == Message::T_STATE); fail_unless(pu2.tp()->out() == 0); pu2.pc()->handle_msg(sm2, Datagram(), pum2); fail_unless(pu2.pc()->state() == Proto::S_INSTALL); rb = pu2.tp()->out(); fail_unless(rb != 0); Message im2; get_msg(rb, &im2); fail_unless(im2.type() == Message::T_INSTALL); pu2.pc()->handle_msg(im2, Datagram(), pum2); fail_unless(pu2.pc()->state() == Proto::S_PRIM); } END_TEST /* Test that UUID ordering does not matter when starting nodes */ START_TEST(test_pc_view_changes_reverse) { log_info << "START (test_pc_view_changes_reverse)"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); UUID uuid1(1); ProtoUpMeta pum1(uuid1); Proto pc1(conf, uuid1, 0); DummyTransport tp1; PCUser pu1(conf, uuid1, &tp1, &pc1); UUID uuid2(2); ProtoUpMeta pum2(uuid2); Proto pc2(conf, uuid2, 0); DummyTransport tp2; PCUser pu2(conf, uuid2, &tp2, &pc2); single_boot(0, &pu2); double_boot(0, &pu2, &pu1); } END_TEST START_TEST(test_pc_state1) { log_info << "START (test_pc_state1)"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); UUID uuid1(1); ProtoUpMeta pum1(uuid1); Proto pc1(conf, uuid1, 0); DummyTransport tp1; PCUser pu1(conf, uuid1, &tp1, &pc1); single_boot(0, &pu1); UUID uuid2(2); ProtoUpMeta pum2(uuid2); Proto pc2(conf, uuid2, 0); DummyTransport tp2; PCUser pu2(conf, uuid2, &tp2, &pc2); // n1: PRIM -> TRANS -> STATES_EXCH -> RTR -> PRIM // n2: JOINING -> STATES_EXCH -> RTR -> PRIM double_boot(0, &pu1, &pu2); fail_unless(pu1.pc()->state() == Proto::S_PRIM); fail_unless(pu2.pc()->state() == Proto::S_PRIM); // PRIM -> TRANS -> STATES_EXCH -> RTR -> TRANS -> STATES_EXCH -> RTR ->PRIM View tr1(0, ViewId(V_TRANS, pu1.pc()->current_view().id())); tr1.add_member(uuid1, 0); tr1.add_member(uuid2, 0); pu1.pc()->handle_view(tr1); pu2.pc()->handle_view(tr1); fail_unless(pu1.pc()->state() == Proto::S_TRANS); fail_unless(pu2.pc()->state() == Proto::S_TRANS); fail_unless(pu1.tp()->out() == 0); fail_unless(pu2.tp()->out() == 0); View reg2(0, ViewId(V_REG, uuid1, pu1.pc()->current_view().id().seq() + 1)); reg2.add_member(uuid1, 0); reg2.add_member(uuid2, 0); pu1.pc()->handle_view(reg2); pu2.pc()->handle_view(reg2); fail_unless(pu1.pc()->state() == Proto::S_STATES_EXCH); fail_unless(pu2.pc()->state() == Proto::S_STATES_EXCH); Message msg; get_msg(pu1.tp()->out(), &msg); pu1.pc()->handle_msg(msg, Datagram(), pum1); pu2.pc()->handle_msg(msg, Datagram(), pum1); fail_unless(pu1.pc()->state() == Proto::S_STATES_EXCH); fail_unless(pu2.pc()->state() == Proto::S_STATES_EXCH); get_msg(pu2.tp()->out(), &msg); pu1.pc()->handle_msg(msg, Datagram(), pum2); pu2.pc()->handle_msg(msg, Datagram(), pum2); fail_unless(pu1.pc()->state() == Proto::S_INSTALL); fail_unless(pu2.pc()->state() == Proto::S_INSTALL); View 
tr2(0, ViewId(V_TRANS, pu1.pc()->current_view().id())); tr2.add_member(uuid1, 0); tr2.add_member(uuid2, 0); pu1.pc()->handle_view(tr2); pu2.pc()->handle_view(tr2); fail_unless(pu1.pc()->state() == Proto::S_TRANS); fail_unless(pu2.pc()->state() == Proto::S_TRANS); Message im; if (uuid1 < uuid2) { get_msg(pu1.tp()->out(), &im); pu1.pc()->handle_msg(im, Datagram(), pum1); pu2.pc()->handle_msg(im, Datagram(), pum1); } else { get_msg(pu2.tp()->out(), &im); pu1.pc()->handle_msg(im, Datagram(), pum2); pu2.pc()->handle_msg(im, Datagram(), pum2); } fail_unless(pu1.pc()->state() == Proto::S_TRANS); fail_unless(pu2.pc()->state() == Proto::S_TRANS); View reg3(0, ViewId(V_REG, uuid1, pu1.pc()->current_view().id().seq() + 1)); reg3.add_member(uuid1, 0); reg3.add_member(uuid2, 0); pu1.pc()->handle_view(reg3); pu2.pc()->handle_view(reg3); fail_unless(pu1.pc()->state() == Proto::S_STATES_EXCH); fail_unless(pu2.pc()->state() == Proto::S_STATES_EXCH); get_msg(pu1.tp()->out(), &msg); pu1.pc()->handle_msg(msg, Datagram(), pum1); pu2.pc()->handle_msg(msg, Datagram(), pum1); fail_unless(pu1.pc()->state() == Proto::S_STATES_EXCH); fail_unless(pu2.pc()->state() == Proto::S_STATES_EXCH); get_msg(pu2.tp()->out(), &msg); pu1.pc()->handle_msg(msg, Datagram(), pum2); pu2.pc()->handle_msg(msg, Datagram(), pum2); fail_unless(pu1.pc()->state() == Proto::S_INSTALL); fail_unless(pu2.pc()->state() == Proto::S_INSTALL); if (uuid1 < uuid2) { get_msg(pu1.tp()->out(), &im); pu1.pc()->handle_msg(im, Datagram(), pum1); pu2.pc()->handle_msg(im, Datagram(), pum1); } else { get_msg(pu2.tp()->out(), &im); pu1.pc()->handle_msg(im, Datagram(), pum2); pu2.pc()->handle_msg(im, Datagram(), pum2); } fail_unless(pu1.pc()->state() == Proto::S_PRIM); fail_unless(pu2.pc()->state() == Proto::S_PRIM); } END_TEST START_TEST(test_pc_state2) { log_info << "START (test_pc_state2)"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); UUID uuid1(1); ProtoUpMeta pum1(uuid1); Proto pc1(conf, uuid1, 0); DummyTransport tp1; PCUser pu1(conf, uuid1, &tp1, &pc1); single_boot(0, &pu1); UUID uuid2(2); ProtoUpMeta pum2(uuid2); Proto pc2(conf, uuid2, 0); DummyTransport tp2; PCUser pu2(conf, uuid2, &tp2, &pc2); // n1: PRIM -> TRANS -> STATES_EXCH -> RTR -> PRIM // n2: JOINING -> STATES_EXCH -> RTR -> PRIM double_boot(0, &pu1, &pu2); fail_unless(pu1.pc()->state() == Proto::S_PRIM); fail_unless(pu2.pc()->state() == Proto::S_PRIM); // PRIM -> TRANS -> STATES_EXCH -> TRANS -> STATES_EXCH -> RTR -> PRIM View tr1(0, ViewId(V_TRANS, pu1.pc()->current_view().id())); tr1.add_member(uuid1, 0); tr1.add_member(uuid2, 0); pu1.pc()->handle_view(tr1); pu2.pc()->handle_view(tr1); fail_unless(pu1.pc()->state() == Proto::S_TRANS); fail_unless(pu2.pc()->state() == Proto::S_TRANS); fail_unless(pu1.tp()->out() == 0); fail_unless(pu2.tp()->out() == 0); View reg2(0, ViewId(V_REG, uuid1, pu1.pc()->current_view().id().seq() + 1)); reg2.add_member(uuid1, 0); reg2.add_member(uuid2, 0); pu1.pc()->handle_view(reg2); pu2.pc()->handle_view(reg2); fail_unless(pu1.pc()->state() == Proto::S_STATES_EXCH); fail_unless(pu2.pc()->state() == Proto::S_STATES_EXCH); View tr2(0, ViewId(V_TRANS, pu1.pc()->current_view().id())); tr2.add_member(uuid1, 0); tr2.add_member(uuid2, 0); pu1.pc()->handle_view(tr2); pu2.pc()->handle_view(tr2); fail_unless(pu1.pc()->state() == Proto::S_TRANS); fail_unless(pu2.pc()->state() == Proto::S_TRANS); Message msg; get_msg(pu1.tp()->out(), &msg); pu1.pc()->handle_msg(msg, Datagram(), pum1); pu2.pc()->handle_msg(msg, Datagram(), pum1); 
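// Note: the state messages above arrive while both nodes are already in a
// transitional view; as the assertions below indicate, this does not advance
// the state machine - the nodes stay in S_TRANS and the states exchange is
// restarted only by the next REG view (reg3).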
fail_unless(pu1.pc()->state() == Proto::S_TRANS); fail_unless(pu2.pc()->state() == Proto::S_TRANS); get_msg(pu2.tp()->out(), &msg); pu1.pc()->handle_msg(msg, Datagram(), pum2); pu2.pc()->handle_msg(msg, Datagram(), pum2); fail_unless(pu1.pc()->state() == Proto::S_TRANS); fail_unless(pu2.pc()->state() == Proto::S_TRANS); View reg3(0, ViewId(V_REG, uuid1, pu1.pc()->current_view().id().seq() + 1)); reg3.add_member(uuid1, 0); reg3.add_member(uuid2, 0); pu1.pc()->handle_view(reg3); pu2.pc()->handle_view(reg3); fail_unless(pu1.pc()->state() == Proto::S_STATES_EXCH); fail_unless(pu2.pc()->state() == Proto::S_STATES_EXCH); get_msg(pu1.tp()->out(), &msg); pu1.pc()->handle_msg(msg, Datagram(), pum1); pu2.pc()->handle_msg(msg, Datagram(), pum1); fail_unless(pu1.pc()->state() == Proto::S_STATES_EXCH); fail_unless(pu2.pc()->state() == Proto::S_STATES_EXCH); get_msg(pu2.tp()->out(), &msg); pu1.pc()->handle_msg(msg, Datagram(), pum2); pu2.pc()->handle_msg(msg, Datagram(), pum2); fail_unless(pu1.pc()->state() == Proto::S_INSTALL); fail_unless(pu2.pc()->state() == Proto::S_INSTALL); Message im; if (uuid1 < uuid2) { get_msg(pu1.tp()->out(), &im); pu1.pc()->handle_msg(im, Datagram(), pum1); pu2.pc()->handle_msg(im, Datagram(), pum1); } else { get_msg(pu2.tp()->out(), &im); pu1.pc()->handle_msg(im, Datagram(), pum2); pu2.pc()->handle_msg(im, Datagram(), pum2); } fail_unless(pu1.pc()->state() == Proto::S_PRIM); fail_unless(pu2.pc()->state() == Proto::S_PRIM); } END_TEST START_TEST(test_pc_state3) { log_info << "START (test_pc_state3)"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); UUID uuid1(1); ProtoUpMeta pum1(uuid1); Proto pc1(conf, uuid1, 0); DummyTransport tp1; PCUser pu1(conf, uuid1, &tp1, &pc1); single_boot(0, &pu1); UUID uuid2(2); ProtoUpMeta pum2(uuid2); Proto pc2(conf, uuid2, 0); DummyTransport tp2; PCUser pu2(conf, uuid2, &tp2, &pc2); // n1: PRIM -> TRANS -> STATES_EXCH -> RTR -> PRIM // n2: JOINING -> STATES_EXCH -> RTR -> PRIM double_boot(0, &pu1, &pu2); fail_unless(pu1.pc()->state() == Proto::S_PRIM); fail_unless(pu2.pc()->state() == Proto::S_PRIM); // PRIM -> NON_PRIM -> STATES_EXCH -> RTR -> NON_PRIM -> STATES_EXCH -> ... 
// -> NON_PRIM -> STATES_EXCH -> RTR -> NON_PRIM View tr11(0, ViewId(V_TRANS, pu1.pc()->current_view().id())); tr11.add_member(uuid1, 0); pu1.pc()->handle_view(tr11); View tr12(0, ViewId(V_TRANS, pu1.pc()->current_view().id())); tr12.add_member(uuid2, 0); pu2.pc()->handle_view(tr12); fail_unless(pu1.pc()->state() == Proto::S_TRANS); fail_unless(pu2.pc()->state() == Proto::S_TRANS); fail_unless(pu1.tp()->out() == 0); fail_unless(pu2.tp()->out() == 0); View reg21(0, ViewId(V_REG, uuid1, pu1.pc()->current_view().id().seq() + 1)); reg21.add_member(uuid1, 0); pu1.pc()->handle_view(reg21); fail_unless(pu1.pc()->state() == Proto::S_STATES_EXCH); View reg22(0, ViewId(V_REG, uuid2, pu2.pc()->current_view().id().seq() + 1)); reg22.add_member(uuid2, 0); pu2.pc()->handle_view(reg22); fail_unless(pu2.pc()->state() == Proto::S_STATES_EXCH); Message msg; get_msg(pu1.tp()->out(), &msg); pu1.pc()->handle_msg(msg, Datagram(), pum1); get_msg(pu2.tp()->out(), &msg); pu2.pc()->handle_msg(msg, Datagram(), pum2); fail_unless(pu1.pc()->state() == Proto::S_NON_PRIM); fail_unless(pu2.pc()->state() == Proto::S_NON_PRIM); View tr21(0, ViewId(V_TRANS, pu1.pc()->current_view().id())); tr21.add_member(uuid1, 0); pu1.pc()->handle_view(tr21); View tr22(0, ViewId(V_TRANS, pu2.pc()->current_view().id())); tr22.add_member(uuid2, 0); pu2.pc()->handle_view(tr22); fail_unless(pu1.pc()->state() == Proto::S_TRANS); fail_unless(pu2.pc()->state() == Proto::S_TRANS); fail_unless(pu1.tp()->out() == 0); fail_unless(pu2.tp()->out() == 0); View reg3(0, ViewId(V_REG, uuid1, pu1.pc()->current_view().id().seq() + 1)); reg3.add_member(uuid1, 0); reg3.add_member(uuid2, 0); pu1.pc()->handle_view(reg3); pu2.pc()->handle_view(reg3); fail_unless(pu1.pc()->state() == Proto::S_STATES_EXCH); fail_unless(pu2.pc()->state() == Proto::S_STATES_EXCH); get_msg(pu1.tp()->out(), &msg); pu1.pc()->handle_msg(msg, Datagram(), pum1); pu2.pc()->handle_msg(msg, Datagram(), pum1); fail_unless(pu1.pc()->state() == Proto::S_STATES_EXCH); fail_unless(pu2.pc()->state() == Proto::S_STATES_EXCH); get_msg(pu2.tp()->out(), &msg); pu1.pc()->handle_msg(msg, Datagram(), pum2); pu2.pc()->handle_msg(msg, Datagram(), pum2); fail_unless(pu1.pc()->state() == Proto::S_INSTALL); fail_unless(pu2.pc()->state() == Proto::S_INSTALL); Message im; if (uuid1 < uuid2) { get_msg(pu1.tp()->out(), &im); pu1.pc()->handle_msg(im, Datagram(), pum1); pu2.pc()->handle_msg(im, Datagram(), pum1); } else { get_msg(pu2.tp()->out(), &im); pu1.pc()->handle_msg(im, Datagram(), pum2); pu2.pc()->handle_msg(im, Datagram(), pum2); } fail_unless(pu1.pc()->state() == Proto::S_PRIM); fail_unless(pu2.pc()->state() == Proto::S_PRIM); } END_TEST START_TEST(test_pc_conflicting_prims) { log_info << "START (test_pc_conflicting_prims)"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); UUID uuid1(1); ProtoUpMeta pum1(uuid1); Proto pc1(conf, uuid1, 0); DummyTransport tp1; PCUser pu1(conf, uuid1, &tp1, &pc1); single_boot(0, &pu1); UUID uuid2(2); ProtoUpMeta pum2(uuid2); Proto pc2(conf, uuid2, 0); DummyTransport tp2; PCUser pu2(conf, uuid2, &tp2, &pc2); single_boot(0, &pu2); View tr1(0, ViewId(V_TRANS, pu1.pc()->current_view().id())); tr1.add_member(uuid1, 0); pu1.pc()->handle_view(tr1); View tr2(0, ViewId(V_TRANS, pu2.pc()->current_view().id())); tr2.add_member(uuid2, 0); pu2.pc()->handle_view(tr2); View reg(0, ViewId(V_REG, uuid1, tr1.id().seq() + 1)); reg.add_member(uuid1, 0); reg.add_member(uuid2, 0); pu1.pc()->handle_view(reg); pu2.pc()->handle_view(reg); Message msg1, msg2; /* 
First node must discard msg2 and stay in states exch waiting for * trans view */ get_msg(pu1.tp()->out(), &msg1); get_msg(pu2.tp()->out(), &msg2); fail_unless(pu1.pc()->state() == Proto::S_STATES_EXCH); pu1.pc()->handle_msg(msg1, Datagram(), pum1); pu1.pc()->handle_msg(msg2, Datagram(), pum2); /* Second node must abort */ try { pu2.pc()->handle_msg(msg1, Datagram(), pum1); fail("not aborted"); } catch (Exception& e) { log_info << e.what(); } fail_unless(pu1.tp()->out() == 0); View tr3(0, ViewId(V_TRANS, reg.id())); tr3.add_member(uuid1, 0); pu1.pc()->handle_view(tr3); View reg3(0, ViewId(V_REG, uuid1, tr3.id().seq() + 1)); reg3.add_member(uuid1, 0); pu1.pc()->handle_view(reg3); get_msg(pu1.tp()->out(), &msg1); pu1.pc()->handle_msg(msg1, Datagram(), pum1); get_msg(pu1.tp()->out(), &msg1); pu1.pc()->handle_msg(msg1, Datagram(), pum1); fail_unless(pu1.pc()->state() == Proto::S_PRIM); } END_TEST START_TEST(test_pc_conflicting_prims_npvo) { log_info << "START (test_pc_conflicting_npvo)"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); UUID uuid1(1); ProtoUpMeta pum1(uuid1); Proto pc1(conf, uuid1, 0, URI("pc://?pc.npvo=true")); DummyTransport tp1; PCUser pu1(conf, uuid1, &tp1, &pc1); single_boot(0, &pu1); UUID uuid2(2); ProtoUpMeta pum2(uuid2); Proto pc2(conf, uuid2, 0, URI("pc://?pc.npvo=true")); DummyTransport tp2; PCUser pu2(conf, uuid2, &tp2, &pc2); single_boot(0, &pu2); View tr1(0, ViewId(V_TRANS, pu1.pc()->current_view().id())); tr1.add_member(uuid1, 0); pu1.pc()->handle_view(tr1); View tr2(0, ViewId(V_TRANS, pu2.pc()->current_view().id())); tr2.add_member(uuid2, 0); pu2.pc()->handle_view(tr2); View reg(0, ViewId(V_REG, uuid1, tr1.id().seq() + 1)); reg.add_member(uuid1, 0); reg.add_member(uuid2, 0); pu1.pc()->handle_view(reg); pu2.pc()->handle_view(reg); Message msg1, msg2; /* First node must discard msg2 and stay in states exch waiting for * trans view */ get_msg(pu1.tp()->out(), &msg1); get_msg(pu2.tp()->out(), &msg2); fail_unless(pu1.pc()->state() == Proto::S_STATES_EXCH); pu1.pc()->handle_msg(msg1, Datagram(), pum1); pu2.pc()->handle_msg(msg1, Datagram(), pum1); /* First node must abort */ try { pu1.pc()->handle_msg(msg2, Datagram(), pum2); fail("not aborted"); } catch (Exception& e) { log_info << e.what(); } fail_unless(pu2.tp()->out() == 0); View tr3(0, ViewId(V_TRANS, reg.id())); tr3.add_member(uuid2, 0); pu2.pc()->handle_view(tr3); View reg3(0, ViewId(V_REG, uuid2, tr3.id().seq() + 1)); reg3.add_member(uuid2, 0); pu2.pc()->handle_view(reg3); get_msg(pu2.tp()->out(), &msg2); pu2.pc()->handle_msg(msg2, Datagram(), pum2); get_msg(pu2.tp()->out(), &msg2); pu2.pc()->handle_msg(msg2, Datagram(), pum2); fail_unless(pu2.pc()->state() == Proto::S_PRIM); } END_TEST static void join_node(PropagationMatrix* p, DummyNode* n, bool first) { log_info << first; gu_trace(p->insert_tp(n)); gu_trace(n->connect(first)); } static void send_n(DummyNode* node, const size_t n) { for (size_t i = 0; i < n; ++i) { gu_trace(node->send()); } } static void set_cvi(vector& nvec, size_t i_begin, size_t i_end, size_t seq, ViewType type) { for (size_t i = i_begin; i <= i_end; ++i) { nvec[i]->set_cvi(ViewId(type, type == V_NON_PRIM ? nvec[0]->uuid() : nvec[i_begin]->uuid(), static_cast(type == V_NON_PRIM ? 
seq - 1 : seq))); } } struct InitGuConf { explicit InitGuConf(gu::Config& conf) { gcomm::Conf::register_params(conf); } }; static gu::Config& static_gu_conf() { static gu::Config conf; static InitGuConf init(conf); return conf; } static DummyNode* create_dummy_node(size_t idx, int version, const string& suspect_timeout = "PT1H", const string& inactive_timeout = "PT1H", const string& retrans_period = "PT20M", int weight = 1) { gu::Config& gu_conf(static_gu_conf()); gu::ssl_register_params(gu_conf); gcomm::Conf::register_params(gu_conf); const string conf = "evs://?" + Conf::EvsViewForgetTimeout + "=PT1H&" + Conf::EvsInactiveCheckPeriod + "=" + to_string(Period(suspect_timeout)/3) + "&" + Conf::EvsSuspectTimeout + "=" + suspect_timeout + "&" + Conf::EvsInactiveTimeout + "=" + inactive_timeout + "&" + Conf::EvsKeepalivePeriod + "=" + retrans_period + "&" + Conf::EvsJoinRetransPeriod + "=" + retrans_period + "&" + Conf::EvsInstallTimeout + "=" + inactive_timeout + "&" + Conf::PcWeight + "=" + gu::to_string(weight) + "&" + Conf::EvsVersion + "=" + gu::to_string(version) + "&" + Conf::EvsInfoLogMask + "=" + "0x3"; list protos; UUID uuid(static_cast(idx)); protos.push_back(new DummyTransport(uuid, false)); protos.push_back(new evs::Proto(gu_conf, uuid, 0, conf)); protos.push_back(new Proto(gu_conf, uuid, 0, conf)); return new DummyNode(gu_conf, idx, protos); } namespace { gcomm::pc::Proto* pc_from_dummy(DummyNode* dn) { return reinterpret_cast(dn->protos().back()); } } static ViewType view_type(const size_t i_begin, const size_t i_end, const size_t n_nodes) { return (((i_end - i_begin + 1)*2 > n_nodes) ? V_PRIM : V_NON_PRIM); } START_TEST(test_pc_split_merge) { log_info << "START (test_pc_split_merge)"; size_t n_nodes(5); vector dn; PropagationMatrix prop; const string suspect_timeout("PT0.35S"); const string inactive_timeout("PT0.7S"); const string retrans_period("PT0.1S"); uint32_t view_seq = 0; mark_point(); for (size_t i = 0; i < n_nodes; ++i) { dn.push_back(create_dummy_node(i + 1, 0, suspect_timeout, inactive_timeout, retrans_period)); gu_trace(join_node(&prop, dn[i], i == 0)); set_cvi(dn, 0, i, ++view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(false)); } mark_point(); for (size_t i = 1; i < n_nodes; ++i) { for (size_t j = 0; j < i; ++j) { for (size_t k = i; k < n_nodes; ++k) { prop.split(j + 1, k + 1); } } ++view_seq; log_info << "split " << i << " view seq " << view_seq; set_cvi(dn, 0, i - 1, view_seq, view_type(0, i - 1, n_nodes)); set_cvi(dn, i, n_nodes - 1, view_seq, view_type(i,n_nodes - 1,n_nodes)); gu_trace(prop.propagate_until_cvi(true)); for (size_t j = 0; j < i; ++j) { for (size_t k = i; k < n_nodes; ++k) { prop.merge(j + 1, k + 1); } } ++view_seq; log_info << "merge " << i << " view seq " << view_seq; set_cvi(dn, 0, n_nodes - 1, view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(true)); } mark_point(); check_trace(dn); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_pc_split_merge_w_user_msg) { log_info << "START (test_pc_split_merge_w_user_msg)"; size_t n_nodes(5); vector dn; PropagationMatrix prop; const string suspect_timeout("PT0.35S"); const string inactive_timeout("PT0.7S"); const string retrans_period("PT0.1S"); uint32_t view_seq = 0; for (size_t i = 0; i < n_nodes; ++i) { dn.push_back(create_dummy_node(i + 1, 0, suspect_timeout, inactive_timeout, retrans_period)); gu_trace(join_node(&prop, dn[i], i == 0)); set_cvi(dn, 0, i, ++view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(false)); } for (size_t i = 1; i < n_nodes; ++i) { for 
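// Note: view_type() above encodes the simple-majority rule these split/merge
// tests rely on: a partition of k members out of n stays primary only if
// 2*k > n, so with n_nodes == 5 a 3-node side expects V_PRIM and the 2-node
// side expects V_NON_PRIM.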
(size_t j = 0; j < n_nodes; ++j) { send_n(dn[j], ::rand() % 5); } for (size_t j = 0; j < i; ++j) { for (size_t k = i; k < n_nodes; ++k) { prop.split(j + 1, k + 1); } } ++view_seq; log_info << "split " << i << " view seq " << view_seq; set_cvi(dn, 0, i - 1, view_seq, view_type(0, i - 1, n_nodes)); set_cvi(dn, i, n_nodes - 1, view_seq, view_type(i, n_nodes - 1, n_nodes)); gu_trace(prop.propagate_until_cvi(true)); for (size_t j = 0; j < n_nodes; ++j) { send_n(dn[j], ::rand() % 5); } for (size_t j = 0; j < i; ++j) { for (size_t k = i; k < n_nodes; ++k) { prop.merge(j + 1, k + 1); } } ++view_seq; log_info << "merge " << i << " view seq " << view_seq; set_cvi(dn, 0, n_nodes - 1, view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(true)); } check_trace(dn); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_pc_complete_split_merge) { log_info << "START (test_pc_complete_split_merge)"; size_t n_nodes(5); vector dn; PropagationMatrix prop; const string suspect_timeout("PT0.35S"); const string inactive_timeout("PT0.31S"); const string retrans_period("PT0.1S"); uint32_t view_seq = 0; for (size_t i = 0; i < n_nodes; ++i) { dn.push_back(create_dummy_node(i + 1, 0, suspect_timeout, inactive_timeout, retrans_period)); log_info << "i " << i; gu_trace(join_node(&prop, dn[i], i == 0)); set_cvi(dn, 0, i, ++view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(false)); } for (size_t i = 0; i < 5; ++i) { for (size_t j = 0; j < n_nodes; ++j) { send_n(dn[j], ::rand() % 5); } prop.propagate_n(9 + ::rand() % 5); for (size_t j = 0; j < n_nodes; ++j) { for (size_t k = 0; k < n_nodes; ++k) { if (j != k) { prop.split(j + 1, k + 1); } } } ++view_seq; log_info << "split " << i << " view seq " << view_seq; set_cvi(dn, 0, n_nodes - 1, view_seq, V_NON_PRIM); gu_trace(prop.propagate_until_cvi(true)); for (size_t j = 0; j < n_nodes; ++j) { for (size_t k = 0; k < n_nodes; ++k) { if (j != k) { prop.merge(j + 1, k + 1); } } } ++view_seq; log_info << "merge " << i << " view seq " << view_seq; set_cvi(dn, 0, n_nodes - 1, view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(true)); } check_trace(dn); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_pc_protocol_upgrade) { log_info << "START (test_pc_protocol_upgrade)"; vector dn; PropagationMatrix prop; uint32_t view_seq(0); for (int i(0); i <= GCOMM_PROTOCOL_MAX_VERSION; ++i) { dn.push_back(create_dummy_node(i + 1, i)); gu_trace(join_node(&prop, dn[i], i == 0)); set_cvi(dn, 0, i, view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(false)); ++view_seq; for (int j(0); j <= i; ++j) { fail_unless(pc_from_dummy(dn[j])->current_view().version() == 0); gu_trace(send_n(dn[j], 5 + ::rand() % 4)); } } for (int i(0); i < GCOMM_PROTOCOL_MAX_VERSION; ++i) { for (int j(i); j <= GCOMM_PROTOCOL_MAX_VERSION; ++j) { gu_trace(send_n(dn[j], 5 + ::rand() % 4)); } dn[i]->close(); dn[i]->set_cvi(V_NON_PRIM); set_cvi(dn, i + 1, GCOMM_PROTOCOL_MAX_VERSION, view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(true)); ++view_seq; for (int j(i + 1); j <= GCOMM_PROTOCOL_MAX_VERSION; ++j) { gu_trace(send_n(dn[j], 5 + ::rand() % 4)); } gu_trace(prop.propagate_until_empty()); } fail_unless(pc_from_dummy(dn[GCOMM_PROTOCOL_MAX_VERSION])->current_view().version() == GCOMM_PROTOCOL_MAX_VERSION); check_trace(dn); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST class PCUser2 : public Toplay { Transport* tp_; bool sending_; uint8_t my_type_; bool send_; Period send_period_; Date next_send_; PCUser2(const PCUser2&); void operator=(const PCUser2); public: 
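// Note: unlike PCUser above, PCUser2 below drives a real pc:// transport
// stack. Once it sees a primary view it keeps sending 16-byte payloads from
// handle_timers() roughly every 50ms, mixing O_SAFE and O_LOCAL_CAUSAL
// ordering, and verifies that messages echoed back from its own UUID carry
// the randomly chosen user type set in the constructor.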
PCUser2(Protonet& net, const string& uri, const bool send = true) : Toplay(net.conf()), tp_(Transport::create(net, uri)), sending_(false), my_type_(static_cast(1 + ::rand()%4)), send_(send), send_period_("PT0.05S"), next_send_(Date::max()) { } ~PCUser2() { delete tp_; } void start() { gcomm::connect(tp_, this); tp_->connect(); gcomm::disconnect(tp_, this); tp_->pstack().push_proto(this); } void stop() { sending_ = false; tp_->pstack().pop_proto(this); gcomm::connect(tp_, this); tp_->close(); gcomm::disconnect(tp_, this); } void handle_up(const void* cid, const Datagram& rb, const ProtoUpMeta& um) { if (um.has_view()) { const View& view(um.view()); log_info << view; if (view.type() == V_PRIM && send_ == true) { sending_ = true; next_send_ = Date::now() + send_period_; } } else { // log_debug << "received message: " << um.get_to_seq(); fail_unless(rb.len() - rb.offset() == 16); if (um.source() == tp_->uuid()) { fail_unless(um.user_type() == my_type_); } } } Protostack& pstack() { return tp_->pstack(); } Date handle_timers() { Date now(Date::now()); if (now >= next_send_) { byte_t buf[16]; memset(buf, 0xa, sizeof(buf)); Datagram dg(Buffer(buf, buf + sizeof(buf))); // dg.get_header().resize(128); // dg.set_header_offset(128); int ret = send_down(dg, ProtoDownMeta(my_type_, rand() % 10 == 0 ? O_SAFE : O_LOCAL_CAUSAL)); if (ret != 0) { // log_debug << "send down " << ret; } next_send_ = next_send_ + send_period_; } return next_send_; } std::string listen_addr() const { return tp_->listen_addr(); } }; START_TEST(test_pc_transport) { log_info << "START (test_pc_transport)"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); auto_ptr net(Protonet::create(conf)); PCUser2 pu1(*net, "pc://?" "evs.info_log_mask=0xff&" "gmcast.listen_addr=tcp://127.0.0.1:0&" "gmcast.group=pc&" "gmcast.time_wait=PT0.5S&" "pc.recovery=0&" "node.name=n1"); gu_conf_self_tstamp_on(); pu1.start(); net->event_loop(5*Sec); PCUser2 pu2(*net, std::string("pc://") + pu1.listen_addr().erase(0, strlen("tcp://")) + "?evs.info_log_mask=0xff&" "gmcast.group=pc&" "gmcast.time_wait=PT0.5S&" "gmcast.listen_addr=tcp://127.0.0.1:0&" "pc.recovery=0&" "node.name=n2"); PCUser2 pu3(*net, std::string("pc://") + pu1.listen_addr().erase(0, strlen("tcp://")) + "?evs.info_log_mask=0xff&" "gmcast.group=pc&" "gmcast.time_wait=PT0.5S&" "gmcast.listen_addr=tcp://127.0.0.1:0&" "pc.recovery=0&" "node.name=n3"); pu2.start(); net->event_loop(5*Sec); pu3.start(); net->event_loop(5*Sec); pu3.stop(); net->event_loop(5*Sec); pu2.stop(); net->event_loop(5*Sec); pu1.stop(); log_info << "cleanup"; net->event_loop(0); log_info << "finished"; } END_TEST START_TEST(test_trac_191) { log_info << "START (test_trac_191)"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); UUID uuid1(1), uuid2(2), uuid3(3), uuid4(4); Proto p(conf, uuid4, 0); DummyTransport tp(uuid4, true); // gcomm::connect(&tp, &p); PCUser pu(conf, uuid4, &tp, &p); p.shift_to(Proto::S_NON_PRIM); View t0(0, ViewId(V_TRANS, uuid4, 0)); t0.add_member(uuid4, 0); p.handle_view(t0); View r5(0, ViewId(V_REG, uuid2, 5)); r5.add_member(uuid3, 0); r5.add_member(uuid4, 0); p.handle_view(r5); Datagram* dg = tp.out(); fail_unless(dg != 0); Message sm4; get_msg(dg, &sm4); fail_unless(sm4.type() == Message::T_STATE); // Handle first sm from uuid3 StateMessage sm3(0); pc::NodeMap& im3(sm3.node_map()); im3.insert_unique(make_pair(uuid1, pc::Node(true, false, false, 254, ViewId(V_PRIM, uuid1, 3), 20))); im3.insert_unique(make_pair(uuid2, pc::Node(true, 
false, false, 254, ViewId(V_PRIM, uuid1, 3), 20))); im3.insert_unique(make_pair(uuid3, pc::Node(false, false, false, 254, ViewId(V_PRIM, uuid1, 3), 25))); p.handle_msg(sm3, Datagram(), ProtoUpMeta(uuid3)); p.handle_msg(sm4, Datagram(), ProtoUpMeta(uuid4)); } END_TEST START_TEST(test_trac_413) { log_info << "START (test_trac_413)"; class TN : gcomm::Toplay // test node { public: TN(gu::Config conf, const UUID& uuid) : Toplay(conf), p_(conf, uuid, 0), tp_(uuid, true) { gcomm::connect(&tp_, &p_); gcomm::connect(&p_, this); } const UUID& uuid() const { return p_.uuid(); } gcomm::pc::Proto& p() { return p_; } DummyTransport& tp() { return tp_; } void handle_up(const void* id, const Datagram& dg, const gcomm::ProtoUpMeta& um) { // void } private: pc::Proto p_; DummyTransport tp_; }; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); TN n1(conf, 1), n2(conf, 2), n3(conf, 3); // boot to first prim { gcomm::View tr(0, ViewId(V_TRANS, n1.uuid(), 0)); tr.members().insert_unique(std::make_pair(n1.uuid(), 0)); n1.p().connect(true); n1.p().handle_view(tr); Datagram* dg(n1.tp().out()); fail_unless(dg == 0 && n1.p().state() == gcomm::pc::Proto::S_TRANS); gcomm::View reg(0, ViewId(V_REG, n1.uuid(), 1)); reg.members().insert_unique(std::make_pair(n1.uuid(), 0)); n1.p().handle_view(reg); dg = n1.tp().out(); fail_unless(dg != 0 && n1.p().state() == gcomm::pc::Proto::S_STATES_EXCH); n1.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n1.uuid())); delete dg; dg = n1.tp().out(); fail_unless(dg != 0 && n1.p().state() == gcomm::pc::Proto::S_INSTALL); n1.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n1.uuid())); delete dg; dg = n1.tp().out(); fail_unless(dg == 0 && n1.p().state() == gcomm::pc::Proto::S_PRIM); } // add remaining nodes { gcomm::View tr(0, ViewId(V_TRANS, n1.uuid(), 1)); tr.members().insert_unique(std::make_pair(n1.uuid(), 0)); n1.p().handle_view(tr); } { gcomm::View tr(0, ViewId(V_TRANS, n2.uuid(), 0)); tr.members().insert_unique(std::make_pair(n2.uuid(), 0)); n2.p().connect(false); n2.p().handle_view(tr); } { gcomm::View tr(0, ViewId(V_TRANS, n3.uuid(), 0)); tr.members().insert_unique(std::make_pair(n3.uuid(), 0)); n3.p().connect(false); n3.p().handle_view(tr); } { gcomm::View reg(0, ViewId(V_REG, n1.uuid(), 2)); reg.members().insert_unique(std::make_pair(n1.uuid(), 0)); reg.members().insert_unique(std::make_pair(n2.uuid(), 0)); reg.members().insert_unique(std::make_pair(n3.uuid(), 0)); n1.p().handle_view(reg); n2.p().handle_view(reg); n3.p().handle_view(reg); Datagram* dg(n1.tp().out()); fail_unless(dg != 0); n1.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n1.uuid())); n2.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n1.uuid())); n3.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n1.uuid())); delete dg; dg = n2.tp().out(); fail_unless(dg != 0); n1.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n2.uuid())); n2.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n2.uuid())); n3.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n2.uuid())); delete dg; dg = n3.tp().out(); fail_unless(dg != 0); n1.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n3.uuid())); n2.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n3.uuid())); n3.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n3.uuid())); delete dg; dg = n1.tp().out(); fail_unless(dg != 0); n1.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n1.uuid())); n2.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n1.uuid())); n3.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n1.uuid())); delete dg; fail_unless(n1.tp().out() == 0 && n1.p().state() == gcomm::pc::Proto::S_PRIM); fail_unless(n2.tp().out() == 0 && n2.p().state() == 
gcomm::pc::Proto::S_PRIM); fail_unless(n3.tp().out() == 0 && n3.p().state() == gcomm::pc::Proto::S_PRIM); } mark_point(); // drop n1 from view and deliver only state messages in // the following reg view { gcomm::View tr(0, gcomm::ViewId(V_TRANS, n1.uuid(), 2)); tr.members().insert_unique(std::make_pair(n2.uuid(), 0)); tr.members().insert_unique(std::make_pair(n3.uuid(), 0)); n2.p().handle_view(tr); n3.p().handle_view(tr); gcomm::View reg(0, gcomm::ViewId(V_REG, n2.uuid(), 3)); reg.members().insert_unique(std::make_pair(n2.uuid(), 0)); reg.members().insert_unique(std::make_pair(n3.uuid(), 0)); n2.p().handle_view(reg); n3.p().handle_view(reg); Datagram* dg(n2.tp().out()); n2.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n2.uuid())); n3.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n2.uuid())); delete dg; dg = n3.tp().out(); n2.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n3.uuid())); n3.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n3.uuid())); delete dg; } mark_point(); // drop n2 from view and make sure that n3 ends in non-prim { gcomm::View tr(0, gcomm::ViewId(V_TRANS, n2.uuid(), 3)); tr.members().insert_unique(std::make_pair(n3.uuid(), 0)); n3.p().handle_view(tr); fail_unless(n3.tp().out() == 0 && n3.p().state() == gcomm::pc::Proto::S_TRANS); gcomm::View reg(0, gcomm::ViewId(V_REG, n3.uuid(), 4)); reg.members().insert_unique(std::make_pair(n3.uuid(), 0)); n3.p().handle_view(reg); fail_unless(n3.p().state() == gcomm::pc::Proto::S_STATES_EXCH); Datagram* dg(n3.tp().out()); fail_unless(dg != 0); n3.p().handle_up(0, *dg, ProtoUpMeta(n3.uuid())); dg = n3.tp().out(); fail_unless(dg == 0 && n3.p().state() == gcomm::pc::Proto::S_NON_PRIM, "%p %d", dg, n3.p().state()); } } END_TEST START_TEST(test_fifo_violation) { log_info << "START (test_fifo_violation)"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); UUID uuid1(1); ProtoUpMeta pum1(uuid1); Proto pc1(conf, uuid1, 0); DummyTransport tp1; PCUser pu1(conf, uuid1, &tp1, &pc1); single_boot(0, &pu1); assert(pc1.state() == Proto::S_PRIM); pu1.send(); pu1.send(); Datagram* dg1(tp1.out()); fail_unless(dg1 != 0); Datagram* dg2(tp1.out()); fail_unless(dg2 != 0); try { pc1.handle_up(0, *dg2, ProtoUpMeta(uuid1, ViewId(), 0, 0xff, O_SAFE)); fail(""); } catch (Exception& e) { fail_unless(e.get_errno() == ENOTRECOVERABLE); } delete dg1; delete dg2; } END_TEST START_TEST(test_checksum) { log_info << "START (test_checksum)"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); conf.set(Conf::PcChecksum, gu::to_string(true)); UUID uuid1(1); ProtoUpMeta pum1(uuid1); Proto pc1(conf, uuid1, 0); DummyTransport tp1; PCUser pu1(conf, uuid1, &tp1, &pc1); single_boot(0, &pu1); assert(pc1.state() == Proto::S_PRIM); pu1.send(); Datagram* dg(tp1.out()); fail_unless(dg != 0); dg->normalize(); pc1.handle_up(0, *dg, ProtoUpMeta(uuid1)); delete dg; pu1.send(); dg = tp1.out(); fail_unless(dg != 0); dg->normalize(); *(&dg->payload()[0] + dg->payload().size() - 1) ^= 0x10; try { pc1.handle_up(0, *dg, ProtoUpMeta(uuid1)); fail(""); } catch (Exception& e) { fail_unless(e.get_errno() == ENOTRECOVERABLE); } delete dg; } END_TEST START_TEST(test_set_param) { log_info << "START (test_pc_transport)"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); auto_ptr net(Protonet::create(conf)); PCUser2 pu1(*net, "pc://?" 
"evs.info_log_mask=0xff&" "gmcast.listen_addr=tcp://127.0.0.1:0&" "gmcast.group=pc&" "gmcast.time_wait=PT0.5S&" "pc.recovery=0&" "node.name=n1"); pu1.start(); // no such a parameter fail_unless(net->set_param("foo.bar", "1") == false); const evs::seqno_t send_window( gu::from_string(conf.get("evs.send_window"))); const evs::seqno_t user_send_window( gu::from_string(conf.get("evs.user_send_window"))); try { net->set_param("evs.send_window", gu::to_string(user_send_window - 1)); fail("exception not thrown"); } catch (gu::Exception& e) { fail_unless(e.get_errno() == ERANGE, "%d: %s", e.get_errno(), e.what()); } try { net->set_param("evs.user_send_window", gu::to_string(send_window + 1)); fail("exception not thrown"); } catch (gu::Exception& e) { fail_unless(e.get_errno() == ERANGE, "%d: %s", e.get_errno(), e.what()); } // Note: These checks may have to change if defaults are changed fail_unless(net->set_param( "evs.send_window", gu::to_string(send_window - 1)) == true); fail_unless(gu::from_string(conf.get("evs.send_window")) == send_window - 1); fail_unless(net->set_param( "evs.user_send_window", gu::to_string(user_send_window + 1)) == true); fail_unless(gu::from_string( conf.get("evs.user_send_window")) == user_send_window + 1); pu1.stop(); } END_TEST START_TEST(test_trac_599) { class D : public gcomm::Toplay { public: D(gu::Config& conf) : gcomm::Toplay(conf) { } void handle_up(const void* id, const Datagram& dg, const gcomm::ProtoUpMeta& um) { } }; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); D d(conf); std::auto_ptr pnet(gcomm::Protonet::create(conf)); std::auto_ptr tp( gcomm::Transport::create (*pnet,"pc://?gmcast.group=test&gmcast.listen_addr=tcp://127.0.0.1:0" "&pc.recovery=0")); gcomm::connect(tp.get(), &d); gu::Buffer buf(10); Datagram dg(buf); int err; err = tp->send_down(dg, gcomm::ProtoDownMeta()); fail_unless(err == ENOTCONN, "%d", err); tp->connect(true); buf.resize(tp->mtu()); Datagram dg2(buf); err = tp->send_down(dg2, gcomm::ProtoDownMeta()); fail_unless(err == 0, "%d", err); buf.resize(buf.size() + 1); Datagram dg3(buf); err = tp->send_down(dg3, gcomm::ProtoDownMeta()); fail_unless(err == EMSGSIZE, "%d", err); pnet->event_loop(gu::datetime::Sec); tp->close(); } END_TEST // test for forced teardown START_TEST(test_trac_620) { gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); auto_ptr net(Protonet::create(conf)); Transport* tp(Transport::create(*net, "pc://?" 
"evs.info_log_mask=0xff&" "gmcast.listen_addr=tcp://127.0.0.1:0&" "gmcast.group=pc&" "gmcast.time_wait=PT0.5S&" "pc.recovery=0&" "node.name=n1")); class D : public gcomm::Toplay { public: D(gu::Config& conf) : gcomm::Toplay(conf) { } void handle_up(const void* id, const Datagram& dg, const gcomm::ProtoUpMeta& um) { } }; D d(conf); gcomm::connect(tp, &d); tp->connect(true); tp->close(true); gcomm::disconnect(tp, &d); delete tp; } END_TEST START_TEST(test_trac_277) { log_info << "START (test_trac_277)"; size_t n_nodes(3); vector dn; PropagationMatrix prop; const string suspect_timeout("PT0.35S"); const string inactive_timeout("PT0.7S"); const string retrans_period("PT0.1S"); uint32_t view_seq = 0; for (size_t i = 0; i < n_nodes; ++i) { dn.push_back(create_dummy_node(i + 1, 0, suspect_timeout, inactive_timeout, retrans_period)); gu_trace(join_node(&prop, dn[i], i == 0)); set_cvi(dn, 0, i, ++view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(false)); } log_info << "generate messages"; send_n(dn[0], 1); send_n(dn[1], 1); send_n(dn[2], 1); gu_trace(prop.propagate_until_empty()); log_info << "isolate 3"; prop.split(1, 3); prop.split(2, 3); ++view_seq; set_cvi(dn, 0, 1, view_seq, V_PRIM); set_cvi(dn, 2, 2, view_seq, V_NON_PRIM); gu_trace(prop.propagate_until_cvi(true)); log_info << "isolate 1 and 2"; ++view_seq; prop.split(1, 2); set_cvi(dn, 0, 1, view_seq, V_NON_PRIM); gu_trace(prop.propagate_until_cvi(true)); log_info << "merge 1 and 2"; ++view_seq; prop.merge(1, 2); set_cvi(dn, 0, 1, view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(true)); log_info << "merge 3"; ++view_seq; prop.merge(1, 3); prop.merge(2, 3); set_cvi(dn, 0, 2, view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(true)); check_trace(dn); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST // This test checks the case when another node of two node cluster // crashes or becomes completely isolated and prim view of cluster // is established by starting third instance directly in prim mode. START_TEST(test_trac_622_638) { log_info << "START (test_trac_622_638)"; vector dn; PropagationMatrix prop; const string suspect_timeout("PT0.35S"); const string inactive_timeout("PT0.7S"); const string retrans_period("PT0.1S"); uint32_t view_seq = 0; // Create two node cluster and make it split. First node is // considered crashed after split (stay isolated in non-prim). dn.push_back(create_dummy_node(1, 0, suspect_timeout, inactive_timeout, retrans_period)); gu_trace(join_node(&prop, dn[0], true)); set_cvi(dn, 0, 0, ++view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(false)); dn.push_back(create_dummy_node(2, 0, suspect_timeout, inactive_timeout, retrans_period)); gu_trace(join_node(&prop, dn[1], false)); set_cvi(dn, 0, 1, ++view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(false)); log_info << "generate messages"; send_n(dn[0], 1); send_n(dn[1], 1); gu_trace(prop.propagate_until_empty()); log_info << "isolate 1 and 2"; prop.split(1, 2); ++view_seq; set_cvi(dn, 0, 0, view_seq, V_NON_PRIM); set_cvi(dn, 1, 1, view_seq, V_NON_PRIM); gu_trace(prop.propagate_until_cvi(true)); // Add third node which will be connected with node 2. This will // be started with prim status. 
dn.push_back(create_dummy_node(3, 0, suspect_timeout, inactive_timeout, retrans_period)); gu_trace(join_node(&prop, dn[2], true)); prop.split(1, 3); // avoid 1 <-> 3 communication
++view_seq; set_cvi(dn, 1, 2, view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(false)); check_trace(dn); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST
START_TEST(test_weighted_quorum) { log_info << "START (test_weighted_quorum)"; size_t n_nodes(3); vector<DummyNode*> dn; PropagationMatrix prop; const string suspect_timeout("PT0.35S"); const string inactive_timeout("PT0.7S"); const string retrans_period("PT0.1S"); uint32_t view_seq = 0; for (size_t i = 0; i < n_nodes; ++i) { dn.push_back(create_dummy_node(i + 1, 0, suspect_timeout, inactive_timeout, retrans_period, i)); gu_trace(join_node(&prop, dn[i], i == 0)); set_cvi(dn, 0, i, ++view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(false)); }
// split node 3 (weight 2) out, node 3 should remain in prim while
// nodes 1 and 2 (weights 0 + 1 = 1) should end up in non-prim
prop.split(1, 3); prop.split(2, 3); ++view_seq; set_cvi(dn, 0, 1, view_seq, V_NON_PRIM); set_cvi(dn, 2, 2, view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(true)); } END_TEST
//
// The scenario is the following (before fix):
//
// - Two nodes 2 and 3 started with weights 1
// - Third node 1 with weight 3 is brought in the cluster
//   (becomes representative)
// - Partitioning to (1) and (2, 3) happens so that INSTALL message is
//   delivered on 2 and 3 in TRANS and on 1 in REG
// - Node 1 forms PC
// - Nodes 2 and 3 renegotiate and form PC too because node 1 was not present
//   in the previous PC
//
// What should happen is that nodes 2 and 3 recompute quorum on handling
// install message and shift to non-PC
//
START_TEST(test_weighted_partitioning_1) { log_info << "START (test_weighted_partitioning_1)"; gu::Config conf3; gu::ssl_register_params(conf3); gcomm::Conf::register_params(conf3); conf3.set("pc.weight", "1"); UUID uuid3(3); ProtoUpMeta pum3(uuid3); Proto pc3(conf3, uuid3, 0); DummyTransport tp3; PCUser pu3(conf3, uuid3, &tp3, &pc3); single_boot(0, &pu3); gu::Config conf2; gu::ssl_register_params(conf2); gcomm::Conf::register_params(conf2); conf2.set("pc.weight", "1"); UUID uuid2(2); ProtoUpMeta pum2(uuid2); Proto pc2(conf2, uuid2, 0); DummyTransport tp2; PCUser pu2(conf2, uuid2, &tp2, &pc2); double_boot(0, &pu3, &pu2); gu::Config conf1; gu::ssl_register_params(conf1); gcomm::Conf::register_params(conf1); conf1.set("pc.weight", "3"); UUID uuid1(1); ProtoUpMeta pum1(uuid1); Proto pc1(conf1, uuid1, 0); DummyTransport tp1; PCUser pu1(conf1, uuid1, &tp1, &pc1);
// trans views
{ View tr1(0, ViewId(V_TRANS, uuid1, 0)); tr1.add_member(uuid1, 0); pu1.pc()->connect(false); ProtoUpMeta um1(UUID::nil(), ViewId(), &tr1); pu1.pc()->handle_up(0, Datagram(), um1); View tr23(0, ViewId(V_TRANS, pu2.pc()->current_view().id())); tr23.add_member(uuid2, 0); tr23.add_member(uuid3, 0); ProtoUpMeta um23(UUID::nil(), ViewId(), &tr23); pu2.pc()->handle_up(0, Datagram(), um23); pu3.pc()->handle_up(0, Datagram(), um23); }
// reg view
{ View reg(0, ViewId(V_REG, uuid1, pu2.pc()->current_view().id().seq() + 1)); reg.add_member(uuid1, 0); reg.add_member(uuid2, 0); reg.add_member(uuid3, 0); ProtoUpMeta um(UUID::nil(), ViewId(), &reg); pu1.pc()->handle_up(0, Datagram(), um); pu2.pc()->handle_up(0, Datagram(), um); pu3.pc()->handle_up(0, Datagram(), um); }
// states exch
{ Datagram* dg(pu1.tp()->out()); fail_unless(dg != 0); pu1.pc()->handle_up(0, *dg, ProtoUpMeta(uuid1)); pu2.pc()->handle_up(0, *dg,
ProtoUpMeta(uuid1)); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(uuid1)); delete dg; dg = pu2.tp()->out(); fail_unless(dg != 0); pu1.pc()->handle_up(0, *dg, ProtoUpMeta(uuid2)); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(uuid2)); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(uuid2)); delete dg; dg = pu3.tp()->out(); fail_unless(dg != 0); pu1.pc()->handle_up(0, *dg, ProtoUpMeta(uuid3)); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(uuid3)); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(uuid3)); delete dg; fail_unless(pu2.tp()->out() == 0); fail_unless(pu3.tp()->out() == 0); }
// install msg
{ Datagram* dg(pu1.tp()->out()); fail_unless(dg != 0); pu1.pc()->handle_up(0, *dg, ProtoUpMeta(uuid1)); fail_unless(pu1.pc()->state() == Proto::S_PRIM);
// trans view for 2 and 3
View tr23(0, ViewId(V_TRANS, pu2.pc()->current_view().id())); tr23.add_member(uuid2, 0); tr23.add_member(uuid3, 0); tr23.add_partitioned(uuid1, 0); ProtoUpMeta trum23(UUID::nil(), ViewId(), &tr23); pu2.pc()->handle_up(0, Datagram(), trum23); pu3.pc()->handle_up(0, Datagram(), trum23);
// 2 and 3 handle install
pu2.pc()->handle_up(0, *dg, ProtoUpMeta(uuid1)); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(uuid1)); delete dg;
// reg view for 2 and 3
View reg23(0, ViewId(V_REG, uuid2, pu2.pc()->current_view().id().seq() + 1)); reg23.add_member(uuid2, 0); reg23.add_member(uuid3, 0); ProtoUpMeta rum23(UUID::nil(), ViewId(), &reg23); pu2.pc()->handle_up(0, Datagram(), rum23); pu3.pc()->handle_up(0, Datagram(), rum23);
// states exch
dg = pu2.tp()->out(); fail_unless(dg != 0); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(uuid2)); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(uuid2)); delete dg; dg = pu3.tp()->out(); fail_unless(dg != 0); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(uuid3)); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(uuid3)); delete dg;
// 2 and 3 should end up in non prim
fail_unless(pu2.pc()->state() == Proto::S_NON_PRIM, "state: %s", Proto::to_string(pu2.pc()->state()).c_str()); fail_unless(pu3.pc()->state() == Proto::S_NON_PRIM, "state: %s", Proto::to_string(pu3.pc()->state()).c_str()); } } END_TEST
//
// - Two nodes 2 and 3 started with weights 1
// - Third node 1 with weight 3 is brought in the cluster
//   (becomes representative)
// - Partitioning to (1) and (2, 3) happens so that INSTALL message is
//   delivered in trans view on all nodes
// - All nodes should end up in non-prim, nodes 2 and 3 because they don't know
//   if node 1 ended up in prim (see test_weighted_partitioning_1 above),
//   node 1 because it hasn't been in primary before and fails to deliver
//   install message in reg view
//
START_TEST(test_weighted_partitioning_2) { log_info << "START (test_weighted_partitioning_2)"; gu::Config conf3; gu::ssl_register_params(conf3); gcomm::Conf::register_params(conf3); conf3.set("pc.weight", "1"); UUID uuid3(3); ProtoUpMeta pum3(uuid3); Proto pc3(conf3, uuid3, 0); DummyTransport tp3; PCUser pu3(conf3, uuid3, &tp3, &pc3); single_boot(0, &pu3); gu::Config conf2; gu::ssl_register_params(conf2); gcomm::Conf::register_params(conf2); conf2.set("pc.weight", "1"); UUID uuid2(2); ProtoUpMeta pum2(uuid2); Proto pc2(conf2, uuid2, 0); DummyTransport tp2; PCUser pu2(conf2, uuid2, &tp2, &pc2); double_boot(0, &pu3, &pu2); gu::Config conf1; gu::ssl_register_params(conf1); gcomm::Conf::register_params(conf1); conf1.set("pc.weight", "3"); UUID uuid1(1); ProtoUpMeta pum1(uuid1); Proto pc1(conf1, uuid1, 0); DummyTransport tp1; PCUser pu1(conf1, uuid1, &tp1, &pc1);
// trans views
{ View tr1(0, ViewId(V_TRANS, uuid1, 0)); tr1.add_member(uuid1, 0); pu1.pc()->connect(false); ProtoUpMeta
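// Note: the weight arithmetic behind both weighted partitioning tests: node 1
// carries pc.weight=3 against a combined weight of 2 for nodes 2 and 3, so
// after the split only the side containing node 1 can reach weighted quorum;
// nodes 2 and 3 are expected to recompute the quorum from the install message
// seen in the transitional view and drop to non-prim.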
um1(UUID::nil(), ViewId(), &tr1); pu1.pc()->handle_up(0, Datagram(), um1); View tr23(0, ViewId(V_TRANS, pu2.pc()->current_view().id())); tr23.add_member(uuid2, 0); tr23.add_member(uuid3, 0); ProtoUpMeta um23(UUID::nil(), ViewId(), &tr23); pu2.pc()->handle_up(0, Datagram(), um23); pu3.pc()->handle_up(0, Datagram(), um23); }
// reg view
{ View reg(0, ViewId(V_REG, uuid1, pu2.pc()->current_view().id().seq() + 1)); reg.add_member(uuid1, 0); reg.add_member(uuid2, 0); reg.add_member(uuid3, 0); ProtoUpMeta um(UUID::nil(), ViewId(), &reg); pu1.pc()->handle_up(0, Datagram(), um); pu2.pc()->handle_up(0, Datagram(), um); pu3.pc()->handle_up(0, Datagram(), um); }
// states exch
{ Datagram* dg(pu1.tp()->out()); fail_unless(dg != 0); pu1.pc()->handle_up(0, *dg, ProtoUpMeta(uuid1)); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(uuid1)); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(uuid1)); delete dg; dg = pu2.tp()->out(); fail_unless(dg != 0); pu1.pc()->handle_up(0, *dg, ProtoUpMeta(uuid2)); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(uuid2)); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(uuid2)); delete dg; dg = pu3.tp()->out(); fail_unless(dg != 0); pu1.pc()->handle_up(0, *dg, ProtoUpMeta(uuid3)); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(uuid3)); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(uuid3)); delete dg; fail_unless(pu2.tp()->out() == 0); fail_unless(pu3.tp()->out() == 0); }
// install msg
{ Datagram* dg(pu1.tp()->out()); fail_unless(dg != 0);
// trans view for 1
View tr1(0, ViewId(V_TRANS, pu1.pc()->current_view().id())); tr1.add_member(uuid1, 0); tr1.add_partitioned(uuid2, 0); tr1.add_partitioned(uuid3, 0); ProtoUpMeta trum1(UUID::nil(), ViewId(), &tr1); pu1.pc()->handle_up(0, Datagram(), trum1); fail_unless(pu1.pc()->state() == Proto::S_TRANS);
// 1 handle install
pu1.pc()->handle_up(0, *dg, ProtoUpMeta(uuid1)); fail_unless(pu1.pc()->state() == Proto::S_TRANS);
// trans view for 2 and 3
View tr23(0, ViewId(V_TRANS, pu2.pc()->current_view().id())); tr23.add_member(uuid2, 0); tr23.add_member(uuid3, 0); tr23.add_partitioned(uuid1, 0); ProtoUpMeta trum23(UUID::nil(), ViewId(), &tr23); pu2.pc()->handle_up(0, Datagram(), trum23); pu3.pc()->handle_up(0, Datagram(), trum23); fail_unless(pu2.pc()->state() == Proto::S_TRANS); fail_unless(pu3.pc()->state() == Proto::S_TRANS);
// 2 and 3 handle install
pu2.pc()->handle_up(0, *dg, ProtoUpMeta(uuid1)); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(uuid1)); fail_unless(pu2.pc()->state() == Proto::S_TRANS); fail_unless(pu3.pc()->state() == Proto::S_TRANS); delete dg;
// reg view for 1
View reg1(0, ViewId(V_REG, uuid1, pu1.pc()->current_view().id().seq() + 1)); reg1.add_member(uuid1, 0); ProtoUpMeta rum1(UUID::nil(), ViewId(), &reg1); pu1.pc()->handle_up(0, Datagram(), rum1); fail_unless(pu1.pc()->state() == Proto::S_STATES_EXCH);
// reg view for 2 and 3
View reg23(0, ViewId(V_REG, uuid2, pu2.pc()->current_view().id().seq() + 1)); reg23.add_member(uuid2, 0); reg23.add_member(uuid3, 0); ProtoUpMeta rum23(UUID::nil(), ViewId(), &reg23); pu2.pc()->handle_up(0, Datagram(), rum23); pu3.pc()->handle_up(0, Datagram(), rum23); fail_unless(pu2.pc()->state() == Proto::S_STATES_EXCH); fail_unless(pu3.pc()->state() == Proto::S_STATES_EXCH);
// states exch
dg = pu1.tp()->out(); fail_unless(dg != 0); pu1.pc()->handle_up(0, *dg, ProtoUpMeta(uuid1)); fail_unless(pu1.pc()->state() == Proto::S_NON_PRIM, "state: %s", Proto::to_string(pu1.pc()->state()).c_str()); delete dg; dg = pu2.tp()->out(); fail_unless(dg != 0); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(uuid2)); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(uuid2)); delete
dg; dg = pu3.tp()->out(); fail_unless(dg != 0); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(uuid3)); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(uuid3)); delete dg; fail_unless(pu2.pc()->state() == Proto::S_NON_PRIM, "state: %s", Proto::to_string(pu2.pc()->state()).c_str()); fail_unless(pu3.pc()->state() == Proto::S_NON_PRIM, "state: %s", Proto::to_string(pu3.pc()->state()).c_str()); } } END_TEST // // - Nodes 1-3 started with equal weights // - Weight for node 1 is changed to 3 // - Group splits to (1), (2, 3) // - Weigh changing message is delivered in reg view in (1) and in // trans in (2, 3) // - Expected outcome: 1 stays in prim, 2 and 3 end up in non-prim // START_TEST(test_weight_change_partitioning_1) { log_info << "START (test_weight_change_partitioning_1)"; gu::Config conf1; gu::ssl_register_params(conf1); gcomm::Conf::register_params(conf1); conf1.set("pc.weight", "1"); UUID uuid1(1); ProtoUpMeta pum1(uuid1); Proto pc1(conf1, uuid1, 0); DummyTransport tp1; PCUser pu1(conf1, uuid1, &tp1, &pc1); single_boot(0, &pu1); gu::Config conf2; gu::ssl_register_params(conf2); gcomm::Conf::register_params(conf2); conf2.set("pc.weight", "1"); UUID uuid2(2); ProtoUpMeta pum2(uuid2); Proto pc2(conf2, uuid2, 0); DummyTransport tp2; PCUser pu2(conf2, uuid2, &tp2, &pc2); double_boot(0, &pu1, &pu2); gu::Config conf3; gu::ssl_register_params(conf3); gcomm::Conf::register_params(conf3); conf3.set("pc.weight", "1"); UUID uuid3(3); ProtoUpMeta pum3(uuid3); Proto pc3(conf3, uuid3, 0); DummyTransport tp3; PCUser pu3(conf3, uuid3, &tp3, &pc3); triple_boot(0, &pu1, &pu2, &pu3); // weight change { pu1.pc()->set_param("pc.weight", "3"); Datagram* install_dg(pu1.tp()->out()); fail_unless(install_dg != 0); // node 1 handle weight change install, proceed to singleton prim pu1.pc()->handle_up(0, *install_dg, ProtoUpMeta(pu1.uuid())); View tr1(0, ViewId(V_TRANS, pu1.pc()->current_view().id())); tr1.add_member(pu1.uuid(), 0); tr1.add_partitioned(pu2.uuid(), 0); tr1.add_partitioned(pu3.uuid(), 0); pu1.pc()->handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr1)); fail_unless(pu1.pc()->state() == Proto::S_TRANS); View reg1(0, ViewId(V_REG, pu1.uuid(), pu1.pc()->current_view().id().seq() + 1)); reg1.add_member(pu1.uuid(), 0); pu1.pc()->handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®1)); fail_unless(pu1.pc()->state() == Proto::S_STATES_EXCH); Datagram* dg(pu1.tp()->out()); fail_unless(dg != 0); pu1.pc()->handle_up(0, *dg, ProtoUpMeta(pu1.uuid())); delete dg; fail_unless(pu1.pc()->state() == Proto::S_INSTALL); dg = pu1.tp()->out(); fail_unless(dg != 0); pu1.pc()->handle_up(0, *dg, ProtoUpMeta(pu1.uuid())); delete dg; fail_unless(pu1.pc()->state() == Proto::S_PRIM); // nodes 2 and 3 go to trans, handle install View tr23(0, ViewId(V_TRANS, pu2.pc()->current_view().id())); tr23.add_member(pu2.uuid(), 0); tr23.add_member(pu3.uuid(), 0); tr23.add_partitioned(pu1.uuid(), 0); pu2.pc()->handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr23)); pu3.pc()->handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr23)); fail_unless(pu2.pc()->state() == Proto::S_TRANS); fail_unless(pu3.pc()->state() == Proto::S_TRANS); pu2.pc()->handle_up(0, *install_dg, ProtoUpMeta(pu1.uuid())); pu3.pc()->handle_up(0, *install_dg, ProtoUpMeta(pu1.uuid())); View reg23(0, ViewId(V_REG, pu2.uuid(), pu2.pc()->current_view().id().seq() + 1)); reg23.add_member(pu2.uuid(), 0); reg23.add_member(pu3.uuid(), 0); pu2.pc()->handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®23)); pu3.pc()->handle_up(0, Datagram(), 
ProtoUpMeta(UUID::nil(), ViewId(), ®23)); fail_unless(pu2.pc()->state() == Proto::S_STATES_EXCH); fail_unless(pu3.pc()->state() == Proto::S_STATES_EXCH); dg = pu2.tp()->out(); fail_unless(dg != 0); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(pu2.uuid())); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(pu2.uuid())); delete dg; dg = pu3.tp()->out(); fail_unless(dg != 0); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(pu3.uuid())); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(pu3.uuid())); delete dg; fail_unless(pu2.pc()->state() == Proto::S_NON_PRIM); fail_unless(pu3.pc()->state() == Proto::S_NON_PRIM); delete install_dg; } } END_TEST // // - Nodes 2 and 3 start with weight 1, node 1 with weight 3 // - Weight for node 1 is changed to 1 // - Group splits to (1), (2, 3) // - Weigh changing message is delivered in reg view in (1) and in // trans in (2, 3) // - Expected outcome: all nodes go non-prim // START_TEST(test_weight_change_partitioning_2) { log_info << "START (test_weight_change_partitioning_2)"; gu::Config conf1; gu::ssl_register_params(conf1); gcomm::Conf::register_params(conf1); conf1.set("pc.weight", "3"); UUID uuid1(1); ProtoUpMeta pum1(uuid1); Proto pc1(conf1, uuid1, 0); DummyTransport tp1; PCUser pu1(conf1, uuid1, &tp1, &pc1); single_boot(0, &pu1); gu::Config conf2; gu::ssl_register_params(conf2); gcomm::Conf::register_params(conf2); conf2.set("pc.weight", "1"); UUID uuid2(2); ProtoUpMeta pum2(uuid2); Proto pc2(conf2, uuid2, 0); DummyTransport tp2; PCUser pu2(conf2, uuid2, &tp2, &pc2); double_boot(0, &pu1, &pu2); gu::Config conf3; gu::ssl_register_params(conf3); gcomm::Conf::register_params(conf3); conf3.set("pc.weight", "1"); UUID uuid3(3); ProtoUpMeta pum3(uuid3); Proto pc3(conf3, uuid3, 0); DummyTransport tp3; PCUser pu3(conf3, uuid3, &tp3, &pc3); triple_boot(0, &pu1, &pu2, &pu3); // weight change { pu1.pc()->set_param("pc.weight", "1"); Datagram* install_dg(pu1.tp()->out()); fail_unless(install_dg != 0); // node 1 handle weight change install, proceed to singleton prim pu1.pc()->handle_up(0, *install_dg, ProtoUpMeta(pu1.uuid())); View tr1(0, ViewId(V_TRANS, pu1.pc()->current_view().id())); tr1.add_member(pu1.uuid(), 0); tr1.add_partitioned(pu2.uuid(), 0); tr1.add_partitioned(pu3.uuid(), 0); pu1.pc()->handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr1)); fail_unless(pu1.pc()->state() == Proto::S_TRANS); View reg1(0, ViewId(V_REG, pu1.uuid(), pu1.pc()->current_view().id().seq() + 1)); reg1.add_member(pu1.uuid(), 0); pu1.pc()->handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®1)); fail_unless(pu1.pc()->state() == Proto::S_STATES_EXCH); Datagram* dg(pu1.tp()->out()); fail_unless(dg != 0); pu1.pc()->handle_up(0, *dg, ProtoUpMeta(pu1.uuid())); delete dg; fail_unless(pu1.pc()->state() == Proto::S_NON_PRIM); // nodes 2 and 3 go to trans, handle install View tr23(0, ViewId(V_TRANS, pu2.pc()->current_view().id())); tr23.add_member(pu2.uuid(), 0); tr23.add_member(pu3.uuid(), 0); tr23.add_partitioned(pu1.uuid(), 0); pu2.pc()->handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr23)); pu3.pc()->handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr23)); fail_unless(pu2.pc()->state() == Proto::S_TRANS); fail_unless(pu3.pc()->state() == Proto::S_TRANS); pu2.pc()->handle_up(0, *install_dg, ProtoUpMeta(pu1.uuid())); pu3.pc()->handle_up(0, *install_dg, ProtoUpMeta(pu1.uuid())); View reg23(0, ViewId(V_REG, pu2.uuid(), pu2.pc()->current_view().id().seq() + 1)); reg23.add_member(pu2.uuid(), 0); reg23.add_member(pu3.uuid(), 0); pu2.pc()->handle_up(0, Datagram(), 
ProtoUpMeta(UUID::nil(), ViewId(), ®23)); pu3.pc()->handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®23)); fail_unless(pu2.pc()->state() == Proto::S_STATES_EXCH); fail_unless(pu3.pc()->state() == Proto::S_STATES_EXCH); dg = pu2.tp()->out(); fail_unless(dg != 0); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(pu2.uuid())); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(pu2.uuid())); delete dg; dg = pu3.tp()->out(); fail_unless(dg != 0); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(pu3.uuid())); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(pu3.uuid())); delete dg; fail_unless(pu2.pc()->state() == Proto::S_NON_PRIM); fail_unless(pu3.pc()->state() == Proto::S_NON_PRIM); delete install_dg; } } END_TEST // // Weight changing message is delivered in transitional view when new node is // joining. All nodes should end up in prim. // START_TEST(test_weight_change_joining) { log_info << "START (test_weight_change_joining)"; gu::Config conf1; gu::ssl_register_params(conf1); gcomm::Conf::register_params(conf1); conf1.set("pc.weight", "1"); UUID uuid1(1); ProtoUpMeta pum1(uuid1); Proto pc1(conf1, uuid1, 0); DummyTransport tp1; PCUser pu1(conf1, uuid1, &tp1, &pc1); single_boot(0, &pu1); gu::Config conf2; gu::ssl_register_params(conf2); gcomm::Conf::register_params(conf2); conf2.set("pc.weight", "1"); UUID uuid2(2); ProtoUpMeta pum2(uuid2); Proto pc2(conf2, uuid2, 0); DummyTransport tp2; PCUser pu2(conf2, uuid2, &tp2, &pc2); double_boot(0, &pu1, &pu2); gu::Config conf3; gu::ssl_register_params(conf3); gcomm::Conf::register_params(conf3); conf3.set("pc.weight", "1"); UUID uuid3(3); ProtoUpMeta pum3(uuid3); Proto pc3(conf3, uuid3, 0); DummyTransport tp3; PCUser pu3(conf3, uuid3, &tp3, &pc3); // weight change { pu1.pc()->set_param("pc.weight", "1"); Datagram* install_dg(pu1.tp()->out()); fail_unless(install_dg != 0); // trans views { View tr12(0, ViewId(V_TRANS, pu1.pc()->current_view().id())); tr12.add_member(pu1.uuid(), 0); tr12.add_member(pu2.uuid(), 0); ProtoUpMeta trum12(UUID::nil(), ViewId(), &tr12); pu1.pc()->handle_up(0, Datagram(), trum12); pu2.pc()->handle_up(0, Datagram(), trum12); fail_unless(pu1.pc()->state() == Proto::S_TRANS); fail_unless(pu2.pc()->state() == Proto::S_TRANS); // deliver weight change install in trans view pu1.pc()->handle_up(0, *install_dg, ProtoUpMeta(pu1.uuid())); pu2.pc()->handle_up(0, *install_dg, ProtoUpMeta(pu1.uuid())); pu3.pc()->connect(false); View tr3(0, ViewId(V_TRANS, pu3.uuid(), 0)); tr3.add_member(pu3.uuid(), 0); ProtoUpMeta trum3(UUID::nil(), ViewId(), &tr3); pu3.pc()->handle_up(0, Datagram(), trum3); fail_unless(pu3.pc()->state() == Proto::S_TRANS); } // reg view { View reg(0, ViewId(V_REG, pu1.uuid(), pu1.pc()->current_view().id().seq() + 1)); reg.add_member(pu1.uuid(), 0); reg.add_member(pu2.uuid(), 0); reg.add_member(pu3.uuid(), 0); ProtoUpMeta pum(UUID::nil(), ViewId(), ®); pu1.pc()->handle_up(0, Datagram(), pum); pu2.pc()->handle_up(0, Datagram(), pum); pu3.pc()->handle_up(0, Datagram(), pum); fail_unless(pu1.pc()->state() == Proto::S_STATES_EXCH); fail_unless(pu2.pc()->state() == Proto::S_STATES_EXCH); fail_unless(pu3.pc()->state() == Proto::S_STATES_EXCH); } // states exch { Datagram* dg(pu1.tp()->out()); fail_unless(dg != 0); pu1.pc()->handle_up(0, *dg, ProtoUpMeta(pu1.uuid())); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(pu1.uuid())); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(pu1.uuid())); delete dg; dg = pu2.tp()->out(); fail_unless(dg != 0); pu1.pc()->handle_up(0, *dg, ProtoUpMeta(pu2.uuid())); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(pu2.uuid())); 
pu3.pc()->handle_up(0, *dg, ProtoUpMeta(pu2.uuid())); delete dg; dg = pu3.tp()->out(); fail_unless(dg != 0); pu1.pc()->handle_up(0, *dg, ProtoUpMeta(pu3.uuid())); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(pu3.uuid())); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(pu3.uuid())); delete dg; fail_unless(pu1.pc()->state() == Proto::S_INSTALL); fail_unless(pu2.pc()->state() == Proto::S_INSTALL); fail_unless(pu3.pc()->state() == Proto::S_INSTALL); } // install { Datagram* dg(pu1.tp()->out()); fail_unless(dg != 0); pu1.pc()->handle_up(0, *dg, ProtoUpMeta(pu1.uuid())); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(pu1.uuid())); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(pu1.uuid())); delete dg; fail_unless(pu1.pc()->state() == Proto::S_PRIM); fail_unless(pu2.pc()->state() == Proto::S_PRIM); fail_unless(pu3.pc()->state() == Proto::S_PRIM); } delete install_dg; } } END_TEST // // One of the nodes leaves gracefully from group and weight change message // is delivered in trans view. Remaining nodes must not enter non-prim. // START_TEST(test_weight_change_leaving) { log_info << "START (test_weight_change_leaving)"; gu::Config conf1; gu::ssl_register_params(conf1); gcomm::Conf::register_params(conf1); conf1.set("pc.weight", "3"); UUID uuid1(1); ProtoUpMeta pum1(uuid1); Proto pc1(conf1, uuid1, 0); DummyTransport tp1; PCUser pu1(conf1, uuid1, &tp1, &pc1); single_boot(0, &pu1); gu::Config conf2; gu::ssl_register_params(conf2); gcomm::Conf::register_params(conf2); conf2.set("pc.weight", "2"); UUID uuid2(2); ProtoUpMeta pum2(uuid2); Proto pc2(conf2, uuid2, 0); DummyTransport tp2; PCUser pu2(conf2, uuid2, &tp2, &pc2); double_boot(0, &pu1, &pu2); gu::Config conf3; gu::ssl_register_params(conf3); gcomm::Conf::register_params(conf3); conf3.set("pc.weight", "1"); UUID uuid3(3); ProtoUpMeta pum3(uuid3); Proto pc3(conf3, uuid3, 0); DummyTransport tp3; PCUser pu3(conf3, uuid3, &tp3, &pc3); triple_boot(0, &pu1, &pu2, &pu3); // weight change { // change weight for node 2 while node 1 leaves the group gracefully pu2.pc()->set_param("pc.weight", "1"); Datagram* install_dg(pu2.tp()->out()); fail_unless(install_dg != 0); // nodes 2 and 3 go to trans, handle install View tr23(0, ViewId(V_TRANS, pu2.pc()->current_view().id())); tr23.add_member(pu2.uuid(), 0); tr23.add_member(pu3.uuid(), 0); tr23.add_left(pu1.uuid(), 0); pu2.pc()->handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr23)); pu3.pc()->handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr23)); fail_unless(pu2.pc()->state() == Proto::S_TRANS); fail_unless(pu3.pc()->state() == Proto::S_TRANS); pu2.pc()->handle_up(0, *install_dg, ProtoUpMeta(pu1.uuid())); pu3.pc()->handle_up(0, *install_dg, ProtoUpMeta(pu1.uuid())); View reg23(0, ViewId(V_REG, pu2.uuid(), pu2.pc()->current_view().id().seq() + 1)); reg23.add_member(pu2.uuid(), 0); reg23.add_member(pu3.uuid(), 0); pu2.pc()->handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®23)); pu3.pc()->handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®23)); fail_unless(pu2.pc()->state() == Proto::S_STATES_EXCH); fail_unless(pu3.pc()->state() == Proto::S_STATES_EXCH); Datagram* dg(pu2.tp()->out()); fail_unless(dg != 0); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(pu2.uuid())); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(pu2.uuid())); delete dg; dg = pu3.tp()->out(); fail_unless(dg != 0); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(pu3.uuid())); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(pu3.uuid())); delete dg; fail_unless(pu2.pc()->state() == Proto::S_INSTALL); fail_unless(pu3.pc()->state() == Proto::S_INSTALL); dg = 
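//
// That is expected to hold because node 1's departure is announced as a
// graceful leave (add_left() in the transitional view below), so its weight
// presumably no longer counts against the survivors when the primary
// component is re-evaluated.
//
// Rough sketch of the weighted-quorum rule the pc.weight tests above and
// below appear to exercise (illustration only, not used by the tests, and
// the real pc::Proto bookkeeping may differ in detail): the surviving
// component keeps the primary state when its summed weight is strictly more
// than half of the summed weight of the previous primary component, with
// gracefully left members excluded from that previous total.
//
static inline bool rough_weighted_quorum(int surviving_weight,
                                         int previous_prim_weight)
{
    // strictly-more-than-half of the previous primary component's weight
    return 2 * surviving_weight > previous_prim_weight;
}
//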
pu2.tp()->out(); fail_unless(dg != 0); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(pu3.uuid())); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(pu3.uuid())); delete dg; fail_unless(pu2.pc()->state() == Proto::S_PRIM); fail_unless(pu3.pc()->state() == Proto::S_PRIM); delete install_dg; } } END_TEST // node1 and node2 are a cluster. // before node3 joins, node2 lost connection to node1 and node3. // after node1 and node3 merged, node2 joins. // we expect all nodes are a cluster, and they are all in prim state. static void _test_join_split_cluster( const UUID& uuid1, const UUID& uuid2, const UUID& uuid3) { // construct restored view. const UUID& prim_uuid = uuid1 < uuid2 ? uuid1 : uuid2; View rst_view(0, ViewId(V_PRIM, prim_uuid, 0)); rst_view.add_member(uuid1, 0); rst_view.add_member(uuid2, 0); gu::Config conf1; gu::ssl_register_params(conf1); gcomm::Conf::register_params(conf1); ProtoUpMeta pum1(uuid1); Proto pc1(conf1, uuid1, 0); pc1.set_restored_view(&rst_view); DummyTransport tp1; PCUser pu1(conf1, uuid1, &tp1, &pc1); single_boot(0, &pu1); gu::Config conf2; gu::ssl_register_params(conf2); gcomm::Conf::register_params(conf2); ProtoUpMeta pum2(uuid2); Proto pc2(conf2, uuid2, 0); pc2.set_restored_view(&rst_view); DummyTransport tp2; PCUser pu2(conf2, uuid2, &tp2, &pc2); double_boot(0, &pu1, &pu2); gu::Config conf3; gu::ssl_register_params(conf3); gcomm::Conf::register_params(conf3); ProtoUpMeta pum3(uuid3); Proto pc3(conf3, uuid3, 0); DummyTransport tp3; PCUser pu3(conf3, uuid3, &tp3, &pc3); // assume previous cluster is node1 and node3. const UUID& prim_uuid2 = uuid1 < uuid3 ? uuid1 : uuid3; View rst_view2(0, ViewId(V_PRIM, prim_uuid2, 0)); rst_view2.add_member(uuid1, 0); rst_view2.add_member(uuid3, 0); pc3.set_restored_view(&rst_view2); { uint32_t seq = pc1.current_view().id().seq(); const UUID& reg_uuid = pu1.uuid() < pu3.uuid() ? 
pu1.uuid() : pu3.uuid(); // node1 View tr1(0, ViewId(V_TRANS, pc1.current_view().id())); tr1.add_member(pu1.uuid(), 0); tr1.add_partitioned(pu2.uuid(), 0); pc1.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr1)); fail_unless(pc1.state() == Proto::S_TRANS); View reg1(0, ViewId(V_REG, reg_uuid, seq + 1)); reg1.add_member(pu1.uuid(), 0); reg1.add_member(pu3.uuid(), 0); reg1.add_joined(pu3.uuid(), 0); reg1.add_partitioned(pu2.uuid(), 0); pc1.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®1)); fail_unless(pc1.state() == Proto::S_STATES_EXCH); // node3 View tr3(0, ViewId(V_TRANS, pc3.current_view().id())); tr3.add_member(pu3.uuid(), 0); pc3.connect(false); pc3.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr3)); fail_unless(pc3.state() == Proto::S_TRANS); View reg3(0, ViewId(V_REG, reg_uuid, seq + 1)); reg3.add_member(pu1.uuid(), 0); reg3.add_member(pu3.uuid(), 0); reg3.add_joined(pu1.uuid(), 0); pc3.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®3)); fail_unless(pc3.state() == Proto::S_STATES_EXCH); Datagram* dg1(pu1.tp()->out()); fail_unless(dg1 != 0); Datagram* dg3(pu3.tp()->out()); fail_unless(dg3 != 0); pc1.handle_up(0, *dg1, ProtoUpMeta(pu1.uuid())); pc1.handle_up(0, *dg3, ProtoUpMeta(pu3.uuid())); fail_unless(pc1.state() == Proto::S_NON_PRIM); pc3.handle_up(0, *dg1, ProtoUpMeta(pu1.uuid())); pc3.handle_up(0, *dg3, ProtoUpMeta(pu3.uuid())); fail_unless(pc3.state() == Proto::S_NON_PRIM); delete dg1; delete dg3; } { // node2 uint32_t seq = pc2.current_view().id().seq(); View tr2(0, ViewId(V_TRANS, pc2.current_view().id())); tr2.add_member(pu2.uuid(), 0); tr2.add_partitioned(pu1.uuid(), 0); pc2.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr2)); fail_unless(pc2.state() == Proto::S_TRANS); View reg2(0, ViewId(V_REG, pc2.uuid(), seq + 1)); reg2.add_member(pu2.uuid(), 0); reg2.add_partitioned(pu1.uuid(), 0); pc2.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®2)); fail_unless(pc2.state() == Proto::S_STATES_EXCH); Datagram* dg2(pu2.tp()->out()); fail_unless(dg2 != 0); pc2.handle_up(0, *dg2, ProtoUpMeta(pu2.uuid())); fail_unless(pc2.state() == Proto::S_NON_PRIM); } { View tr1(0, ViewId(V_TRANS, pc1.current_view().id())); tr1.add_member(pu1.uuid(), 0); tr1.add_member(pu3.uuid(), 0); pc1.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr1)); fail_unless(pc1.state() == Proto::S_TRANS); View tr2(0, ViewId(V_TRANS, pc2.current_view().id())); tr2.add_member(pu2.uuid(), 0); pc2.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr2)); fail_unless(pc2.state() == Proto::S_TRANS); View tr3(0, ViewId(V_TRANS, pc3.current_view().id())); tr3.add_member(pu1.uuid(), 0); tr3.add_member(pu3.uuid(), 0); pc3.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr3)); fail_unless(pc3.state() == Proto::S_TRANS); int seq = pc1.current_view().id().seq(); const UUID& reg_uuid1 = pu1.uuid() < pu2.uuid() ? pu1.uuid() : pu2.uuid(); const UUID& reg_uuid = reg_uuid1 < pu3.uuid() ? 
reg_uuid1 : pu3.uuid(); View reg1(0, ViewId(V_REG, reg_uuid, seq + 1)); reg1.add_member(pu1.uuid(), 0); reg1.add_member(pu2.uuid(), 0); reg1.add_member(pu3.uuid(), 0); reg1.add_joined(pu2.uuid(), 0); pc1.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®1)); fail_unless(pc1.state() == Proto::S_STATES_EXCH); pc3.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®1)); fail_unless(pc3.state() == Proto::S_STATES_EXCH); View reg2(0, ViewId(V_REG, reg_uuid, seq + 1)); reg2.add_member(pu1.uuid(), 0); reg2.add_member(pu2.uuid(), 0); reg2.add_member(pu3.uuid(), 0); reg2.add_joined(pu1.uuid(), 0); reg2.add_joined(pu3.uuid(), 0); pc2.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®2)); fail_unless(pc2.state() == Proto::S_STATES_EXCH); Datagram* dg1(pu1.tp()->out()); Datagram* dg2(pu2.tp()->out()); Datagram* dg3(pu3.tp()->out()); pc1.handle_up(0, *dg1, ProtoUpMeta(pu1.uuid())); pc1.handle_up(0, *dg2, ProtoUpMeta(pu2.uuid())); pc1.handle_up(0, *dg3, ProtoUpMeta(pu3.uuid())); fail_unless(pc1.state() == Proto::S_INSTALL); pc2.handle_up(0, *dg1, ProtoUpMeta(pu1.uuid())); pc2.handle_up(0, *dg2, ProtoUpMeta(pu2.uuid())); pc2.handle_up(0, *dg3, ProtoUpMeta(pu3.uuid())); fail_unless(pc2.state() == Proto::S_INSTALL); pc3.handle_up(0, *dg1, ProtoUpMeta(pu1.uuid())); pc3.handle_up(0, *dg2, ProtoUpMeta(pu2.uuid())); pc3.handle_up(0, *dg3, ProtoUpMeta(pu3.uuid())); fail_unless(pc3.state() == Proto::S_INSTALL); delete dg1; delete dg2; delete dg3; Datagram* dg = 0; PCUser* pcs[3] = {&pu1, &pu2, &pu3}; for (int i=0; i<3; i++) { if (pcs[i]->uuid() == reg_uuid) { dg = pcs[i]->tp()->out(); fail_unless(dg != 0); } else { fail_if(pcs[i]->tp()->out()); } } pc1.handle_up(0, *dg, ProtoUpMeta(reg_uuid)); pc2.handle_up(0, *dg, ProtoUpMeta(reg_uuid)); pc3.handle_up(0, *dg, ProtoUpMeta(reg_uuid)); fail_unless(pc1.state() == Proto::S_PRIM); fail_unless(pc2.state() == Proto::S_PRIM); fail_unless(pc3.state() == Proto::S_PRIM); } } START_TEST(test_join_split_cluster) { log_info << "START (test_join_split_cluster)"; gu_conf_debug_on(); UUID uuid1(1); UUID uuid2(2); UUID uuid3(3); _test_join_split_cluster(uuid1, uuid2, uuid3); _test_join_split_cluster(uuid2, uuid1, uuid3); _test_join_split_cluster(uuid2, uuid3, uuid1); } END_TEST START_TEST(test_trac_762) { log_info << "START (trac_762)"; size_t n_nodes(3); vector dn; PropagationMatrix prop; const string suspect_timeout("PT0.35S"); const string inactive_timeout("PT0.7S"); const string retrans_period("PT0.1S"); uint32_t view_seq = 0; for (size_t i = 0; i < n_nodes; ++i) { dn.push_back(create_dummy_node(i + 1, 0, suspect_timeout, inactive_timeout, retrans_period)); gu_trace(join_node(&prop, dn[i], i == 0)); set_cvi(dn, 0, i, ++view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(false)); } log_info << "split 1"; // split group so that node 3 becomes isolated prop.split(1, 3); prop.split(2, 3); ++view_seq; set_cvi(dn, 0, 1, view_seq, V_PRIM); set_cvi(dn, 2, 2, view_seq, V_NON_PRIM); gu_trace(prop.propagate_until_cvi(true)); mark_point(); log_info << "remerge 1"; // detach PC layer from EVS and lower layers, attach to DummyTransport for (size_t i(0); i < n_nodes; ++i) { std::list::iterator li0(dn[i]->protos().begin()); std::list::iterator li1(li0); ++li1; assert(li1 != dn[i]->protos().end()); std::list::iterator li2(li1); ++li2; assert(li2 != dn[i]->protos().end()); gcomm::disconnect(*li0, *li1); gcomm::disconnect(*li1, *li2); delete *li0; delete *li1; dn[i]->protos().pop_front(); dn[i]->protos().pop_front(); DummyTransport* tp(new 
DummyTransport(dn[i]->uuid(), true)); dn[i]->protos().push_front(tp); gcomm::connect(tp, *li2); } Proto* pc1(pc_from_dummy(dn[0])); DummyTransport* tp1(reinterpret_cast( dn[0]->protos().front())); Proto* pc2(pc_from_dummy(dn[1])); DummyTransport* tp2(reinterpret_cast( dn[1]->protos().front())); Proto* pc3(pc_from_dummy(dn[2])); DummyTransport* tp3(reinterpret_cast( dn[2]->protos().front())); // remerge group, process event by event so that nodes 1 and 2 handle // install message in reg view and reach prim view, node 3 partitions and // handles install in trans view and marks nodes 1 and 2 to have un state { View tr1(0, ViewId(V_TRANS, tp1->uuid(), view_seq)); tr1.add_member(tp1->uuid(), 0); tr1.add_member(tp2->uuid(), 0); pc1->handle_view(tr1); pc2->handle_view(tr1); View tr2(0, ViewId(V_TRANS, tp3->uuid(), view_seq)); tr2.add_member(tp3->uuid(), 0); pc3->handle_view(tr2); ++view_seq; View reg(0, ViewId(V_REG, tp1->uuid(), view_seq)); reg.add_member(tp1->uuid(), 0); reg.add_member(tp2->uuid(), 0); reg.add_member(tp3->uuid(), 0); pc1->handle_view(reg); pc2->handle_view(reg); pc3->handle_view(reg); // states exch Datagram* dg(tp1->out()); fail_unless(dg != 0); pc1->handle_up(0, *dg, ProtoUpMeta(tp1->uuid())); pc2->handle_up(0, *dg, ProtoUpMeta(tp1->uuid())); pc3->handle_up(0, *dg, ProtoUpMeta(tp1->uuid())); delete dg; dg = tp2->out(); fail_unless(dg != 0); pc1->handle_up(0, *dg, ProtoUpMeta(tp2->uuid())); pc2->handle_up(0, *dg, ProtoUpMeta(tp2->uuid())); pc3->handle_up(0, *dg, ProtoUpMeta(tp2->uuid())); delete dg; dg = tp3->out(); fail_unless(dg != 0); pc1->handle_up(0, *dg, ProtoUpMeta(tp3->uuid())); pc2->handle_up(0, *dg, ProtoUpMeta(tp3->uuid())); pc3->handle_up(0, *dg, ProtoUpMeta(tp3->uuid())); delete dg; // install message dg = tp1->out(); fail_unless(dg != 0); pc1->handle_up(0, *dg, ProtoUpMeta(tp1->uuid())); pc2->handle_up(0, *dg, ProtoUpMeta(tp1->uuid())); View tr3(0, ViewId(V_TRANS, tp1->uuid(), view_seq)); tr3.add_member(tp1->uuid(), 0); tr3.add_member(tp2->uuid(), 0); tr3.add_partitioned(tp3->uuid(), 0); pc1->handle_view(tr3); pc2->handle_view(tr3); View tr4(0, ViewId(V_TRANS, tp1->uuid(), view_seq)); tr4.add_member(tp3->uuid(), 0); tr4.add_partitioned(tp1->uuid(), 0); tr4.add_partitioned(tp2->uuid(), 0); pc3->handle_view(tr4); pc3->handle_up(0, *dg, ProtoUpMeta(tp1->uuid())); delete dg; } ++view_seq; // ... intermediate reg/trans views // 1 and 2 { View reg(0, ViewId(V_REG, tp1->uuid(), view_seq)); reg.add_member(tp1->uuid(), 0); reg.add_member(tp2->uuid(), 0); pc1->handle_view(reg); pc2->handle_view(reg); View tr(0, ViewId(V_TRANS, tp1->uuid(), view_seq)); tr.add_member(tp1->uuid(), 0); tr.add_member(tp2->uuid(), 0); pc1->handle_view(tr); pc2->handle_view(tr); Datagram* dg(tp1->out()); fail_unless(dg != 0); pc1->handle_up(0, *dg, ProtoUpMeta(tp1->uuid())); pc2->handle_up(0, *dg, ProtoUpMeta(tp1->uuid())); delete dg; dg = tp2->out(); fail_unless(dg != 0); pc1->handle_up(0, *dg, ProtoUpMeta(tp1->uuid())); pc2->handle_up(0, *dg, ProtoUpMeta(tp1->uuid())); delete dg; } // 3 { View reg(0, ViewId(V_REG, tp3->uuid(), view_seq)); reg.add_member(tp3->uuid(), 0); pc3->handle_view(reg); Datagram* dg(tp3->out()); fail_unless(dg != 0); pc3->handle_up(0, *dg, ProtoUpMeta(tp3->uuid())); delete dg; View tr(0, ViewId(V_TRANS, tp3->uuid(), view_seq)); tr.add_member(tp3->uuid(), 0); pc3->handle_view(tr); } // Remerge and PC crash should occur if bug is present. 
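    // In the final block all three nodes see a common reg view again,
    // exchange state messages and handle the install message from node 1.
    // No final state is asserted here; checking that every output queue
    // drains is enough, the point being that a PC layer still affected by
    // the trac#762 defect would fail an assertion during this remerge
    // instead of reaching the end of the test.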
++view_seq; { View reg(0, ViewId(V_REG, tp1->uuid(), view_seq)); reg.add_member(tp1->uuid(), 0); reg.add_member(tp2->uuid(), 0); reg.add_member(tp3->uuid(), 0); pc1->handle_view(reg); pc2->handle_view(reg); pc3->handle_view(reg); // State msgs Datagram* dg(tp1->out()); fail_unless(dg != 0); pc1->handle_up(0, *dg, ProtoUpMeta(tp1->uuid())); pc2->handle_up(0, *dg, ProtoUpMeta(tp1->uuid())); pc3->handle_up(0, *dg, ProtoUpMeta(tp1->uuid())); delete dg; dg = tp2->out(); fail_unless(dg != 0); pc1->handle_up(0, *dg, ProtoUpMeta(tp2->uuid())); pc2->handle_up(0, *dg, ProtoUpMeta(tp2->uuid())); pc3->handle_up(0, *dg, ProtoUpMeta(tp2->uuid())); delete dg; dg = tp3->out(); fail_unless(dg != 0); pc1->handle_up(0, *dg, ProtoUpMeta(tp3->uuid())); pc2->handle_up(0, *dg, ProtoUpMeta(tp3->uuid())); pc3->handle_up(0, *dg, ProtoUpMeta(tp3->uuid())); delete dg; // Install msg dg = tp1->out(); fail_unless(dg != 0); pc1->handle_up(0, *dg, ProtoUpMeta(tp1->uuid())); pc2->handle_up(0, *dg, ProtoUpMeta(tp1->uuid())); pc3->handle_up(0, *dg, ProtoUpMeta(tp1->uuid())); fail_unless(tp1->out() == 0); fail_unless(tp2->out() == 0); fail_unless(tp3->out() == 0); } } END_TEST START_TEST(test_gh_92) { UUID uuid1(1), uuid2(2), uuid3(3); gu::Config conf1; gu::ssl_register_params(conf1); gcomm::Conf::register_params(conf1); ProtoUpMeta pum1(uuid1); Proto pc1(conf1, uuid1, 0); DummyTransport tp1; PCUser pu1(conf1, uuid1, &tp1, &pc1); single_boot(0, &pu1); gu::Config conf2; gu::ssl_register_params(conf2); gcomm::Conf::register_params(conf2); ProtoUpMeta pum2(uuid2); Proto pc2(conf2, uuid2, 0); DummyTransport tp2; PCUser pu2(conf2, uuid2, &tp2, &pc2); double_boot(0, &pu1, &pu2); gu::Config conf3; gu::ssl_register_params(conf3); gcomm::Conf::register_params(conf3); ProtoUpMeta pum3(uuid3); Proto pc3(conf3, uuid3, 0); DummyTransport tp3; PCUser pu3(conf3, uuid3, &tp3, &pc3); triple_boot(0, &pu1, &pu2, &pu3); uint32_t seq = pc1.current_view().id().seq(); Datagram* im = 0; Datagram* dg = 0; // they split into three parts. 
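    // Each node below is driven through a transitional view that contains
    // only itself, a singleton reg view and its own state message.  One
    // node out of three has no quorum, so all of them are expected to end
    // up in S_NON_PRIM.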
{ View tr1(0, ViewId(V_TRANS, pc1.current_view().id())); tr1.add_member(pu1.uuid(), 0); tr1.add_partitioned(pu2.uuid(), 0); tr1.add_partitioned(pu3.uuid(), 0); pc1.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr1)); fail_unless(pc1.state() == Proto::S_TRANS); View reg1(0, ViewId(V_REG, uuid1, seq + 1)); reg1.add_member(pu1.uuid(), 0); pc1.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®1)); fail_unless(pc1.state() == Proto::S_STATES_EXCH); dg = pu1.tp()->out(); pc1.handle_up(0, *dg, ProtoUpMeta(pu1.uuid())); fail_unless(pc1.state() == Proto::S_NON_PRIM); View tr2(0, ViewId(V_TRANS, pc2.current_view().id())); tr2.add_member(pu2.uuid(), 0); tr2.add_partitioned(pu1.uuid(), 0); tr2.add_partitioned(pu3.uuid(), 0); pc2.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr2)); fail_unless(pc2.state() == Proto::S_TRANS); View reg2(0, ViewId(V_REG, uuid2, seq + 1)); reg2.add_member(pu2.uuid(), 0); pc2.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®2)); fail_unless(pc2.state() == Proto::S_STATES_EXCH); dg = pu2.tp()->out(); pc2.handle_up(0, *dg, ProtoUpMeta(pu2.uuid())); fail_unless(pc2.state() == Proto::S_NON_PRIM); View tr3(0, ViewId(V_TRANS, pc3.current_view().id())); tr3.add_member(pu3.uuid(), 0); tr3.add_partitioned(pu1.uuid(), 0); tr3.add_partitioned(pu2.uuid(), 0); pc3.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr3)); fail_unless(pc3.state() == Proto::S_TRANS); View reg3(0, ViewId(V_REG, uuid3, seq + 1)); reg3.add_member(pu3.uuid(), 0); pc3.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®3)); fail_unless(pc3.state() == Proto::S_STATES_EXCH); dg = pu3.tp()->out(); pc3.handle_up(0, *dg, ProtoUpMeta(pu3.uuid())); fail_unless(pc3.state() == Proto::S_NON_PRIM); } seq += 1; // they try to merge into a primary component, but fails when sending install message. 
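    // In the next block the three nodes merge into one reg view and
    // complete the states exchange, reaching S_INSTALL.  Only node 1
    // produces an install message; it is captured into 'im' but not yet
    // delivered, which models the failure to send it.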
{ View tr1(0, ViewId(V_TRANS, pc1.current_view().id())); tr1.add_member(pu1.uuid(), 0); pc1.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr1)); fail_unless(pc1.state() == Proto::S_TRANS); View reg1(0, ViewId(V_REG, uuid1, seq + 1)); reg1.add_member(pu1.uuid(), 0); reg1.add_member(pu2.uuid(), 0); reg1.add_member(pu3.uuid(), 0); reg1.add_joined(pu2.uuid(), 0); reg1.add_joined(pu3.uuid(), 0); pc1.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®1)); fail_unless(pc1.state() == Proto::S_STATES_EXCH); View tr2(0, ViewId(V_TRANS, pc2.current_view().id())); tr2.add_member(pu2.uuid(), 0); pc2.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr2)); fail_unless(pc2.state() == Proto::S_TRANS); View reg2(0, ViewId(V_REG, uuid1, seq + 1)); reg2.add_member(pu1.uuid(), 0); reg2.add_member(pu2.uuid(), 0); reg2.add_member(pu3.uuid(), 0); reg2.add_joined(pu1.uuid(), 0); reg2.add_joined(pu3.uuid(), 0); pc2.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®2)); fail_unless(pc2.state() == Proto::S_STATES_EXCH); View tr3(0, ViewId(V_TRANS, pc3.current_view().id())); tr3.add_member(pu3.uuid(), 0); pc3.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr3)); fail_unless(pc3.state() == Proto::S_TRANS); View reg3(0, ViewId(V_REG, uuid1, seq + 1)); reg3.add_member(pu1.uuid(), 0); reg3.add_member(pu2.uuid(), 0); reg3.add_member(pu3.uuid(), 0); reg3.add_joined(pu1.uuid(), 0); reg3.add_joined(pu2.uuid(), 0); pc3.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®3)); fail_unless(pc3.state() == Proto::S_STATES_EXCH); Datagram* dg1(pu1.tp()->out()); Datagram* dg2(pu2.tp()->out()); Datagram* dg3(pu3.tp()->out()); fail_unless(dg1 != 0); fail_unless(dg2 != 0); fail_unless(dg3 != 0); pc1.handle_up(0, *dg1, ProtoUpMeta(pu1.uuid())); pc1.handle_up(0, *dg2, ProtoUpMeta(pu2.uuid())); pc1.handle_up(0, *dg3, ProtoUpMeta(pu3.uuid())); pc2.handle_up(0, *dg1, ProtoUpMeta(pu1.uuid())); pc2.handle_up(0, *dg2, ProtoUpMeta(pu2.uuid())); pc2.handle_up(0, *dg3, ProtoUpMeta(pu3.uuid())); pc3.handle_up(0, *dg1, ProtoUpMeta(pu1.uuid())); pc3.handle_up(0, *dg2, ProtoUpMeta(pu2.uuid())); pc3.handle_up(0, *dg3, ProtoUpMeta(pu3.uuid())); delete dg1; delete dg2; delete dg3; fail_unless(pc1.state() == Proto::S_INSTALL); fail_unless(pc2.state() == Proto::S_INSTALL); fail_unless(pc3.state() == Proto::S_INSTALL); im = pu1.tp()->out(); fail_unless(im != 0); fail_unless(pu2.tp()->out() == 0); fail_unless(pu3.tp()->out() == 0); } seq += 1; // node3 is separate from node1 and node2. // they get the stale install message when they get transient view. 
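    // The install message saved above is now delivered while every node is
    // already in a transitional view of the next, smaller configuration.
    // The expectation is that this stale install message is not acted upon:
    // the nodes remain in S_TRANS when they receive it and settle in
    // S_NON_PRIM after the following states exchange.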
{ View tr1(0, ViewId(V_TRANS, pc1.current_view().id())); tr1.add_member(pu1.uuid(), 0); tr1.add_member(pu2.uuid(), 0); tr1.add_partitioned(pu3.uuid(), 0); pc1.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr1)); pc2.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr1)); fail_unless(pc1.state() == Proto::S_TRANS); fail_unless(pc2.state() == Proto::S_TRANS); pc1.handle_up(0, *im, ProtoUpMeta(pu1.uuid())); pc2.handle_up(0, *im, ProtoUpMeta(pu1.uuid())); fail_unless(pc1.state() == Proto::S_TRANS); fail_unless(pc2.state() == Proto::S_TRANS); View reg1(0, ViewId(V_REG, uuid1, seq + 1)); reg1.add_member(pu1.uuid(), 0); reg1.add_member(pu2.uuid(), 0); reg1.add_partitioned(pu3.uuid(), 0); pc1.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®1)); pc2.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®1)); fail_unless(pc1.state() == Proto::S_STATES_EXCH); fail_unless(pc2.state() == Proto::S_STATES_EXCH); Datagram* dg1(pu1.tp()->out()); Datagram* dg2(pu2.tp()->out()); fail_unless(dg1 != 0); fail_unless(dg2 != 0); pc1.handle_up(0, *dg1, ProtoUpMeta(pu1.uuid())); pc1.handle_up(0, *dg2, ProtoUpMeta(pu2.uuid())); pc2.handle_up(0, *dg1, ProtoUpMeta(pu1.uuid())); pc2.handle_up(0, *dg2, ProtoUpMeta(pu2.uuid())); fail_unless(pc1.state() == Proto::S_NON_PRIM); fail_unless(pc2.state() == Proto::S_NON_PRIM); View tr3(0, ViewId(V_TRANS, pc3.current_view().id())); tr3.add_member(pu3.uuid(), 0); tr3.add_partitioned(pu1.uuid(), 0); tr3.add_partitioned(pu2.uuid(), 0); pc3.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr3)); fail_unless(pc3.state() == Proto::S_TRANS); pc3.handle_up(0, *im, ProtoUpMeta(pu1.uuid())); fail_unless(pc3.state() == Proto::S_TRANS); View reg3(0, ViewId(V_REG, uuid3, seq + 1)); reg3.add_member(pu3.uuid(), 0); reg3.add_partitioned(pu1.uuid(), 0); reg3.add_partitioned(pu2.uuid(), 0); pc3.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®3)); fail_unless(pc3.state() == Proto::S_STATES_EXCH); Datagram* dg3(pu3.tp()->out()); fail_unless(dg3 != 0); pc3.handle_up(0, *dg3, ProtoUpMeta(pu3.uuid())); fail_unless(pc3.state() == Proto::S_NON_PRIM); } seq += 1; // then they try to merge into a primary component again. 
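    // Final remerge: after a fresh states exchange all three nodes reach
    // S_INSTALL, node 1 again emits the install message and this time it is
    // delivered to everyone.  All nodes are then expected to reach S_PRIM,
    // i.e. the stale install message from the earlier round must not have
    // corrupted the protocol state.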
{ View tr1(0, ViewId(V_TRANS, pc1.current_view().id())); tr1.add_member(pu1.uuid(), 0); tr1.add_member(pu2.uuid(), 0); pc1.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr1)); pc2.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr1)); fail_unless(pc1.state() == Proto::S_TRANS); fail_unless(pc2.state() == Proto::S_TRANS); View reg1(0, ViewId(V_REG, uuid1, seq + 1)); reg1.add_member(pu1.uuid(), 0); reg1.add_member(pu2.uuid(), 0); reg1.add_member(pu3.uuid(), 0); reg1.add_joined(pu3.uuid(), 0); pc1.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®1)); pc2.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®1)); fail_unless(pc1.state() == Proto::S_STATES_EXCH); fail_unless(pc2.state() == Proto::S_STATES_EXCH); View tr3(0, ViewId(V_TRANS, pc3.current_view().id())); tr3.add_member(pu3.uuid(), 0); pc3.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr3)); fail_unless(pc3.state() == Proto::S_TRANS); View reg3(0, ViewId(V_REG, uuid1, seq + 1)); reg3.add_member(pu1.uuid(), 0); reg3.add_member(pu2.uuid(), 0); reg3.add_member(pu3.uuid(), 0); reg3.add_joined(pu1.uuid(), 0); reg3.add_joined(pu2.uuid(), 0); pc3.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®3)); fail_unless(pc3.state() == Proto::S_STATES_EXCH); Datagram* dg1(pu1.tp()->out()); Datagram* dg2(pu2.tp()->out()); Datagram* dg3(pu3.tp()->out()); fail_unless(dg1 != 0); fail_unless(dg2 != 0); fail_unless(dg3 != 0); pc1.handle_up(0, *dg1, ProtoUpMeta(pu1.uuid())); pc1.handle_up(0, *dg2, ProtoUpMeta(pu2.uuid())); pc1.handle_up(0, *dg3, ProtoUpMeta(pu3.uuid())); pc2.handle_up(0, *dg1, ProtoUpMeta(pu1.uuid())); pc2.handle_up(0, *dg2, ProtoUpMeta(pu2.uuid())); pc2.handle_up(0, *dg3, ProtoUpMeta(pu3.uuid())); pc3.handle_up(0, *dg1, ProtoUpMeta(pu1.uuid())); pc3.handle_up(0, *dg2, ProtoUpMeta(pu2.uuid())); pc3.handle_up(0, *dg3, ProtoUpMeta(pu3.uuid())); delete dg1; delete dg2; delete dg3; fail_unless(pc1.state() == Proto::S_INSTALL); fail_unless(pc2.state() == Proto::S_INSTALL); fail_unless(pc3.state() == Proto::S_INSTALL); im = pu1.tp()->out(); fail_unless(im != 0); fail_unless(pu2.tp()->out() == 0); fail_unless(pu3.tp()->out() == 0); pc1.handle_up(0, *im, ProtoUpMeta(pu1.uuid())); pc2.handle_up(0, *im, ProtoUpMeta(pu1.uuid())); pc3.handle_up(0, *im, ProtoUpMeta(pu1.uuid())); fail_unless(pc1.state() == Proto::S_PRIM); fail_unless(pc2.state() == Proto::S_PRIM); fail_unless(pc3.state() == Proto::S_PRIM); } } END_TEST // Nodes 1, 2, 3. Node 3 will be evicted from group while group is // fully partitioned. After remerging 1 and 2 they should reach // primary component. 
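// The reasoning exercised here appears to be that an explicitly evicted node
// no longer counts towards the membership needed to re-establish the primary
// component, so the remerge of nodes 1 and 2 alone should be enough to get
// back to S_PRIM.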
START_TEST(test_prim_after_evict) { log_info << "START(test_prim_after_evict)"; UUID uuid1(1), uuid2(2), uuid3(3); gu::Config conf1; gu::ssl_register_params(conf1); gcomm::Conf::register_params(conf1); ProtoUpMeta pum1(uuid1); Proto pc1(conf1, uuid1, 0); DummyTransport tp1; PCUser pu1(conf1, uuid1, &tp1, &pc1); single_boot(1, &pu1); gu::Config conf2; gu::ssl_register_params(conf2); gcomm::Conf::register_params(conf2); ProtoUpMeta pum2(uuid2); Proto pc2(conf2, uuid2, 0); DummyTransport tp2; PCUser pu2(conf2, uuid2, &tp2, &pc2); double_boot(1, &pu1, &pu2); gu::Config conf3; gu::ssl_register_params(conf3); gcomm::Conf::register_params(conf3); ProtoUpMeta pum3(uuid3); Proto pc3(conf3, uuid3, 0); DummyTransport tp3; PCUser pu3(conf3, uuid3, &tp3, &pc3); triple_boot(1, &pu1, &pu2, &pu3); // Node 1 partitions { // Trans view View tr1(1, ViewId(V_TRANS, pc1.current_view().id())); tr1.add_member(pc1.uuid(), 0); tr1.add_partitioned(pc2.uuid(), 0); tr1.add_partitioned(pc3.uuid(), 0); pc1.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr1)); // Reg view View reg1(1, ViewId(V_REG, pc1.uuid(), tr1.id().seq() + 1)); reg1.add_member(pc1.uuid(), 0); pc1.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®1)); // States exch Datagram* dg(tp1.out()); fail_unless(dg != 0); pc1.handle_up(0, *dg, ProtoUpMeta(pc1.uuid())); delete dg; // Non-prim dg = tp1.out(); fail_unless(dg == 0); fail_unless(pc1.state() == Proto::S_NON_PRIM); } // Node 2 partitions { // Trans view View tr2(1, ViewId(V_TRANS, pc2.current_view().id())); tr2.add_member(pc2.uuid(), 0); tr2.add_partitioned(pc1.uuid(), 0); tr2.add_partitioned(pc3.uuid(), 0); pc2.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr2)); // Reg view View reg2(1, ViewId(V_REG, pc2.uuid(), tr2.id().seq() + 1)); reg2.add_member(pc2.uuid(), 0); pc2.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®2)); // States exch Datagram* dg(tp2.out()); fail_unless(dg != 0); pc2.handle_up(0, *dg, ProtoUpMeta(pc2.uuid())); delete dg; // Non-prim dg = tp2.out(); fail_unless(dg == 0); fail_unless(pc2.state() == Proto::S_NON_PRIM); } // Just forget about node3, it is gone forever // Nodes 1 and 2 set node3 evicted pc1.evict(pc3.uuid()); pc2.evict(pc3.uuid()); // Nodes 1 and 2 merge and should reach Prim { // Trans view for node 1 View tr1(1, ViewId(V_TRANS, pc1.current_view().id())); tr1.add_member(pc1.uuid(), 0); pc1.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr1)); Datagram *dg(tp1.out()); fail_unless(dg == 0); fail_unless(pc1.state() == Proto::S_TRANS); // Trans view for node 2 View tr2(1, ViewId(V_TRANS, pc2.current_view().id())); tr2.add_member(pc2.uuid(), 0); pc2.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr2)); dg = tp2.out(); fail_unless(dg == 0); fail_unless(pc2.state() == Proto::S_TRANS); // Reg view for nodes 1 and 2 View reg(1, ViewId(V_REG, pc1.uuid(), tr1.id().seq() + 1)); reg.add_member(pc1.uuid(), 0); reg.add_member(pc2.uuid(), 0); pc1.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®)); pc2.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®)); // States exchange fail_unless(pc1.state() == Proto::S_STATES_EXCH); fail_unless(pc2.state() == Proto::S_STATES_EXCH); // State message from node 1 dg = tp1.out(); fail_unless(dg != 0); pc1.handle_up(0, *dg, ProtoUpMeta(pc1.uuid())); pc2.handle_up(0, *dg, ProtoUpMeta(pc1.uuid())); delete dg; dg = tp1.out(); fail_unless(dg == 0); // State message from node 2 dg = tp2.out(); fail_unless(dg != 0); pc1.handle_up(0, *dg, 
ProtoUpMeta(pc2.uuid())); pc2.handle_up(0, *dg, ProtoUpMeta(pc2.uuid())); delete dg; dg = tp2.out(); fail_unless(dg == 0); // Install fail_unless(pc1.state() == Proto::S_INSTALL, "state is %s", Proto::to_string(pc1.state()).c_str()); fail_unless(pc2.state() == Proto::S_INSTALL, "state is %s", Proto::to_string(pc2.state()).c_str()); // Install message from node 1 dg = tp1.out(); fail_unless(dg != 0); pc1.handle_up(0, *dg, ProtoUpMeta(pc1.uuid())); pc2.handle_up(0, *dg, ProtoUpMeta(pc1.uuid())); delete dg; // Prim dg = tp1.out(); fail_unless(dg == 0); dg = tp2.out(); fail_unless(dg == 0); fail_unless(pc1.state() == Proto::S_PRIM); fail_unless(pc2.state() == Proto::S_PRIM); } } END_TEST Suite* pc_suite() { Suite* s = suite_create("gcomm::pc"); TCase* tc; bool skip = false; if (!skip) { tc = tcase_create("test_pc_messages"); tcase_add_test(tc, test_pc_messages); suite_add_tcase(s, tc); tc = tcase_create("test_pc_view_changes_single"); tcase_add_test(tc, test_pc_view_changes_single); suite_add_tcase(s, tc); tc = tcase_create("test_pc_view_changes_double"); tcase_add_test(tc, test_pc_view_changes_double); suite_add_tcase(s, tc); tc = tcase_create("test_pc_view_changes_reverse"); tcase_add_test(tc, test_pc_view_changes_reverse); suite_add_tcase(s, tc); tc = tcase_create("test_pc_state1"); tcase_add_test(tc, test_pc_state1); suite_add_tcase(s, tc); tc = tcase_create("test_pc_state2"); tcase_add_test(tc, test_pc_state2); suite_add_tcase(s, tc); tc = tcase_create("test_pc_state3"); tcase_add_test(tc, test_pc_state3); suite_add_tcase(s, tc); tc = tcase_create("test_pc_conflicting_prims"); tcase_add_test(tc, test_pc_conflicting_prims); suite_add_tcase(s, tc); tc = tcase_create("test_pc_conflicting_prims_npvo"); tcase_add_test(tc, test_pc_conflicting_prims_npvo); suite_add_tcase(s, tc); if (run_all_pc_tests() == true) { tc = tcase_create("test_pc_split_merge"); tcase_add_test(tc, test_pc_split_merge); tcase_set_timeout(tc, 15); suite_add_tcase(s, tc); tc = tcase_create("test_pc_split_merge_w_user_msg"); tcase_add_test(tc, test_pc_split_merge_w_user_msg); tcase_set_timeout(tc, 15); suite_add_tcase(s, tc); tc = tcase_create("test_pc_complete_split_merge"); tcase_add_test(tc, test_pc_complete_split_merge); tcase_set_timeout(tc, 25); suite_add_tcase(s, tc); tc = tcase_create("test_pc_protocol_upgrade"); tcase_add_test(tc, test_pc_protocol_upgrade); tcase_set_timeout(tc, 25); suite_add_tcase(s, tc); tc = tcase_create("test_pc_transport"); tcase_add_test(tc, test_pc_transport); tcase_set_timeout(tc, 35); suite_add_tcase(s, tc); } tc = tcase_create("test_trac_191"); tcase_add_test(tc, test_trac_191); suite_add_tcase(s, tc); tc = tcase_create("test_trac_413"); tcase_add_test(tc, test_trac_413); suite_add_tcase(s, tc); tc = tcase_create("test_fifo_violation"); tcase_add_test(tc, test_fifo_violation); suite_add_tcase(s, tc); tc = tcase_create("test_checksum"); tcase_add_test(tc, test_checksum); suite_add_tcase(s, tc); tc = tcase_create("test_set_param"); tcase_add_test(tc, test_set_param); suite_add_tcase(s, tc); if (run_all_pc_tests() == true) { tc = tcase_create("test_trac_599"); tcase_add_test(tc, test_trac_599); suite_add_tcase(s, tc); } tc = tcase_create("test_trac_620"); tcase_add_test(tc, test_trac_620); suite_add_tcase(s, tc); if (run_all_pc_tests() == true) { tc = tcase_create("test_trac_277"); tcase_add_test(tc, test_trac_277); suite_add_tcase(s, tc); tc = tcase_create("test_trac_622_638"); tcase_add_test(tc, test_trac_622_638); suite_add_tcase(s, tc); tc = tcase_create("test_weighted_quorum"); 
tcase_add_test(tc, test_weighted_quorum); suite_add_tcase(s, tc); } tc = tcase_create("test_weighted_partitioning_1"); tcase_add_test(tc, test_weighted_partitioning_1); suite_add_tcase(s, tc); tc = tcase_create("test_weighted_partitioning_2"); tcase_add_test(tc, test_weighted_partitioning_2); suite_add_tcase(s, tc); tc = tcase_create("test_weight_change_partitioning_1"); tcase_add_test(tc, test_weight_change_partitioning_1); suite_add_tcase(s, tc); tc = tcase_create("test_weight_change_partitioning_2"); tcase_add_test(tc, test_weight_change_partitioning_2); suite_add_tcase(s, tc); tc = tcase_create("test_weight_change_joining"); tcase_add_test(tc, test_weight_change_joining); suite_add_tcase(s, tc); tc = tcase_create("test_weight_change_leaving"); tcase_add_test(tc, test_weight_change_leaving); suite_add_tcase(s, tc); if (run_all_pc_tests() == true) { tc = tcase_create("test_trac_762"); tcase_add_test(tc, test_trac_762); tcase_set_timeout(tc, 15); suite_add_tcase(s, tc); } tc = tcase_create("test_join_split_cluster"); tcase_add_test(tc, test_join_split_cluster); suite_add_tcase(s, tc); tc = tcase_create("test_gh_92"); tcase_add_test(tc, test_gh_92); suite_add_tcase(s, tc); tc = tcase_create("test_prim_after_evict"); tcase_add_test(tc, test_prim_after_evict); suite_add_tcase(s, tc); } return s; } galera-3-25.3.20/gcomm/test/ssl_test.cpp0000644000015300001660000001126713042054732017572 0ustar jenkinsjenkins/* Copyrignt (C) 2014 Codership Oy */ #include "gcomm/protonet.hpp" #include "gcomm/util.hpp" #include "gcomm/conf.hpp" #include #include static gu::Config conf; class Client : public gcomm::Toplay { public: Client(gcomm::Protonet& pnet, const std::string& uri) : gcomm::Toplay(conf), uri_ (uri), pnet_ (pnet), pstack_(), socket_(pnet_.socket(uri)), msg_ () { pstack_.push_proto(this); pnet_.insert(&pstack_); } ~Client() { pnet_.erase(&pstack_); pstack_.pop_proto(this); socket_->close(); } void connect(bool f = false) { socket_->connect(uri_); } std::string msg() const { return std::string(msg_.begin(), msg_.end()); } void handle_up(const void* id, const gcomm::Datagram& dg, const gcomm::ProtoUpMeta& um) { if (um.err_no() != 0) { log_error << "socket failed: " << um.err_no(); socket_->close(); throw std::exception(); } else { assert(id == socket_->id()); msg_.insert(msg_.begin(), gcomm::begin(dg), gcomm::begin(dg) + gcomm::available(dg)); } } private: gu::URI uri_; gcomm::Protonet& pnet_; gcomm::Protostack pstack_; gcomm::SocketPtr socket_; gu::Buffer msg_; }; class Server : public gcomm::Toplay { public: Server(gcomm::Protonet& pnet, const std::string& uri) : gcomm::Toplay(conf), uri_(uri), pnet_(pnet), pstack_(), listener_(), smap_(), msg_("hello ssl") { pstack_.push_proto(this); pnet_.insert(&pstack_); listener_ = pnet_.acceptor(uri_); } ~Server() { delete listener_; pnet_.erase(&pstack_); pstack_.pop_proto(this); } void listen() { listener_->listen(uri_); } void handle_up(const void* id, const gcomm::Datagram& dg, const gcomm::ProtoUpMeta& um) { if (id == listener_->id()) { gcomm::SocketPtr socket(listener_->accept()); if (smap_.insert( std::make_pair(socket->id(), socket)).second == false) { throw std::logic_error("duplicate socket entry"); } return; } std::map::iterator si(smap_.find(id)); if (si == smap_.end()) { throw std::logic_error("could not find socket from map"); } gcomm::SocketPtr socket(si->second); if (socket->state() == gcomm::Socket::S_CONNECTED) { gcomm::Datagram msg; msg.payload().resize(msg_.size()); std::copy(msg_.begin(), msg_.end(), msg.payload().begin()); 
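                // reply to the connected client with the canned greeting
                // held in msg_ ("hello ssl")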
socket->send(msg); } else if (socket->state() == gcomm::Socket::S_CLOSED || socket->state() == gcomm::Socket::S_FAILED) { std::cerr << "socket " << id << " failed" << std::endl; socket->close(); smap_.erase(id); } else { std::cerr << "socket state: " << socket->state() << std::endl; } } private: Server(const Server&); void operator=(const Server&); gu::URI uri_; gcomm::Protonet& pnet_; gcomm::Protostack pstack_; gcomm::Acceptor* listener_; std::map smap_; const std::string msg_; }; int main(int argc, char* argv[]) { if (argc != 4) { std::cerr << "usage: " << argv[0] << " <-s|-c> " << std::endl; return 1; } gu::Config conf; gcomm::Conf::register_params(conf); conf.parse(argv[2]); std::auto_ptr pnet(gcomm::Protonet::create(conf)); if (std::string("-s") == argv[1]) { Server server(*pnet, argv[3]); server.listen(); while (true) { pnet->event_loop(gu::datetime::Period(1 * gu::datetime::Sec)); } } else if (std::string("-c") == argv[1]) { Client client(*pnet, argv[3]); client.connect(); while (true) { pnet->event_loop(gu::datetime::Period(1*gu::datetime::MSec)); std::string msg(client.msg()); if (msg != "") { std::cout << "read message from server: '" << msg << "'" << std::endl; break; } } } return 0; } galera-3-25.3.20/scripts/0000755000015300001660000000000013042054732014625 5ustar jenkinsjenkinsgalera-3-25.3.20/scripts/source/0000755000015300001660000000000013042054732016125 5ustar jenkinsjenkinsgalera-3-25.3.20/scripts/source/COPYING0000644000015300001660000004325413042054732017170 0ustar jenkinsjenkins GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. 
Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. 
(Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. 
Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. 
The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. galera-3-25.3.20/scripts/source/build.sh0000755000015300001660000000354713042054732017574 0ustar jenkinsjenkins#!/bin/bash -eu # $Id: build.sh 1323 2009-11-22 23:48:22Z alex $ usage() { echo -e "Usage: build.sh [script options] [configure options]\n"\ "Script options:\n"\ " -h|--help this help message\n"\ " -i|--install install libraries system-wide\n" } INSTALL="no" CONFIGURE="no" LD_LIBRARY_PATH=${LD_LIBRARY_PATH:-""} CPPFLAGS=${CPPFLAGS:-""} LDFLAGS=${LDFLAGS:-""} if ccache -V > /dev/null 2>&1 then CC=${CC:-"gcc"} CXX=${CXX:-"g++"} echo "$CC" | grep "ccache" > /dev/null || CC="ccache $CC" echo "$CXX" | grep "ccache" > /dev/null || CXX="ccache $CXX" export CC CXX fi while test $# -gt 0 do case $1 in -i|--install) INSTALL="yes" shift ;; -h|--help) usage exit 1 ;; *) # what's left is the arguments for configure CONFIGURE="yes" break ;; esac done # Build process base directory build_base=$(cd $(dirname $0); pwd -P) # Updates build flags for the next stage build_flags() { local build_dir=$1 LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$build_dir/src/.libs CPPFLAGS="$CPPFLAGS -I$build_dir/src " LDFLAGS="$LDFLAGS -L$build_dir/src/.libs" } # Function to build single project build() { local build_dir=$1 shift echo "Building: $build_dir ($@)" pushd $build_dir export LD_LIBRARY_PATH export CPPFLAGS export LDFLAGS local SCRATCH=no if [ ! 
-s "Makefile" ]; then CONFIGURE=yes; fi if [ "$CONFIGURE" == "yes" ]; then rm -rf config.status; ./configure $@; SCRATCH=yes ; fi if [ "$SCRATCH" == "yes" ]; then make clean ; fi make || return 1 if [ "$INSTALL" == "yes" ] then make install || return 1 else build_flags $(pwd -P) || return 1 fi popd } build "libgalerautils*" $@ build "libgcomm*" $@ build "libgcs*" $@ build "libwsdb*" $@ build "libgalera*" $@ galera-3-25.3.20/scripts/source/README0000644000015300001660000000046013042054732017005 0ustar jenkinsjenkinsThis is release 0.7 of Galera - Codership's implementation of wsrep interface (https://launchpad.net/wsrep) For details see http://www.codership.com. Build order: 1. libgalerautils 2. libgcomm 3. libgcs 4. libwsdb 5. libgalera Building unit tests requires check library (http://check.sourceforge.net) galera-3-25.3.20/scripts/source/package.sh0000755000015300001660000000443113042054732020061 0ustar jenkinsjenkins#!/bin/bash -eu # $Id: build.sh 1323 2009-11-22 23:48:22Z alex $ usage() { echo -e "Usage: build.sh [script options] [configure options] \n" \ "Script options:\n" \ " -h|--help this help message\n"\ " -r|--release release number to put in the tarball\n"\ " name: galera-source-XXX.tgz" } RELEASE="" CONFIGURE="no" LD_LIBRARY_PATH=${LD_LIBRARY_PATH:-""} CPPFLAGS=${CPPFLAGS:-""} LDFLAGS=${LDFLAGS:-""} while test $# -gt 0 do case $1 in -r|--release) RELEASE=$2 shift shift ;; -h|--help) usage exit 1 ;; *) # what's left is the arguments for configure break ;; esac done # Build process base directory BUILD_BASE=$(cd $(dirname $0)/../../; pwd -P) # Updates build flags for the next stage build_flags() { local build_dir=$1 LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$build_dir/src/.libs CPPFLAGS="$CPPFLAGS -I$build_dir/src " LDFLAGS="$LDFLAGS -L$build_dir/src/.libs" } # Function to build single project build() { local module=$1 shift local build_dir=$BUILD_BASE/$module echo "Building: $build_dir ($@)" pushd $build_dir export LD_LIBRARY_PATH export CPPFLAGS export LDFLAGS if [ ! -x "configure" ]; then ./bootstrap.sh; fi if [ ! -s "Makefile" ]; then ./configure $@; fi local src_base_name="lib${module}-" rm -rf "${src_base_name}"*.tar.gz make dist || return 1 build_flags $build_dir || return 1 local ret=$(ls "${src_base_name}"*.tar.gz) popd echo $build_dir/$ret } build_sources() { local module local srcs="" for module in "galerautils" "gcomm" "gcs" "wsdb" "galera" do src=$(build $module $@ | tail -n 1) || return 1 srcs="$srcs $src" done if [ -z "$RELEASE" ] then pushd "$BUILD_BASE" RELEASE="r$(svnversion | sed s/\:/,/g)" popd fi local dist_dir="galera-source-$RELEASE" rm -rf $dist_dir mkdir -p $dist_dir for src in $srcs do tar -C $dist_dir -xzf $src done cp "README" "COPYING" "build.sh" $dist_dir/ tar -czf $dist_dir.tgz $dist_dir # return absolute path for scripts echo $PWD/$dist_dir.tgz } pushd $(dirname $0) build_sources $@ galera-3-25.3.20/scripts/packages/0000755000015300001660000000000013042054732016403 5ustar jenkinsjenkinsgalera-3-25.3.20/scripts/packages/freebsd.sh0000755000015300001660000000453313042054732020361 0ustar jenkinsjenkins#!/bin/bash -eu if [ $# -ne 1 ] then echo "Usage: $0 " exit 1 fi RELEASE=$1 # Absolute path of this script folder SCRIPT_ROOT=$(cd $(dirname $0); pwd -P) PBR="$SCRIPT_ROOT/pkg_top_dir" PBD="$SCRIPT_ROOT/../.." 
GALERA_LICENSE_DIR="$PBR/share/licenses/galera-$RELEASE" rm -rf "$PBR" mkdir -p "$PBR" install -d "$PBR/"{bin,lib/galera,share/doc/galera,etc/rc.d,libdata/ldconfig} install -m 555 "$PBD/garb/files/freebsd/garb.sh" "$PBR/etc/rc.d/garb" install -m 555 "$PBD/garb/garbd" "$PBR/bin/garbd" install -m 444 "$PBD/libgalera_smm.so" "$PBR/lib/galera/libgalera_smm.so" install -m 444 "$SCRIPT_ROOT/freebsd/galera-ldconfig" "$PBR/libdata/ldconfig/galera" install -m 444 "$PBD/scripts/packages/README" "$PBR/share/doc/galera/" install -m 444 "$PBD/scripts/packages/README-MySQL" "$PBR/share/doc/galera/" install -m 755 -d "$GALERA_LICENSE_DIR" install -m 444 "$PBD/LICENSE" "$GALERA_LICENSE_DIR/GPLv2" install -m 444 "$PBD/scripts/packages/freebsd/LICENSE" "$GALERA_LICENSE_DIR" install -m 444 "$PBD/asio/LICENSE_1_0.txt" "$GALERA_LICENSE_DIR/LICENSE.asio" install -m 444 "$PBD/www.evanjones.ca/LICENSE" "$GALERA_LICENSE_DIR/LICENSE.crc32c" install -m 444 "$PBD/chromium/LICENSE" "$GALERA_LICENSE_DIR/LICENSE.chromium" install -m 444 "$PBD/scripts/packages/freebsd/catalog.mk" "$GALERA_LICENSE_DIR" install -m 644 "$SCRIPT_ROOT/freebsd/galera-"{plist,descr,comment,message} "$PBR" sed -e "s!%{SRCDIR}!$PBR!" -e "s!%{RELEASE}!$RELEASE!" -i "" "$PBR/galera-"{plist,descr,comment,message} \ "$GALERA_LICENSE_DIR/catalog.mk" for pkg in $(grep '^@comment DEPORIGIN:' "$PBR/galera-plist" | cut -d : -f 2); do pkgdep=$(/usr/sbin/pkg_info -q -O "$pkg") if [ -z "$pkgdep" ]; then echo "ERROR: failed to find dependency package '$pkg'" >&2 exit 1 fi sed -e "s!^@comment DEPORIGIN:$pkg!@pkgdep $pkgdep"$'\\\n&!' -i "" "$PBR/galera-plist" done /usr/sbin/pkg_create -c "$SCRIPT_ROOT/freebsd/galera-comment" \ -d "$SCRIPT_ROOT/freebsd/galera-descr" \ -m "$SCRIPT_ROOT/freebsd/galera-mtree" \ -D "$SCRIPT_ROOT/freebsd/galera-message" \ -f "$PBR/galera-plist" \ -v "galera-$1-$(uname -m).tbz" rm -rf "$PBR" exit 0 galera-3-25.3.20/scripts/packages/galera-dev.list0000644000015300001660000000261613042054732021314 0ustar jenkinsjenkins# This is Galera development package description for ESP package manager %include galera-common.inc d 755 root root $INCS_DEST - f 644 root root $INCS_DEST/gcs.h $BUILD_BASE/gcs/src/gcs.h f 644 root root $INCS_DEST/wsdb_api.h $BUILD_BASE/wsdb/src/wsdb_api.h f 644 root root $INCS_DEST/wsrep_api.h $BUILD_BASE/galera/src/wsrep_api.h d 755 root root $LIBS_DEST - f 755 root root $LIBS_DEST/libgalerautils.a $BUILD_BASE/galerautils/src/.libs/libgalerautils.a f 755 root root $LIBS_DEST/libgalerautils++.a $BUILD_BASE/galerautils/src/.libs/libgalerautils++.a %ifdef GCOMM #f 755 root root $LIBS_DEST/libgcomm.a $BUILD_BASE//gcomm/src/.libs/libgcomm.a %endif %ifdef VSBES f 755 root root $LIBS_DEST/libgcommcommonpp.a $BUILD_BASE/galeracomm/common/src/.libs/libgcommcommonpp.a f 755 root root $LIBS_DEST/libgcommtransportpp.a $BUILD_BASE/galeracomm/transport/src/.libs/libgcommtransportpp.a f 755 root root $LIBS_DEST/libgcommvspp.a $BUILD_BASE/galeracomm/vs/src/.libs/libgcommvspp.a %endif f 755 root root $LIBS_DEST/libgcs.a $BUILD_BASE/gcs/src/.libs/libgcs.a f 755 root root $LIBS_DEST/libwsdb.a $BUILD_BASE/wsdb/src/.libs/libwsdb.a f 755 root root $LIBS_DEST/libmmgalera.a $BUILD_BASE/galera/src/.libs/libmmgalera.a %format deb # Debian packages come with bad file ownership %postinstall < EOF } function main { set -x test $# -eq 1 || (usage && exit 1) local version="$1" build_deb "$version" } main $@ galera-3-25.3.20/scripts/packages/galera-obs.spec0000644000015300001660000002312313042054732021274 0ustar jenkinsjenkins# Copyright (c) 
2011-2015, Codership Oy . # All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 2 of the License or later. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; see the file COPYING. If not, write to the # Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston # MA 02110-1301 USA. %{!?name: %define name galera-3} %{!?version: %define version 25_3.x} %{!?release: %define release 2} %define revision XXXX %define copyright Copyright 2007-2015 Codership Oy. All rights reserved. Use is subject to license terms under GPLv2 license. %define libs %{_libdir}/%{name} %define docs /usr/share/doc/%{name} # Avoid debuginfo RPMs, leaves binaries unstripped %global _enable_debug_package 0 %global debug_package %{nil} %global __os_install_post /usr/lib/rpm/brp-compress %{nil} # Define dist tag if not given by platform # For suse versions see: # https://en.opensuse.org/openSUSE:Build_Service_cross_distribution_howto %if 0%{?suse_version} == 1110 %define dist .sle11 %endif %if 0%{?suse_version} == 1310 %define dist .suse13.1 %endif %if 0%{?suse_version} == 1315 %define dist .sle12 %endif %if 0%{?suse_version} == 1320 %define dist .suse13.2 %endif Name: %{name} Summary: Galera: a synchronous multi-master wsrep provider (replication engine) Group: System Environment/Libraries Version: %{version} Release: %{release}%{dist} License: GPL-2.0 Source: %{name}-%{version}.tar.gz URL: http://www.codership.com/ Packager: Codership Oy Vendor: Codership Oy BuildRoot: %{_tmppath}/%{name}_%{version}-build BuildRequires: boost-devel >= 1.41 BuildRequires: check-devel BuildRequires: glibc-devel BuildRequires: openssl-devel BuildRequires: scons %if 0%{?suse_version} == 1110 # On SLES11 SPx use the linked gcc47 to build instead of default gcc43 BuildRequires: gcc47 gcc47-c++ # On SLES11 SP2 the libgfortran.3.so provider must be explicitly defined BuildRequires: libgfortran3 # On SLES11 we got error "conflict for provider of libgcc_s1 >= 4.7.4_20140612-2.1 # needed by gcc47, (provider libgcc_s1 conflicts with installed libgcc43), # conflict for provider of libgomp1 >= 4.7.4_20140612-2.1 needed by gcc47, # (provider libgomp1 conflicts with installed libgomp43), conflict for provider # of libstdc++6 >= 4.7.4_20140612-2.1 needed by libstdc++47-devel, # (provider libstdc++6 conflicts with installed libstdc++43) # therefore: BuildRequires: libgcc_s1 BuildRequires: libgomp1 BuildRequires: libstdc++6 #!BuildIgnore: libgcc43 %else BuildRequires: gcc-c++ %endif %if %{defined fedora} BuildRequires: python %endif # Systemd %if 0%{?suse_version} >= 1220 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 %define systemd 1 BuildRequires: systemd %else %define systemd 0 %endif %if 0%{?systemd} %{?systemd_requires} %if 0%{?suse_version} BuildRequires: systemd-rpm-macros # RedHat seems not to need this (or an equivalent). 
%endif %else # NOT systemd %if 0%{?suse_version} PreReq: %insserv_prereq %fillup_prereq %else Requires(post): chkconfig Requires(preun): chkconfig Requires(preun): initscripts %endif %endif # systemd Requires: openssl Provides: wsrep, %{name} = %{version}-%{release} Provides: galera, galera3, Percona-XtraDB-Cluster-galera-25 %description Galera is a fast synchronous multimaster wsrep provider (replication engine) for transactional databases and similar applications. For more information about wsrep API see http://launchpad.net/wsrep. For a description of Galera replication engine see http://www.codership.com. %{copyright} This software comes with ABSOLUTELY NO WARRANTY. This is free software, and you are welcome to modify and redistribute it under the GPLv2 license. %prep %setup -q -n %{name}-%{version} # When downloading from GitHub the contents is in a folder # that is named by the branch it was exported from. %build # Debug info: echo "suse_version: %{suse_version}" # 1110 = SLE-11 SPx %if 0%{?suse_version} == 1110 export CC=gcc-4.7 export CXX=g++-4.7 %endif %if 0%{?suse_version} == 1120 export CC=gcc-4.6 export CXX=g++-4.6 %endif %if 0%{?suse_version} == 1130 export CC=gcc-4.7 export CXX=g++-4.7 %endif NUM_JOBS=${NUM_JOBS:-$(ncpu=$(cat /proc/cpuinfo | grep processor | wc -l) && echo $(($ncpu > 4 ? 4 : $ncpu)))} scons -j$(echo $NUM_JOBS) revno=%{revision} deterministic_tests=1 %install RBR=$RPM_BUILD_ROOT # eg. rpmbuild/BUILDROOT/galera-3-3.x-33.1.x86_64 RBD=$RPM_BUILD_DIR/%{name}-%{version} # eg. rpmbuild/BUILD/galera-3.x # When downloading from GitHub the contents is in a folder # that is named by the branch it was exported from. # Clean up the BuildRoot first [ "$RBR" != "/" ] && [ -d $RBR ] && rm -rf $RBR; mkdir -p $RBR %if 0%{?systemd} install -D -m 644 $RBD/garb/files/garb.service $RBR%{_unitdir}/garb.service install -D -m 755 $RBD/garb/files/garb-systemd $RBR%{_bindir}/garb-systemd %else install -d $RBR%{_sysconfdir}/init.d install -m 755 $RBD/garb/files/garb.sh $RBR%{_sysconfdir}/init.d/garb %endif # Symlink required by SUSE policy for SysV init, still supported with systemd %if 0%{?suse_version} %if 0%{?systemd} install -d %{buildroot}%{_sbindir} ln -sf /usr/sbin/service %{buildroot}%{_sbindir}/rcgarb %else install -d $RBR/usr/sbin ln -sf /etc/init.d/garb $RBR/usr/sbin/rcgarb %endif # systemd %endif # suse_version %if 0%{?suse_version} install -d $RBR/var/adm/fillup-templates/ install -m 644 $RBD/garb/files/garb.cnf $RBR/var/adm/fillup-templates/sysconfig.garb %else install -d $RBR%{_sysconfdir}/sysconfig install -m 644 $RBD/garb/files/garb.cnf $RBR%{_sysconfdir}/sysconfig/garb %endif # suse_version install -d $RBR%{_bindir} install -m 755 $RBD/garb/garbd $RBR%{_bindir}/garbd install -d $RBR%{libs} install -m 755 $RBD/libgalera_smm.so $RBR%{libs}/libgalera_smm.so install -d $RBR%{docs} install -m 644 $RBD/COPYING $RBR%{docs}/COPYING install -m 644 $RBD/asio/LICENSE_1_0.txt $RBR%{docs}/LICENSE.asio install -m 644 $RBD/www.evanjones.ca/LICENSE $RBR%{docs}/LICENSE.crc32c install -m 644 $RBD/chromium/LICENSE $RBR%{docs}/LICENSE.chromium install -m 644 $RBD/scripts/packages/README $RBR%{docs}/README install -m 644 $RBD/scripts/packages/README-MySQL $RBR%{docs}/README-MySQL install -d $RBR%{_mandir}/man8 install -m 644 $RBD/man/garbd.8 $RBR%{_mandir}/man8/garbd.8 %if 0%{?systemd} %if 0%{?suse_version} %post %service_add_post garb %preun %service_del_preun garb %else # Not SuSE - so it must be RedHat, CentOS, Fedora %post %systemd_post garb.service %preun %systemd_preun garb.service 
%postun %systemd_postun_with_restart garb.service %endif # SuSE versus Fedora/RedHat/CentOS %else # NOT systemd %if 0%{?suse_version} # For the various macros and their parameters, see here: # https://en.opensuse.org/openSUSE:Packaging_Conventions_RPM_Macros %post %fillup_and_insserv garb %preun %stop_on_removal garb rm -f $(find %{libs} -type l) %postun %restart_on_update garb %insserv_cleanup %else # Not SuSE - so it must be RedHat, CentOS, Fedora %post /sbin/chkconfig --add garb %preun if [ "$1" = "0" ] then /sbin/service garb stop /sbin/chkconfig --del garb fi %postun # >=1 packages after uninstall -> pkg was updated -> restart if [ "$1" -ge "1" ] then /sbin/service garb restart fi %endif # SuSE versus Fedora/RedHat/CentOS %endif # systemd ? %files %defattr(-,root,root,0755) %if 0%{?suse_version} %config(noreplace,missingok) /var/adm/fillup-templates/sysconfig.garb %else %config(noreplace,missingok) %{_sysconfdir}/sysconfig/garb %endif %if 0%{?systemd} %attr(0644,root,root) %{_unitdir}/garb.service %attr(0755,root,root) %{_bindir}/garb-systemd %else %attr(0755,root,root) %{_sysconfdir}/init.d/garb %endif # Symlink required by SUSE policy for SysV init, still supported with systemd %if 0%{?suse_version} %attr(0755,root,root) /usr/sbin/rcgarb %endif %attr(0755,root,root) %{_bindir}/garbd %attr(0755,root,root) %dir %{libs} %attr(0755,root,root) %{libs}/libgalera_smm.so %attr(0755,root,root) %dir %{docs} %doc %attr(0644,root,root) %{docs}/COPYING %doc %attr(0644,root,root) %{docs}/LICENSE.asio %doc %attr(0644,root,root) %{docs}/LICENSE.crc32c %doc %attr(0644,root,root) %{docs}/LICENSE.chromium %doc %attr(0644,root,root) %{docs}/README %doc %attr(0644,root,root) %{docs}/README-MySQL %doc %attr(644, root, man) %{_mandir}/man8/garbd.8* %clean [ "$RPM_BUILD_ROOT" != "/" ] && [ -d $RPM_BUILD_ROOT ] && rm -rf $RPM_BUILD_ROOT; %changelog * Fri Feb 27 2015 Joerg Bruehe - Service name is "garb", reflect that in the config file (SuSE only, galera#235, Release 2) * Fri Feb 20 2015 Joerg Bruehe - Update copyright year. - Make the man page file name consistent with its section. * Wed Feb 11 2015 Joerg Bruehe - Add missing "prereq" directive and arguments for the various service control macros. - Handle the difference between SuSE and Fedora/RedHat/CentOS. - Fix systemd stuff, using info from these pages: https://en.opensuse.org/openSUSE:Systemd_packaging_guidelines http://fedoraproject.org/wiki/Packaging:Systemd http://fedoraproject.org/wiki/Packaging:ScriptletSnippets#Systemd * Tue Sep 30 2014 Otto Kekäläinen - 3.x - Initial OBS packaging created galera-3-25.3.20/scripts/packages/freebsd/0000755000015300001660000000000013042054732020015 5ustar jenkinsjenkinsgalera-3-25.3.20/scripts/packages/freebsd/galera-comment0000644000015300001660000000010713042054732022631 0ustar jenkinsjenkinsGalera: a synchronous multi-master wsrep provider (replication engine) galera-3-25.3.20/scripts/packages/freebsd/galera-mtree0000644000015300001660000004223013042054732022306 0ustar jenkinsjenkins# $FreeBSD: /tmp/pcvs/ports/Templates/BSD.local.dist,v 1.3 2010-11-12 20:57:14 pav Exp $ # # Please see the file src/etc/mtree/README before making changes to this file. # /set type=dir uname=root gname=wheel mode=0755 . bin .. etc devd .. man.d .. pam.d .. rc.d .. .. include X11 .. .. info .. lib X11 app-defaults .. fonts local .. .. .. .. libdata ldconfig .. ldconfig32 .. pkgconfig .. .. libexec .. man /set uname=man cat1 .. cat2 .. cat3 .. cat4 .. cat5 .. cat6 .. cat7 .. cat8 .. cat9 .. catl .. catn .. 
de.ISO8859-1 uname=root cat1 .. cat2 .. cat3 .. cat4 .. cat5 .. cat6 .. cat7 .. cat8 .. cat9 .. catl .. catn .. /set uname=root man1 .. man2 .. man3 .. man4 .. man5 .. man6 .. man7 .. man8 .. man9 .. manl .. mann .. .. en.ISO8859-1 /set uname=man cat1 .. cat1aout .. cat2 .. cat3 .. cat4 i386 .. .. cat5 .. cat6 .. cat7 .. cat8 i386 .. .. cat9 i386 .. .. catn .. .. ja uname=root cat1 .. cat2 .. cat3 .. cat4 .. cat5 .. cat6 .. cat7 .. cat8 .. cat9 .. catl .. catn .. /set uname=root man1 .. man2 .. man3 .. man4 .. man5 .. man6 .. man7 .. man8 .. man9 .. manl .. mann .. .. man1 .. man2 .. man3 .. man4 .. man5 .. man6 .. man7 .. man8 .. man9 .. manl .. mann .. ru.KOI8-R /set uname=man cat1 .. cat2 .. cat3 .. cat4 .. cat5 .. cat6 .. cat7 .. cat8 .. cat9 .. catl .. catn .. /set uname=root man1 .. man2 .. man3 .. man4 .. man5 .. man6 .. man7 .. man8 .. man9 .. manl .. mann .. .. .. sbin .. share aclocal .. dict .. doc ja .. .. emacs site-lisp .. .. examples .. java classes .. .. locale af LC_MESSAGES .. .. am LC_MESSAGES .. .. ar LC_MESSAGES .. .. az LC_MESSAGES .. .. be LC_MESSAGES .. .. bg LC_MESSAGES .. .. bn LC_MESSAGES .. .. br LC_MESSAGES .. .. bs LC_MESSAGES .. .. ca LC_MESSAGES .. .. cs LC_MESSAGES .. .. cy LC_MESSAGES .. .. da LC_MESSAGES .. .. de LC_MESSAGES .. .. de_AT LC_MESSAGES .. .. dk LC_MESSAGES .. .. ee LC_MESSAGES .. .. el LC_MESSAGES .. .. en LC_MESSAGES .. .. en_AU LC_MESSAGES .. .. en_CA LC_MESSAGES .. .. en_GB LC_MESSAGES .. .. eo LC_MESSAGES .. .. es LC_MESSAGES .. .. es_ES LC_MESSAGES .. .. es_MX LC_MESSAGES .. .. et LC_MESSAGES .. .. eu LC_MESSAGES .. .. fa LC_MESSAGES .. .. fa_IR LC_MESSAGES .. .. fi LC_MESSAGES .. .. fr LC_MESSAGES .. .. fr_FR LC_MESSAGES .. .. ga LC_MESSAGES .. .. gl LC_MESSAGES .. .. gu LC_MESSAGES .. .. he LC_MESSAGES .. .. hi LC_MESSAGES .. .. hr LC_MESSAGES .. .. hu LC_MESSAGES .. .. id LC_MESSAGES .. .. is LC_MESSAGES .. .. it LC_MESSAGES .. .. ja LC_MESSAGES .. .. ka LC_MESSAGES .. .. kn LC_MESSAGES .. .. ko LC_MESSAGES .. .. li LC_MESSAGES .. .. lt LC_MESSAGES .. .. lv LC_MESSAGES .. .. mk LC_MESSAGES .. .. ml LC_MESSAGES .. .. mn LC_MESSAGES .. .. ms LC_MESSAGES .. .. mt LC_MESSAGES .. .. nb LC_MESSAGES .. .. ne LC_MESSAGES .. .. nl LC_MESSAGES .. .. nn LC_MESSAGES .. .. no LC_MESSAGES .. .. or LC_MESSAGES .. .. pa LC_MESSAGES .. .. pl LC_MESSAGES .. .. pt LC_MESSAGES .. .. pt_BR LC_MESSAGES .. .. pt_PT LC_MESSAGES .. .. ro LC_MESSAGES .. .. ru LC_MESSAGES .. .. sk LC_MESSAGES .. .. sl LC_MESSAGES .. .. sq LC_MESSAGES .. .. sr LC_MESSAGES .. .. sr@Latn LC_MESSAGES .. .. sv LC_MESSAGES .. .. ta LC_MESSAGES .. .. tg LC_MESSAGES .. .. th LC_MESSAGES .. .. tk LC_MESSAGES .. .. tr LC_MESSAGES .. .. uk LC_MESSAGES .. .. uz LC_MESSAGES .. .. vi LC_MESSAGES .. .. wa LC_MESSAGES .. .. zh LC_MESSAGES .. .. zh_CN LC_MESSAGES .. .. zh_CN.GB2312 LC_MESSAGES .. .. zh_TW LC_MESSAGES .. .. zh_TW.Big5 LC_MESSAGES .. .. .. misc .. nls C .. af_ZA.ISO8859-1 .. af_ZA.ISO8859-15 .. af_ZA.UTF-8 .. am_ET.UTF-8 .. be_BY.CP1131 .. be_BY.CP1251 .. be_BY.ISO8859-5 .. be_BY.UTF-8 .. bg_BG.CP1251 .. bg_BG.UTF-8 .. ca_ES.ISO8859-1 .. ca_ES.ISO8859-15 .. ca_ES.UTF-8 .. cs_CZ.ISO8859-2 .. cs_CZ.UTF-8 .. da_DK.ISO8859-1 .. da_DK.ISO8859-15 .. da_DK.UTF-8 .. de_AT.ISO8859-1 .. de_AT.ISO8859-15 .. de_AT.UTF-8 .. de_CH.ISO8859-1 .. de_CH.ISO8859-15 .. de_CH.UTF-8 .. de_DE.ISO8859-1 .. de_DE.ISO8859-15 .. de_DE.UTF-8 .. el_GR.ISO8859-7 .. el_GR.UTF-8 .. en_AU.ISO8859-1 .. en_AU.ISO8859-15 .. en_AU.US-ASCII .. en_AU.UTF-8 .. en_CA.ISO8859-1 .. en_CA.ISO8859-15 .. en_CA.US-ASCII .. 
en_CA.UTF-8 .. en_GB.ISO8859-1 .. en_GB.ISO8859-15 .. en_GB.US-ASCII .. en_GB.UTF-8 .. en_IE.UTF-8 .. en_NZ.ISO8859-1 .. en_NZ.ISO8859-15 .. en_NZ.US-ASCII .. en_NZ.UTF-8 .. en_US.ISO8859-1 .. en_US.ISO8859-15 .. en_US.UTF-8 .. es_ES.ISO8859-1 .. es_ES.ISO8859-15 .. es_ES.UTF-8 .. et_EE.ISO8859-15 .. et_EE.UTF-8 .. fi_FI.ISO8859-1 .. fi_FI.ISO8859-15 .. fi_FI.UTF-8 .. fr_BE.ISO8859-1 .. fr_BE.ISO8859-15 .. fr_BE.UTF-8 .. fr_CA.ISO8859-1 .. fr_CA.ISO8859-15 .. fr_CA.UTF-8 .. fr_CH.ISO8859-1 .. fr_CH.ISO8859-15 .. fr_CH.UTF-8 .. fr_FR.ISO8859-1 .. fr_FR.ISO8859-15 .. fr_FR.UTF-8 .. he_IL.UTF-8 .. hi_IN.ISCII-DEV .. hr_HR.ISO8859-2 .. hr_HR.UTF-8 .. hu_HU.ISO8859-2 .. hu_HU.UTF-8 .. hy_AM.ARMSCII-8 .. hy_AM.UTF-8 .. is_IS.ISO8859-1 .. is_IS.ISO8859-15 .. is_IS.UTF-8 .. it_CH.ISO8859-1 .. it_CH.ISO8859-15 .. it_CH.UTF-8 .. it_IT.ISO8859-1 .. it_IT.ISO8859-15 .. it_IT.UTF-8 .. ja_JP.SJIS .. ja_JP.UTF-8 .. ja_JP.eucJP .. kk_KZ.PT154 .. kk_KZ.UTF-8 .. ko_KR.CP949 .. ko_KR.UTF-8 .. ko_KR.eucKR .. la_LN.ISO8859-1 .. la_LN.ISO8859-15 .. la_LN.ISO8859-2 .. la_LN.ISO8859-4 .. la_LN.US-ASCII .. lt_LT.ISO8859-13 .. lt_LT.ISO8859-4 .. lt_LT.UTF-8 .. nl_BE.ISO8859-1 .. nl_BE.ISO8859-15 .. nl_BE.UTF-8 .. nl_NL.ISO8859-1 .. nl_NL.ISO8859-15 .. nl_NL.UTF-8 .. no_NO.ISO8859-1 .. no_NO.ISO8859-15 .. no_NO.UTF-8 .. pl_PL.ISO8859-2 .. pl_PL.UTF-8 .. pt_BR.ISO8859-1 .. pt_BR.UTF-8 .. pt_PT.ISO8859-1 .. pt_PT.ISO8859-15 .. pt_PT.UTF-8 .. ro_RO.ISO8859-2 .. ro_RO.UTF-8 .. ru_RU.CP1251 .. ru_RU.CP866 .. ru_RU.ISO8859-5 .. ru_RU.KOI8-R .. ru_RU.UTF-8 .. sk_SK.ISO8859-2 .. sk_SK.UTF-8 .. sl_SI.ISO8859-2 .. sl_SI.UTF-8 .. sr_YU.ISO8859-2 .. sr_YU.ISO8859-5 .. sr_YU.UTF-8 .. sv_SE.ISO8859-1 .. sv_SE.ISO8859-15 .. sv_SE.UTF-8 .. tr_TR.ISO8859-9 .. tr_TR.UTF-8 .. uk_UA.ISO8859-5 .. uk_UA.KOI8-U .. uk_UA.UTF-8 .. zh_CN.GB18030 .. zh_CN.GB2312 .. zh_CN.GBK .. zh_CN.UTF-8 .. zh_CN.eucCN .. zh_HK.Big5HKSCS .. zh_HK.UTF-8 .. zh_TW.Big5 .. zh_TW.UTF-8 .. .. pixmaps .. sgml .. skel .. xml .. .. www .. .. galera-3-25.3.20/scripts/packages/freebsd/LICENSE0000644000015300001660000000012113042054732021014 0ustar jenkinsjenkinsThis package has a single license: GPLv2 (GNU General Public License version 2). galera-3-25.3.20/scripts/packages/freebsd/catalog.mk0000644000015300001660000000032313042054732021756 0ustar jenkinsjenkins_LICENSE=GPLv2 _LICENSE_NAME=GNU General Public License version 2 _LICENSE_PERMS=dist-mirror dist-sell pkg-mirror pkg-sell auto-accept _LICENSE_GROUPS=FSF GPL OSI _LICENSE_DISTFILES=galera-%{RELEASE}-src.tar.gz galera-3-25.3.20/scripts/packages/freebsd/galera-ldconfig0000644000015300001660000000002613042054732022754 0ustar jenkinsjenkins/usr/local/lib/galera galera-3-25.3.20/scripts/packages/freebsd/galera-descr0000644000015300001660000000046513042054732022276 0ustar jenkinsjenkinsGalera is a fast synchronous multimaster wsrep provider (replication engine) for transactional databases and similar applications. For more information about wsrep API see http://launchpad.net/wsrep. For a description of Galera replication engine see http://www.codership.com. 
WWW: http://www.codership.com/ galera-3-25.3.20/scripts/packages/freebsd/galera-message0000644000015300001660000000075713042054732022626 0ustar jenkinsjenkins************************************************************************ If you want to run Galera Arbitrator Daemon (garbd), remember to configure garb service in /etc/rc.conf, e.g.: garb_enable="YES" garb_galera_nodes="1.1.1.1:4567" garb_galera_group="wsrep_cluster_name" garb_galera_options="gmcast.listen_addr=tcp://2.2.2.2:4567" garb_log_file="/tmp/garb.log" To start garbd, use command: sudo service garb start ************************************************************************ galera-3-25.3.20/scripts/packages/freebsd/galera-plist0000644000015300001660000000265113042054732022330 0ustar jenkinsjenkins@comment PKG_FORMAT_REVISION:1.1 @name galera-%{RELEASE} @comment ORIGIN:databases/galera @cwd /usr/local @srcdir %{SRCDIR} @comment "=== dependencies ===" @comment "require /usr/local/lib/gcc48/libstdc++.so" @comment // @pkgdep gcc-4.8.2.s20130808 @comment DEPORIGIN:lang/gcc48 @comment // @pkgdep openssl-1.0.1_8 @comment DEPORIGIN:security/openssl @comment // @pkgdep libexecinfo-1.1_3 @comment DEPORIGIN:devel/libexecinfo @comment "=== preinstall stage ===" @exec echo "===> Linking /usr/local/bin/bash to /bin/bash" @exec [ -x /bin/bash ] && echo "Using existing /bin/bash." || ln -s ../usr/local/bin/bash /bin/bash @comment "=== file section ===" @owner root @group wheel @mode 0444 share/licenses/galera-%{RELEASE}/catalog.mk share/licenses/galera-%{RELEASE}/LICENSE share/licenses/galera-%{RELEASE}/GPLv2 share/licenses/galera-%{RELEASE}/LICENSE.asio share/licenses/galera-%{RELEASE}/LICENSE.crc32c share/licenses/galera-%{RELEASE}/LICENSE.chromium @mode 0555 etc/rc.d/garb bin/garbd @mode 0444 lib/galera/libgalera_smm.so share/doc/galera/README share/doc/galera/README-MySQL libdata/ldconfig/galera @comment "=== postinstall stage ===" @exec /sbin/ldconfig -m /usr/local/lib/galera @comment "=== postremove stage ===" @dirrm share/licenses/galera-%{RELEASE} @dirrm share/doc/galera @comment // @unexec rm -f $(find /usr/local/lib/galera -type l)" @dirrm lib/galera @comment // @unexec ldconfig -R @unexec service ldconfig start >/dev/null galera-3-25.3.20/scripts/packages/empty0000644000015300001660000000000013042054732017452 0ustar jenkinsjenkinsgalera-3-25.3.20/scripts/packages/debian0000777000015300001660000000000013042054732021340 2../../debianustar jenkinsjenkinsgalera-3-25.3.20/scripts/packages/galera.list0000644000015300001660000000276013042054732020540 0ustar jenkinsjenkins# This is Galera package description for ESP package manager %include galera-common.inc # this line is required by rpmbuild #d 755 root root /usr d 755 root root $CONF_DEST - c 644 root root $CONF_DEST/garb $BUILD_BASE/garb/files/garb.cnf # i 755 root sys foo foo.sh - this creates links in rc*.d, we dont want it d 755 root root $INIT_DEST - f 755 root root $INIT_DEST/garb $BUILD_BASE/garb/files/garb.sh d 755 root root $BINS_DEST - f 755 root root $BINS_DEST/garbd $BUILD_BASE/garb/garbd d 755 root root $LIBS_DEST - f 755 root root $LIBS_DEST/libgalera_smm.so $BUILD_BASE/libgalera_smm.so d 755 root root $DOCS_DEST - f 644 root root $DOCS_DEST/COPYING $BUILD_BASE/LICENSE f 644 root root $DOCS_DEST/LICENSE.asio $BUILD_BASE/asio/LICENSE_1_0.txt f 644 root root $DOCS_DEST/LICENSE.crc32c $BUILD_BASE/www.evanjones.ca/LICENSE f 644 root root $DOCS_DEST/LICENSE.chromium $BUILD_BASE/chromium/LICENSE f 644 root root $DOCS_DEST/README README f 644 root root $DOCS_DEST/README-MySQL 
README-MySQL $LD_SO_CONF_D=/etc/ld.so.conf.d d 755 root root $LD_SO_CONF_D - # the reason we don't create this file in postinstall script is to have it # in the package database f 644 root root $LD_SO_CONF_D/galera.conf empty %postinstall < $LD_SO_CONF_D/galera.conf ldconfig $LIBS_DEST EOF_POSTINSTALL %preremove < DISCLAIMER THIS SOFTWARE PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL CODERSHIP OY BE HELD LIABLE TO ANY PARTY FOR ANY DAMAGES RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE. Trademark Information. All trademarks are the property of their respective owners. Licensing Information. Galera is copyright (c) 2007-2013 Codership Oy Please see COPYING file that came with this distribution. This product uses asio C++ library which is Copyright (c) 2003-2011 Christopher M. Kohlhoff (chris at kohlhoff dot com) licensed under Boost Software License. This product uses CRC-32C implementation from http://www.evanjones.ca which is licensed under MIT License. This product uses aligned buffer implementation from Chromium Project which is copyright Chromium Authors and licensed under Chromium License Source code can be found at https://launchpad.net/galera GALERA v3.x CONTENTS: ========= 1. WHAT IS GALERA 2. GALERA USE CASES 3. GALERA CONFIGURATION PARAMETERS 4. GALERA ARBITRATOR 5. WRITESET CACHE 6. INCREMENTAL STATE TRANSFER 7. SPECIAL NOTES 1. WHAT IS GALERA Galera is a synchronous multi-master replication engine that provides its service through wsrep API (https://launchpad.net/wsrep). It features optimistic transaction execution and commit time replication and certification of writesets. Since it replicates only final changes to the database it is transparent to triggers, stored procedures and non-deterministic functions. Galera nodes are connected to each other in a N-to-N fashion through a group communication backend which provides automatic reconfiguration in the event of a node failure or a new node added to cluster: ,-------. ,-------. ,--------. | node1 |-----| node2 |<---| client | `-------' G `-------' `--------' \ / ,-------. ,--------. | node3 |<---| client | `-------' `--------' Node states are synchronized by replicating transaction changes at commit time. The cluster is virtually synchronous: this means that each node commits transactions in exactly the same order, although not necessarily at the same physical moment. (The latter is not that important as it may seem, since in most cases DBMS gives no guarantee on when the transaction is actually processed.) Built-in flow control keeps nodes within fraction of a second from each other, this is more than enough for most practical purposes. Main features of a Galera database cluster: * Truly highly available: no committed transaction is ever lost in case of a node crash. All nodes always have consistent state. * True multi-master: all cluster nodes can handle WRITE load concurrently. * Highly transparent. (See SPECIAL NOTES below) * Scalable even with WRITE-intensive applications. * Automatic synchronization of new nodes. 2. GALERA USE CASES There is a number of ways how Galera replication can be utilized. They can be categorized in three groups: 1) Seeking High Availability only. In this case client application connects to only one node, the rest serving as hot backups: ,-------------. | application | `-------------' | | | DB backups ,-------. ,-------. 
,-------. | node1 | | node2 | | node3 | `-------' `-------' `-------' <===== cluster nodes =====> In the case of primary node failure or maintenance shutdown application can instantly switch to another node without any special failover procedure. 2) Seeking High Availability and improved performance through uniform load distribution. If there are several client connections to the database, they can be uniformly distributed between cluster nodes resulting in better performance. The exact degree of performance improvement depends on application's load profile. Note, that transaction rollback rate may also increase. ,-------------. | clients | `-------------' | | | | ,-------------. | application | `-------------' / | \ ,-------. ,-------. ,-------. | node1 | | node2 | | node3 | `-------' `-------' `-------' <===== cluster nodes =====> In the case of a node failure application can keep on using the remaining healthy nodes. In this setup application can also be clustered with a dedicated application instance per database node, thus achieving HA not only for the database, but for the whole application stack: ,-------------. | clients | `-------------' // || \\ ,------. ,------. ,------. | app1 | | app2 | | app3 | `------' `------' `------' | | | ,-------. ,-------. ,-------. | node1 | | node2 | | node3 | `-------' `-------' `-------' <====== cluster nodes ======> 3) Seeking High Availability and improved performance through smart load distribution. Uniform load distribution can cause undesirably high rollback rate. Directing transactions which access the same set of tables to the same node can considerably improve performance by reducing the number of rollbacks. Also, if your application can distinguish between read/write and read-only transactions, the following configuration may be quite efficient: ,---------------------. | application | `---------------------' writes / | reads \ reads ,-------. ,-------. ,-------. | node1 | | node2 | | node3 | `-------' `-------' `-------' <========= cluster nodes =========> 3. GALERA PARAMETERS 3.1 Cluster URL. Galera can use URL (RFC 3986) syntax for addressing with optional parameters passed in the URL query part. Galera cluster address looks as follows: ://[?option1=value1[&option2=value2]] e.g.: gcomm://192.168.0.1:4567?gmcast.listen_addr=0.0.0.0:5678 Currently Galera supports the following backends: 'dummy' - is a bypass backend for debugging/profiling purposes. It does not connect to or replicate anything and the rest of the URL address string is ignored. 'gcomm' - is a Codership's own Group Communication backend that provides Virtual Synchrony quality of service. It uses TCP for membership service and TCP (and UDP multicast as of version 0.8) for data replication. Normally one would use just the simplest form of the address URL: gcomm:// - if one wants to start a new cluster. gcomm://
<node address> - if one wants to join an existing cluster. In that case <node address>
is the address of one of the cluster members. 3.2 Galera parameters. There are quite a few Galera configuration parameters which affect its behavior and performance. Of particular interest to an end-user are the following: To configure the gcomm listen address: gmcast.listen_addr To configure how fast the cluster reacts to node failure or connection loss: evs.suspect_timeout evs.inactive_timeout evs.inactive_check_period evs.keepalive_period To fine-tune performance (especially in high-latency networks): evs.user_send_window evs.send_window To relax or tighten replication flow control: gcs.fc_limit gcs.fc_factor For a full parameter list please see http://www.codership.com/wiki/doku.php?id=galera_parameters 3.2.1 GMCast parameters group. All parameters in this group are prefixed by 'gmcast.' (see example above). group String denoting the group name. Max length of the string is 16. Peer nodes accept a GMCast connection only if the group names match. It is set automatically from wsrep options. listen_addr Listening address for GMCast. The address is currently passed in URI format (for example tcp://192.168.3.1:4567) and it should be passed as the last configuration parameter in order to avoid confusion. If the parameter value is undefined, GMCast starts listening on all interfaces at the default port 4567. mcast_addr Multicast address in dotted decimal notation; enables using multicast to transmit group communication messages. Defaults to none. Must have the same value on all nodes. mcast_port Port used for UDP multicast messages. Defaults to the listen_addr port. Must have the same value on all of the nodes. mcast_ttl Time to live for multicast packets. Defaults to 1. 3.2.2 EVS parameter group. All parameters in this group are prefixed by 'evs.'. All values for the timeout options below should follow the ISO 8601 standard for time interval representation (e.g. 02:01:37.2 -> PT2H1M37.2S == PT121M37.2S == PT7297.2S). suspect_timeout This timeout controls how long a node can remain silent until it is put under suspicion. If the majority of the current group agree that the node is under suspicion, it is discarded from the group and a new group view is formed immediately. If the majority of the group does not agree about the suspicion, inactive_timeout is waited before forming of a new group is attempted. Default value is 5 seconds. inactive_timeout This timeout controls how long a node can remain completely silent until it is discarded from the group. This is a hard limit, unlike suspect_timeout, and the node is discarded even if it becomes live during the formation of the new group (so it is inclusive of suspect_timeout). Default value is 15 seconds. inactive_check_period This period controls how often node liveness is checked. Default is 1 second and there is no need to change this unless suspect_timeout or inactive_timeout is adjusted to a smaller value. Minimum is 0.1 seconds and maximum is suspect_timeout/2. keepalive_period This timeout controls how often keepalive messages are sent into the network. Node liveness is determined with these keepalives, so the value should be considerably smaller than suspect_timeout. Default value is 1 second, minimum is 0.1 seconds and maximum is suspect_timeout/3. consensus_timeout This timeout defines how long forming of a new group is attempted. If there is no consensus after this time has passed since the start of the consensus protocol, every node discards all other nodes from the group and forming of a new group is attempted through singleton groups. Default value is 30 seconds, minimum is inactive_timeout and maximum is inactive_timeout*5. join_retrans_period This parameter controls how often join messages are retransmitted during group formation. 
There is usually no need to adjust this value. Default value is 0.3 seconds, minimum is 0.1 seconds and maximum is suspect_timeout/3. view_forget_timeout This timeout controls how long information about known group views is maintained. This information is needed to filter out delayed messages from previous views that are not live anymore. Default value is 5 minutes and there is usually no need to change it. debug_log_mask This mask controls what debug information is printed in the logs if debug logging is turned on. Mask value is bitwise-OR from values gcomm::evs::Proto::DebugFlags. By default only state information is printed. info_log_mask This mask controls what info log is printed in the logs. Mask value is bitwise-OR from values gcomm::evs::Proto::InfoFlags. stats_report_period This parameter controls how often statistics information is printed in the log. This parameter has effect only if statistics reporting is enabled via Conf::EvsInfoLogMask. Default value is 1 minute. send_window This parameter controls how many messages the protocol layer is allowed to send without getting all acknowledgements for any of them. Default value is 32. user_send_window Like send_window, but for messages whose sending is initiated by a call from the upper layer. Default value is 16. 3.2.3 GCS parameter group All parameters in this group are prefixed by 'gcs.'. fc_debug Post debug statistics about SST flow control every that many writesets. Default: 0. fc_factor Resume replication after the recv queue drops below that fraction of gcs.fc_limit. For fc_master_slave = NO this limit is also scaled. Default: 1.0. fc_limit Pause replication if the recv queue exceeds that many writesets. Default: 16. For master-slave setups this number can be increased considerably. fc_master_slave When this is NO then the effective gcs.fc_limit is multiplied by sqrt( number of cluster members ). Default: NO. sync_donor Should we enable flow control in DONOR state the same way as in SYNCED state. Useful for non-blocking state transfers. Default: NO. max_packet_size All writesets exceeding that size will be fragmented. Default: 32616. max_throttle How much we can throttle the replication rate during state transfer (to avoid running out of memory). Set it to 0.0 if stopping replication is acceptable for the sake of completing state transfer. Default: 0.25. recv_q_hard_limit Maximum allowed size of the recv queue. This should normally be half of (RAM + swap). If this limit is exceeded, Galera will abort the server. Default: LLONG_MAX. recv_q_soft_limit A fraction of gcs.recv_q_hard_limit after which the replication rate will be throttled. Default: 0.25. The degree of throttling is a linear function of recv queue size and goes from 1.0 (“full rate”) at gcs.recv_q_soft_limit to gcs.max_throttle at gcs.recv_q_hard_limit. Note that “full rate”, as estimated between 0 and gcs.recv_q_soft_limit, is a very approximate estimate of the regular replication rate. 3.2.4 Replicator parameter group All parameters in this group are prefixed by 'replicator.'. commit_order Whether we should allow Out-Of-Order committing (improves parallel applying performance). Possible settings: 0 – BYPASS: all commit order monitoring is turned off (useful for measuring performance penalty) 1 – OOOC: allow out of order committing for all transactions 2 – LOCAL_OOOC: allow out of order committing only for local transactions 3 – NO_OOOC: no out of order committing is allowed (strict total order committing) Default: 3. 3.2.5 GCache parameter group All parameters in this group are prefixed by 'gcache.'. 
dir Directory where GCache should place its files. Default: working directory. name Name of the main store file (ring buffer). Default: “galera.cache”. size Size of the main store file (ring buffer). This will be preallocated on startup. Default: 128Mb. page_size Size of a page in the page store. The limit on overall page store is free disk space. Pages are prefixed by “gcache.page”. Default: 128Mb. keep_pages_size Total size of the page store pages to keep for caching purposes. If only page storage is enabled, one page is always present. Default: 0. mem_size Size of the malloc() store (read: RAM). For configurations with spare RAM. Default: 0. 3.2.6 SSL parameters All parameters in this group are prefixed by 'socket.'. ssl_cert Certificate file in PEM format. ssl_key A private key for the certificate above, unencrypted, in PEM format. ssl A boolean value to disable SSL even if certificate and key are configured. Default: yes (SSL is enabled if ssl_cert and ssl_key are set) To generate private key/certificate pair the following command may be used: $ openssl req -new -x509 -days 365000 -nodes -keyout key.pem -out cert.pem Using short-living (in most web examples - 1 year) certificates is not advised as it will lead to complete cluster shutdown when certificate expires. 3.2.7 Incremental State Transfer parameters All parameters in this group are prefixed by 'ist.'. recv_addr Address to receive incremental state transfer at. Setting this parameter turns on incremental state transfer. IST will use SSL if SSL is configured as described above. No default. recv_bind Address to which the incremental state transfer is bound. This configuration is optional. When it is not set it will take its value from recv_addr. It can be useful if the node is running behind a NAT, where the public address and the internal address differ. 4. GALERA ARBITRATOR Galera arbitrator found in this package is a small stateless daemon that can serve as a lightweight cluster member to * avoid split brain condition in a cluster with an otherwise even membership. * request consistent state snapshots for backup purposes. Example usage: ,---------. | garbd | `---------' ,---------. | ,---------. | clients | | | clients | `---------' | `---------' \ | / \ ,---. / (' `) ( WAN ) (. ,) / `---' \ / \ ,---------. ,---------. | node1 | | node2 | `---------' `---------' Data Center 1 Data Center 2 In this example, if one of the data centers loses WAN connection, the node that sees arbitrator (and therefore sees clients) will continue the operation. garbd accepts the same Galera options as the regular Galera node. Note that at the moment garbd needs to see all replication traffic (although it does not store it anywhere), so placing it in a location with poor connectivity to the rest of the cluster may lead to cluster performance degradation. Arbitrator failure does not affect cluster operation and a new instance can be reattached to cluster at any time. There can be several arbitrators in the cluster, although practicality of it is questionable. 5. WRITESET CACHE Starting with version 1.0 Galera stores writesets in a special cache. It's purpose is to improve control of Galera memory usage and offload writeset storage to disk. Galera cache has 3 types of stores: 1. A permanent in-memory store, where writesets are allocated by a default OS memory allocator. It can be useful for systems with spare RAM. It has a hard size limit. By default it is disabled (size set to 0). 2. 
A permanent ring-buffer file which is preallocated on disk during cache initialization. It is intended as the main writeset store. By default its size is 128Mb. 3. An on-demand page store, which allocates memory-mapped page files during runtime as necessary. Default page size is 128Mb, but it can be bigger if it needs to store a bigger writeset. The size of the page store is limited by the free disk space. By default page files are deleted when not in use, but a limit can be set on the total size of the page files to keep. When all other stores are disabled, at least one page file is always present on disk. The allocation algorithm is as follows: all stores are tried in the above order. If a store does not have enough space to allocate the writeset, then the next store is tried. The page store should always succeed unless the writeset is bigger than the available disk space. By default the Galera cache allocates files in the working directory of the process, but a dedicated location can be specified. For configuration parameters see the GCache group above (p. 3.2.5). NOTE: Since all cache files are memory-mapped, the process may appear to use more memory than it actually does. 6. INCREMENTAL STATE TRANSFER (IST) Galera 2.x introduces a long-awaited functionality: incremental state transfer. The idea is that if a) the joining node state UUID is the same as that of the group and b) all of the writesets that it missed can be found in the donor's GCache, then instead of a whole state snapshot it will receive the missing writesets and catch up with the group by replaying them. For example: - local node state is 5a76ef62-30ec-11e1-0800-dba504cf2aab:197222 - group state is 5a76ef62-30ec-11e1-0800-dba504cf2aab:201913 - if writeset number 197223 is still in the donor's GCache, it will send writesets 197223-201913 to the joiner instead of the whole state. IST can dramatically speed up remerging a node into the cluster. It is also non-blocking on the donor. The most important parameter for IST (besides 'ist.recv_addr') is the GCache size on the donor. The bigger it is, the more writesets can be stored in it and the bigger seqno gaps can be closed with IST. On the other hand, if the GCache is much bigger than the state size, serving IST may be less efficient than sending a state snapshot. 7. SPECIAL NOTES 7.1 DEADLOCK ON COMMIT In multi-master mode a transaction commit operation may return a deadlock error. This is a consequence of writeset certification and is a fundamental property of Galera. If deadlock on commit cannot be tolerated by the application, Galera can still be used on the condition that all write operations to a given table are performed on the same node. This still has an advantage over the "traditional" master-slave replication: write load can still be distributed between nodes and, since replication is synchronous, failover is trivial. 7.2 "SPLIT-BRAIN" CONDITION A Galera cluster is fully distributed and does not use any sort of centralized arbitrator, thus having no single point of failure. However, like any cluster of that kind it may fall into a dreaded "split-brain" condition where half of the cluster nodes suddenly disappear (e.g. due to network failure). In the general case, having no information about the fate of the disappeared nodes, the remaining nodes cannot continue to process requests and modify their states. 
galera-3-25.3.20/scripts/packages/rpm.sh0000755000015300001660000000250513042054732017542 0ustar jenkinsjenkins#!/bin/bash -eu if [ $# -ne 1 ] then echo "Usage: $0 " exit 1 fi set -x # Absolute path of this script folder SCRIPT_ROOT=$(cd $(dirname $0); pwd -P) THIS_DIR=$(pwd -P) RPM_TOP_DIR=$SCRIPT_ROOT/rpm_top_dir rm -rf $RPM_TOP_DIR mkdir -p $RPM_TOP_DIR/RPMS ln -s ../../../ $RPM_TOP_DIR/BUILD fast_cflags="-O3 -fno-omit-frame-pointer" uname -m | grep -q i686 && \ cpu_cflags="-mtune=i686" || cpu_cflags="-mtune=core2" RPM_OPT_FLAGS="$fast_cflags $cpu_cflags" GALERA_SPEC=$SCRIPT_ROOT/galera.spec RELEASE=${RELEASE:-"1"} if [ -r /etc/fedora-release ] then DISTRO_VERSION=fc$(rpm -qf --qf '%{version}\n' /etc/fedora-release) elif [ -r /etc/redhat-release ] then DISTRO_VERSION=rhel$(rpm -qf --qf '%{version}\n' /etc/redhat-release) elif [ -r /etc/SuSE-release ] then DISTRO_VERSION=sles$(rpm -qf --qf '%{version}\n' /etc/SuSE-release | cut -d. -f1) else DISTRO_VERSION= fi [ -n "$DISTRO_VERSION" ] && RELEASE=$RELEASE.$DISTRO_VERSION $(which rpmbuild) --clean --define "_topdir $RPM_TOP_DIR" \ --define "optflags $RPM_OPT_FLAGS" \ --define "version $1" \ --define "release $RELEASE" \ -bb --short-circuit -bi $GALERA_SPEC RPM_ARCH=$(uname -m | sed s/i686/i386/) mv $RPM_TOP_DIR/RPMS/$RPM_ARCH/galera-*.rpm ./ rm -rf $RPM_TOP_DIR exit 0 galera-3-25.3.20/scripts/packages/README-MySQL0000644000015300001660000001341413042054732020231 0ustar jenkinsjenkinsCodership Oy http://www.codership.com

DISCLAIMER

THIS SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER
EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL
CODERSHIP OY BE HELD LIABLE TO ANY PARTY FOR ANY DAMAGES RESULTING DIRECTLY
OR INDIRECTLY FROM THE USE OF THIS SOFTWARE.

Trademark Information. MySQL and other trademarks are the property of their
respective owners.

Licensing Information. Please see the COPYING file that came with this
distribution. Source code can be found at
http://www.codership.com/en/downloads/galera

ABOUT THIS DOCUMENT

This document briefly explains how to use the Galera wsrep provider with the
MySQL RDBMS. For more information check http://www.codership.com/wiki/doku.php

Using MySQL with GALERA v2.x

CONTENTS:
=========
1. WHAT IS MYSQL/GALERA CLUSTER
2. MYSQL/GALERA NODE SETUP
3. CONNECTING TO CLUSTER
4. SETTING GALERA PARAMETERS IN MYSQL
5. LIMITATIONS

1. WHAT IS MYSQL/GALERA CLUSTER

MySQL/Galera cluster is a synchronous multi-master cluster solution for the
InnoDB engine based on the wsrep API (https://launchpad.net/wsrep), of which
Galera is an implementation. It requires a MySQL server patched to use this
API (https://launchpad.net/codership-mysql).

Node states are synchronized by replicating transaction changes at commit
time. The cluster is virtually synchronous: this means that each node
commits transactions in exactly the same order, although not necessarily at
the same physical moment. (The latter is not as important as it may seem,
since in most cases a DBMS gives no guarantee on when the transaction is
actually processed.)
Built-in flow control keeps the nodes within a fraction of a second of each
other; this is more than enough for most practical purposes.

From the client perspective this means:

 * Truly highly available: no committed transaction is ever lost in case of
   a node crash.
 * Highly transparent: with a few exceptions each node can be treated as a
   normal standalone MySQL/InnoDB server.
 * True multi-master: all cluster nodes can modify the same table
   concurrently.
 * Scalable even with WRITE-intensive applications.

2. MYSQL/GALERA NODE SETUP

To set up a MySQL/Galera node you will need to:

1) Install the wsrep-patched MySQL from
   https://launchpad.net/codership-mysql/0.7. Please see the documentation
   that comes with it about first-time node setup.

2) Configure it to use the Galera library as a wsrep provider. For that, set
   the wsrep_provider option to
   wsrep_provider=/usr/lib/galera/libgalera_smm.so (DEB systems) or
   wsrep_provider=/usr/lib64/galera/libgalera_smm.so (RPM systems).

3) Start the MySQL server.

3. CONNECTING TO CLUSTER

To join the cluster you will need to set the global wsrep_cluster_address
variable either in the wsrep.cnf file or from the command line (the latter
is recommended to avoid automatic connection to failed nodes). E.g.

mysql> SET GLOBAL wsrep_cluster_address='gcomm://';

to bootstrap a new cluster, or

mysql> SET GLOBAL wsrep_cluster_address='gcomm://<node address>:4567';

to join an existing cluster. Upon connecting to the initial node, the server
will obtain the list of other nodes and connect to each of them. There are a
number of options that can be passed to Galera with the
wsrep_cluster_address string. See README for details.

After connecting to the cluster the new node will synchronize its state by
receiving a state snapshot from one of the peers. For the duration of this
procedure both nodes may be unable to process client requests. Other nodes
are unaffected by this.

4. SETTING GALERA PARAMETERS IN MYSQL

To configure Galera parameters, set the wsrep_provider_options variable
either in my.cnf or on the command line:

wsrep_provider_options="gcs.fc_limit = 128; gcs.fc_master_slave = yes"

Certain parameters can be changed at runtime:

mysql> SET GLOBAL wsrep_provider_options="evs.send_window=16";

will only change the value of the evs.send_window parameter. For the list of
all Galera parameters see the main README and http://www.codership.com/wiki.

mysql> SHOW VARIABLES like 'wsrep_provider_options';

will show all parameters and their current values.

5. LIMITATIONS

1) Currently replication works only with the InnoDB storage engine. Any
   writes to tables of other types, including system (mysql.*) tables, are
   not replicated. However, DDL statements are replicated at statement
   level, and changes to mysql.* tables will get replicated that way. So,
   you can safely issue: CREATE USER..., but issuing: INSERT INTO
   mysql.user..., will not be replicated.

2) The DELETE operation is unsupported on tables without primary keys. Rows
   in tables without primary keys may appear in a different order on
   different nodes. As a result SELECT...LIMIT... may return slightly
   different sets.

3) Unsupported queries:
   * LOCK/UNLOCK TABLES is not supported in multi-master configuration.
   * lock functions (GET_LOCK(), RELEASE_LOCK()...)

4) The query log cannot be directed to a table. If you enable query logging,
   you must forward the log to a file:
   log_output = FILE
   Use general_log and general_log_file to choose query logging and the log
   file name.

5) The maximum allowed transaction size is defined by wsrep_max_ws_rows and
   wsrep_max_ws_size. Anything bigger (e.g. a huge LOAD DATA) will be
   rejected.

6) Due to cluster-level optimistic concurrency control, a transaction
   issuing COMMIT may still be aborted at that stage. There can be two
   transactions writing to the same rows and committing on separate cluster
   nodes, and only one of them can successfully commit. The failing one will
   be aborted. For cluster-level aborts, MySQL/Galera cluster returns a
   deadlock error code (Error: 1213 SQLSTATE: 40001 (ER_LOCK_DEADLOCK)).

7) XA transactions cannot be supported due to possible rollback on commit.
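Putting sections 2-4 together, the wsrep part of a node's my.cnf could look
roughly like the sketch below. The library path, option values and address
are illustrative placeholders only (and, as noted in section 3, setting
wsrep_cluster_address at runtime instead may be preferable):

[mysqld]
# Galera provider library (RPM layout; see section 2 for the DEB path)
wsrep_provider=/usr/lib64/galera/libgalera_smm.so
# provider options, e.g. the flow control limit from the example in section 4
wsrep_provider_options="gcs.fc_limit = 128"
# address of any running cluster member; an empty gcomm:// bootstraps a new cluster
wsrep_cluster_address=gcomm://192.168.1.1:4567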
galera-3-25.3.20/scripts/build.sh0000755000015300001660000002322513042054732016267 0ustar jenkinsjenkins#!/bin/bash -eu # $Id$ get_cores() { case $OS in "Linux") echo "$(grep -c ^processor /proc/cpuinfo)" ;; "SunOS") echo "$(psrinfo | wc -l | tr -d ' ')" ;; "Darwin" | "FreeBSD") echo "$(sysctl -n hw.ncpu)" ;; *) echo "CPU information not available: unsupported OS: '$OS'" >/dev/stderr echo 1 ;; esac } usage() { cat << EOF Usage: build.sh [OPTIONS] Options: --stage --last-stage -s|--scratch build everything from scratch -c|--configure reconfigure the build system (implies -s) -b|--bootstap rebuild the build system (implies -c) -o|--opt configure build with debug disabled (implies -c) -d|--debug configure build with debug enabled (implies -c) --dl set debug level for Scons build (1, implies -c) -r|--release release number -m32/-m64 build 32/64-bit binaries for x86 -p|--package build RPM and DEB packages at the end. --with-spread configure build with spread backend (implies -c to gcs) --source build source packages --sb skip actual build, use the existing binaries --scons build using Scons build system (yes) --so Sconscript option -j|--jobs how many parallel jobs to use for Scons (1) "\nSet DISABLE_GCOMM/DISABLE_VSBES to 'yes' to disable respective modules" EOF } OS=$(uname) # disable building vsbes by default DISABLE_VSBES=${DISABLE_VSBES:-"yes"} DISABLE_GCOMM=${DISABLE_GCOMM:-"no"} PACKAGE=${PACKAGE:-"no"} SKIP_BUILD=${SKIP_BUILD:-"no"} RELEASE=${RELEASE:-""} SOURCE=${SOURCE:-"no"} DEBUG=${DEBUG:-"no"} DEBUG_LEVEL=${DEBUG_LEVEL:-"0"} SCONS=${SCONS:-"yes"} SCONS_OPTS=${SCONS_OPTS:-""} export JOBS=${JOBS:-"$(get_cores)"} SCRATCH=${SCRATCH:-"no"} OPT="yes" NO_STRIP=${NO_STRIP:-"no"} WITH_SPREAD="no" RUN_TESTS=${RUN_TESTS:-1} if [ "$OS" == "FreeBSD" ]; then chown=/usr/sbin/chown true=/usr/bin/true epm=/usr/local/bin/epm else chown=/bin/chown true=/bin/true epm=/usr/bin/epm fi EXTRA_SYSROOT=${EXTRA_SYSROOT:-""} if [ "$OS" == "Darwin" ]; then if which -s port && test -x /opt/local/bin/port; then EXTRA_SYSROOT=/opt/local elif which -s brew && test -x /usr/local/bin/brew; then EXTRA_SYSROOT=/usr/local elif which -s fink && test -x /sw/bin/fink; then EXTRA_SYSROOT=/sw fi elif [ "$OS" == "FreeBSD" ]; then EXTRA_SYSROOT=/usr/local fi which dpkg >/dev/null 2>&1 && DEBIAN=${DEBIAN:-1} || DEBIAN=${DEBIAN:-0} if [ "$OS" == "FreeBSD" ]; then CC=${CC:-"gcc48"} CXX=${CXX:-"g++48"} LD_LIBRARY_PATH=${LD_LIBRARY_PATH:-"/usr/local/lib/$(basename $CC)"} else CC=${CC:-"gcc"} CXX=${CXX:-"g++"} fi if ccache -V > /dev/null 2>&1 then echo "$CC" | grep "ccache" > /dev/null || CC="ccache $CC" echo "$CXX" | grep "ccache" > /dev/null || CXX="ccache $CXX" fi export CC CXX LD_LIBRARY_PATH CFLAGS=${CFLAGS:-"-O2"} CXXFLAGS=${CXXFLAGS:-"$CFLAGS"} CPPFLAGS=${CPPFLAGS:-} initial_stage="galerautils" last_stage="galera" gainroot="" TARGET=${TARGET:-""} # default target while test $# -gt 0 do case $1 in --stage) initial_stage=$2 shift ;; --last-stage) last_stage=$2 shift ;; --gainroot) gainroot=$2 shift ;; -b|--bootstrap) BOOTSTRAP="yes" #
Bootstrap the build system ;; -c|--configure) CONFIGURE="yes" # Reconfigure the build system ;; -s|--scratch) SCRATCH="yes" # Build from scratch (run make clean) ;; -o|--opt) OPT="yes" # Compile without debug ;; -d|--debug) DEBUG="yes" # Compile with debug NO_STRIP="yes" ;; -r|--release) RELEASE="$2" shift ;; -m32) CFLAGS="$CFLAGS -m32" CXXFLAGS="$CXXFLAGS -m32" SCRATCH="yes" TARGET="i686" ;; -m64) CFLAGS="$CFLAGS -m64" CXXFLAGS="$CXXFLAGS -m64" SCRATCH="yes" TARGET="x86_64" ;; -p|--package) PACKAGE="yes" # build binary packages ;; --with*-spread) WITH_SPREAD="$1" ;; --help) usage exit 0 ;; --source) SOURCE="yes" ;; --sb) SKIP_BUILD="yes" ;; --scons) SCONS="yes" ;; --so) SCONS_OPTS="$SCONS_OPTS $2" shift ;; -j|--jobs) JOBS=$2 shift ;; --dl) DEBUG_LEVEL=$2 shift ;; *) if test ! -z "$1"; then echo "Unrecognized option: $1" fi usage exit 1 ;; esac shift done # check whether sudo accepts -E to preserve environment if [ "$PACKAGE" == "yes" ] then echo "testing sudo" if sudo -E $true >/dev/null 2>&1 then echo "sudo accepts -E" SUDO="sudo -E" else echo "sudo does not accept param -E" if [ $(id -ur) != 0 ] then echo "error, must build as root" exit 1 else echo "I'm root, can continue" SUDO="" fi fi fi if [ "$OPT" == "yes" ]; then CONFIGURE="yes"; conf_flags="--disable-debug --disable-dbug"; fi if [ "$DEBUG" == "yes" ]; then CONFIGURE="yes"; fi if [ -n "$WITH_SPREAD" ]; then CONFIGURE="yes"; fi if [ "$CONFIGURE" == "yes" ] && [ "$SCONS" != "yes" ]; then SCRATCH="yes"; fi # Be quite verbose #set -x # Build process base directory build_base=${GALERA_SRC:-$(cd $(dirname $0)/..; pwd -P)} get_arch() { if ! [ -z "$TARGET" ] then if [ "$TARGET" == "i686" ] then echo "i386" else echo "amd64" fi elif [ "$OS" == "Darwin" ]; then if file $build_base/gcs/src/gcs.o | grep "i386" >/dev/null 2>&1 then echo "i386" else echo "amd64" fi else if file $build_base/gcs/src/gcs.o | grep "80386" >/dev/null 2>&1 then echo "i386" else echo "amd64" fi fi } build_packages() { local PKG_DIR=$build_base/scripts/packages pushd $PKG_DIR local ARCH=$(get_arch) local WHOAMI=$(whoami) export BUILD_BASE=$build_base export GALERA_VER=$RELEASE if [ $DEBIAN -eq 0 ] && [ "$ARCH" == "amd64" ] then ARCH="x86_64" export x86_64=$ARCH # for epm fi export $ARCH local STRIP_OPT="" [ "$NO_STRIP" == "yes" ] && STRIP_OPT="-g" $SUDO rm -rf $ARCH set +e if [ $DEBIAN -ne 0 ]; then # build DEB ./deb.sh $GALERA_VER elif [ "$OS" == "FreeBSD" ]; then if test "$NO_STRIP" != "yes"; then strip $build_base/{garb/garbd,libgalera_smm.so} fi ./freebsd.sh $GALERA_VER else # build RPM ./rpm.sh $GALERA_VER fi local RET=$? set -e popd if [ $DEBIAN -ne 0 ]; then mv -f $PKG_DIR/$ARCH/*.deb ./ elif [ "$OS" == "FreeBSD" ]; then mv -f $PKG_DIR/*.tbz ./ else mv -f $PKG_DIR/*.rpm ./ fi return $RET } build_source() { local module="$1" shift local build_dir="$build_base/$module" pushd $build_dir if [ ! -x "configure" ]; then ./bootstrap.sh; fi if [ ! 
-s "Makefile" ]; then ./configure; fi local src_base_name="lib${module}-" rm -rf "${src_base_name}"*.tar.gz make dist || (echo $?; echo "make dist failed"; echo) local ret=$(ls "${src_base_name}"*.tar.gz) popd echo $build_dir/$ret } build_sources() { local module local srcs="" for module in "galerautils" "gcomm" "gcs" "galera" do src=$(build_source $module | tail -n 1) srcs="$srcs $src" done local ret="galera-source-r$RELEASE.tar" tar --transform 's/.*\///' -cf $ret $srcs \ "source/README" "source/COPYING" "source/build.sh" # return absolute path for scripts echo $PWD/$ret } pushd "$build_base" #GALERA_REV="$(svnversion | sed s/\:/,/g)" #if [ "$GALERA_REV" == "exported" ] #then GALERA_REV=$(git log --pretty=oneline | wc -l) || \ GALERA_REV=$(bzr revno --tree -q) || \ GALERA_REV=$(svn info >&/dev/null && svnversion | sed s/\:/,/g) || \ GALERA_REV="XXXX" # trim spaces (sed is not working on Solaris, so using bash built-in) GALERA_REV=${GALERA_REV//[[:space:]]/} #fi popd #if [ -z "$RELEASE" ] #then # RELEASE=$GALERA_REV #fi if [ "$SCONS" == "yes" ] # Build using Scons then # Scons variant dir, defaults to GALERA_SRC export SCONS_VD=$build_base scons_args="-C $build_base revno=$GALERA_REV tests=$RUN_TESTS" [ -n "$TARGET" ] && scons_args="$scons_args arch=$TARGET" [ -n "$RELEASE" ] && scons_args="$scons_args version=$RELEASE" [ "$DEBUG" == "yes" ] && scons_args="$scons_args debug=$DEBUG_LEVEL" [ -n "$EXTRA_SYSROOT" ] && scons_args="$scons_args extra_sysroot=$EXTRA_SYSROOT" if [ "$SCRATCH" == "yes" ] then scons -Q -c --conf=force $scons_args $SCONS_OPTS fi if [ "$SKIP_BUILD" != "yes" ] then scons $scons_args -j $JOBS $SCONS_OPTS fi elif test "$SKIP_BUILD" == "no"; then # Build using autotools echo "Error: autotools not supported anymore! Nothing was built." exit 1 fi # SKIP_BUILD / SCONS if test "$PACKAGE" == "yes" then build_packages fi if test "$SOURCE" == "yes" then build_sources fi galera-3-25.3.20/scripts/mysql/0000755000015300001660000000000013042054732015772 5ustar jenkinsjenkinsgalera-3-25.3.20/scripts/mysql/freebsd.sh0000755000015300001660000000546313042054732017753 0ustar jenkinsjenkins#!/bin/bash -eu if [ $# -ne 2 ] then echo "Usage: $0 " exit 1 fi MYSQL_VER=$1 RELEASE=$2 MAJORMINOR=$(echo $MYSQL_VER | awk -F . '{print $1$2}') # Absolute path of this script folder SCRIPT_ROOT=$(cd $(dirname $0); pwd -P) PBR="$SCRIPT_ROOT/dist/mysql/usr/local" install -d "$PBR"/{etc/rc.d,libdata/ldconfig} install -m 555 "$SCRIPT_ROOT/freebsd/mysql-server.sh" "$PBR/etc/rc.d/mysql-server" install -m 444 "$SCRIPT_ROOT/freebsd/client-ldconfig" "$PBR/libdata/ldconfig/mysql${MAJORMINOR}-client" shopt -s nullglob for i in {1..9}; do for f in "$PBR/man/man$i/"*.$i; do gzip -c $f > $f.gz done done shopt -u nullglob install -m 644 "$SCRIPT_ROOT/freebsd/server-"{plist,descr,comment,message} "$PBR" sed -e "s!%{SRCDIR}!$PBR!" -e "s!%{RELEASE}!$RELEASE!" -e "s!%{MYSQL_VER}!$MYSQL_VER!" \ -e "s!%{MAJORMINOR}!$MAJORMINOR!" -i "" "$PBR/server-"{plist,descr,comment,message} \ "$PBR/share/licenses/mysql-client-${MYSQL_VER}_wsrep_${RELEASE}/catalog.mk" for pkg in $(grep '^@comment DEPORIGIN:' "$PBR/server-plist" | cut -d : -f 2); do if [[ "$pkg" != *_wsrep* ]]; then pkgdep=$(/usr/sbin/pkg_info -q -O "$pkg") if [ -z "$pkgdep" ]; then echo "ERROR: failed to find dependency package '$pkg'" >&2 exit 1 fi sed -e "s!^@comment DEPORIGIN:$pkg!@pkgdep $pkgdep"$'\\\n&!' 
-i "" "$PBR/server-plist" fi done /usr/sbin/pkg_create -c "$PBR/server-comment" \ -d "$PBR/server-descr" \ -D "$PBR/server-message" \ -f "$PBR/server-plist" \ -m "$SCRIPT_ROOT/freebsd/server-mtree" \ -v "mysql-server-${MYSQL_VER}_wsrep_${RELEASE}-$(uname -m).tbz" install -m 644 "$SCRIPT_ROOT/freebsd/client-"{plist,descr,comment,message} "$PBR/" sed -e "s!%{SRCDIR}!$PBR!" -e "s!%{RELEASE}!$RELEASE!" -e "s!%{MYSQL_VER}!$MYSQL_VER!" \ -e "s!%{MAJORMINOR}!$MAJORMINOR!" -i "" "$PBR/client-"{plist,descr,comment,message} \ "$PBR/share/licenses/mysql-client-${MYSQL_VER}_wsrep_${RELEASE}/catalog.mk" for pkg in $(grep '^@comment DEPORIGIN:' "$PBR/client-plist" | cut -d : -f 2); do if [[ "$pkg" != *_wsrep* ]]; then pkgdep=$(/usr/sbin/pkg_info -q -O "$pkg") if [ -z "$pkgdep" ]; then echo "ERROR: failed to find dependency package '$pkg'" >&2 exit 1 fi sed -e "s!^@comment DEPORIGIN:$pkg!@pkgdep $pkgdep"$'\\\n&!' -i "" "$PBR/client-plist" fi done /usr/sbin/pkg_create -c "$PBR/client-comment" \ -d "$PBR/client-descr" \ -D "$PBR/client-message" \ -f "$PBR/client-plist" \ -m "$SCRIPT_ROOT/freebsd/client-mtree" \ -v "mysql-client-${MYSQL_VER}_wsrep_${RELEASE}-$(uname -m).tbz" exit 0 galera-3-25.3.20/scripts/mysql/rpm_wc.sh0000755000015300001660000000701513042054732017623 0ustar jenkinsjenkins#!/bin/bash # This script tries to build RPMs from MySQL/wsrep working copy # probably will never work due to lack of essential files (manpages, etc.) if test -z "$MYSQL_SRC" then echo "MYSQL_SRC variable pointing at MySQL/wsrep sources is not set. Can't continue." exit -1 fi usage() { echo -e "Usage: build.sh [OPTIONS] \n" \ "Options: \n" \ " -r|--release configure build with debug disabled (implies -c)\n"\ " -d|--debug configure build with debug enabled (implies -c)\n"\ " --no-strip prevent stripping of release binaries\n"\ "\n -s and -b options affect only Galera build.\n" } # Parse command line while test $# -gt 0 do case $1 in -r|--release) RELEASE=yes # Compile without debug ;; -d|--debug) DEBUG=yes # Compile with debug NO_STRIP=yes # Don't strip the binaries ;; --no-strip) NO_STRIP=yes # Don't strip the binaries ;; --help) usage exit 0 ;; *) usage exit 1 ;; esac shift done set -x set -e # Absolute path of this script folder BUILD_ROOT=$(cd $(dirname $0); pwd -P) #GALERA_SRC=${GALERA_SRC:-$BUILD_ROOT/../../} # Source paths are either absolute or relative to script, get absolute MYSQL_SRC=$(cd $MYSQL_SRC; pwd -P; cd $BUILD_ROOT) ###################################### ## ## ## Build MySQL ## ## ## ###################################### # Obtain MySQL version and revision of Galera patch pushd $MYSQL_SRC # make dist if test -f Makefile then time make maintainer-clean > /dev/null # make distclean fi #if ! test -f configure #then time BUILD/autorun.sh #fi WSREP_REV=$(bzr revno); export WSREP_REV time ./configure --with-wsrep > /dev/null # MySQL has a mindblowing make system that requires extra/comp_err to be built # for 'make dist'. comp_err requires prebuilt mysys and dbug but they are not # built automatically by make dist. 
pushd include; make > /dev/null; popd pushd strings; make > /dev/null; popd pushd mysys; make > /dev/null; popd pushd dbug; make > /dev/null; popd pushd support-files; rm -rf *.spec; make > /dev/null; popd pushd libmysql; make link_sources > /dev/null; popd pushd libmysql_r; make link_sources > /dev/null; popd pushd libmysqld; make link_sources > /dev/null; popd pushd client; make link_sources > /dev/null; popd time make dist > /dev/null MYSQL_VER=$(grep 'MYSQL_NO_DASH_VERSION' $MYSQL_SRC/Makefile | cut -d ' ' -f 3) #if test -d /usr/src/redhat #then #export RPM_BUILD_ROOT=/usr/src/redhat #else RPM_BUILD_ROOT=/tmp/redhat #fi mkdir -p $RPM_BUILD_ROOT pushd $RPM_BUILD_ROOT mkdir -p BUILD RPMS SOURCES SPECS SRPMS pushd RPMS mkdir -p athlon i386 i486 i586 i686 noarch popd; popd mv mysql-$MYSQL_VER.tar.gz $RPM_BUILD_ROOT/SOURCES/ MYSQL_SPEC=$MYSQL_SRC/support-files/mysql-$MYSQL_VER.spec mv $MYSQL_SPEC $RPM_BUILD_ROOT/SPECS MYSQL_SPEC=$RPM_BUILD_ROOT/SPECS/mysql-$MYSQL_VER.spec i686_cflags="-march=i686 -mtune=i686" amd64_cflags="-m64 -mtune=opteron" fast_cflags="-O3 -fno-omit-frame-pointer" uname -m | grep -q i686 && \ export RPM_OPT_FLAGS="$i686_cflags $fast_cflags" || \ export RPM_OPT_FLAGS="$amd64_cflags $fast_cflags" RPMBUILD="rpmbuild --clean --rmsource \ --define \"_topdir $RPM_BUILD_ROOT\" \ --define \"optflags $RPM_OPT_FLAGS\" \ --with wsrep -ba $MYSQL_SPEC \ 2>&1 > $RPM_BUILD_ROOT/rpmbuild.log" chown -R mysql $RPM_BUILD_ROOT su mysql -c "$RPMBUILD" exit 0 galera-3-25.3.20/scripts/mysql/mysql-galera0000755000015300001660000004221113042054732020316 0ustar jenkinsjenkins#!/bin/bash -e SELF=$(cd $(dirname $0); pwd -P)/$(basename $0) # Copyright (C) 2007, 2008 Codership Oy # # This file is free software; as a special exception the author gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY, to the extent permitted by law; without even the # implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # WHAT IT IS: # This script starts mysqld from Galera test distribution. It may be adapted # to be run from /etc/init.d directory for automatic server startup. # # USAGE: # GCS_ADDRESS=
mysqld_galera start|stop|restart|check|create # # By default empty backend is "dummy". # # 'check' command takes options: [database [table_to_ignore] [table_to_ignore]...] # # USEFUL ENVIRONMENT VARIABLES: # # MY_CNF - specifies the configuration file # GCS_ADDRESS - overrides setting in my.cnf # MYSQL_PORT - port to listen for client connections (default: 3306) # LIBGALERA - location of the galera shared library file # # Convention: mysql pid file is stored in mysql data dir under name 'mysql.pid' # # Normally only the following parameters need to be changed: # Where mysql data directory is located (by default - inside mysql installation) # MYSQL_DATA_DIR # Where mysql installation is located (by default determined from this script location) MYSQL_BASE_DIR=${MYSQL_BASE_DIR:-"$(dirname $SELF)/mysql"} GALERA_BASE_DIR=${GALERA_BASE_DIR:-"$(dirname $SELF)/galera"} # MySQL configuration file MY_CNF=${MYSQL_CNF:-"$MYSQL_BASE_DIR/etc/my.cnf"} if test -s "$MY_CNF" then DEFAULTS_OPTION=" --defaults-file=$MY_CNF " my_cnf_datadir=$(grep ^datadir $MY_CNF | sed s/[^/]*//) else DEFAULTS_OPTION=" --no-defaults " fi # If it was not given explicitely, use the one from my.cnf MYSQL_DATA_DIR=${MYSQL_DATA_DIR:-"$my_cnf_datadir"} # If it was not found in my.cnf, use distribution default MYSQL_DATA_DIR=${MYSQL_DATA_DIR:-"$MYSQL_BASE_DIR/var"} MYSQLD_USER=$(whoami) #======================================================================= ## ## Tweak the following if needed ## # use mysqld server directly, better not have automatic restarting MYSQLD="$MYSQL_BASE_DIR/sbin/mysqld" [ ! -x "$MYSQLD" -a -x "${MYSQLD}-debug" ] && MYSQLD=${MYSQLD}-debug MYSQLADMIN="$MYSQL_BASE_DIR/bin/mysqladmin" # Port, socket and pid files MYSQL_PORT=${MYSQL_PORT:-3306} MYSQL_SOCKET=${MYSQL_SOCKET:-"$MYSQL_DATA_DIR/mysqld.sock"} MYSQL_PID=${MYSQL_PID:-"$MYSQL_DATA_DIR/mysqld.pid"} # Startup wait timeout. MYSQL_STARTUP_WAIT=${MYSQL_STARTUP_WAIT:-1} # Shutdown wait timeout. 
MYSQL_SHUTDOWN_WAIT=60 #============= Nothing servicable below ================================ # User to run as if started under superuser #if test "$MYSQLD_USER" = "root" #then # MYSQLD_USER=mysql #fi ROOT_USER=${ROOT_USER:-"-uroot"} ROOT_PSWD=${ROOT_PSWD:-"-prootpass"} # Mandatory stuff INNODB_OPTS=" --default-storage-engine=InnoDB " # --debug terribly affects performance #DEBUG_OPTS=" --debug " #DEBUG_OPTS=" --debug=d,galera,wsdb:t:i:o,$MYSQL_DATA_DIR/mysqld.trc" SKIP_RECOVERY=0 START_POSITION="" err_log="$MYSQL_DATA_DIR/mysqld.err" mysql_log="$MYSQL_DATA_DIR/mysqld.log" usage() { cat - << EOF usage: mysql-galera [options] command Options: -c|--command : command to execute (status) --data_dir : location for mysql data directories --db : name of database to check -d|--debug : enable debug output --donor : desired state transfer donor -g|--gcs_address : address of gcs server (spread://locahost:4803) --gdb : run under gdb -i|--ignore_table : table to ignore in checking -l|--log : enable mysql query log --mysql-opt : an option to the server to follow -p|--password : mysql root user password --plugin : use innodb plugin --slave_threads : number of consurrent ws appliers (1) -u|--user : mysql root user --valgrind : run under valgrind --ws_level : RBR (default) or SQL -P|--port : port for MySQL client connections -S|--socket : location of mysqld socket --skip-recovery : skip recovery phase --start-position : start position passed to server Commands: check : check cosistency of database given with --db option start : start servers stop : stop servers restart : stop and start servers status : show running servers EOF } # Checks if a process with a given PID is still running find_pid() { ps ax | grep mysqld | grep -w ^\ *$1 > /dev/null } get_status() { local stat_var=$1 set -o pipefail mysql $ROOT_USER $ROOT_PSWD --socket $MYSQL_SOCKET --skip-column-names \ --reconnect -Be "SET wsrep_on=0; SHOW STATUS LIKE '$stat_var'" | \ cut -f 2 } # Loop until wsrep_ready == ON wait_for_wsrep_ready() { echo -n "Waiting for wsrep_ready" local pid=$1 local ret=1 local ready while find_pid $pid do echo -n "." sleep 1 # mysql connection can be interrupted by SST code, # don't fail on it rigth away ready=$(get_status "wsrep_ready") && \ if [ $ret ] && [ "$ready" == "ON" ]; then ret=0; break; fi done if [ $ret ] then echo " Done" else echo " Failed" fi return $ret } check_gcs_address() { local addr="$1" if [ -n "$addr" ] then case "$addr" in "gcomm://"*) # we could be checking address here, but generally it does not # have to exist yet. ;; "dummy://"*) ;; *) echo "Cluster address should start with either 'gcomm://' or 'dummy://'. Server not started." >&2 exit 1 esac fi } wsrep_start_position_opt="" # Run mysqld with --wsrep-recover and parse recovered position from log. # Position will be stored in wsrep_start_position_opt global. 
wsrep_recovery() { cmd="$@" wr_logfile=$(mktemp -t wsrep.XXXXXXXXXX) echo "WSREP: Running position recovery" set +e [ "$OS" == "Darwin" ] && export LD_LIBRARY_PATH $cmd --log_error=$wr_logfile --wsrep-recover [ "$OS" == "Darwin" ] && export -n LD_LIBRARY_PATH rp=$(grep "WSREP: Recovered position:" $wr_logfile) if [ -z "$rp" ]; then skipped=$(grep WSREP $wr_logfile | grep "skipping position recovery") if [ -z "$skipped" ]; then echo "WSREP: Failed to recover position: " \ `cat $wr_logfile`; else echo "WSREP: Position recovery skipped" fi else start_pos=$(echo $rp | sed 's/.*WSREP\:\ Recovered\ position://' \ | sed 's/^[ \t]*//') wsrep_start_position_opt="--wsrep_start_position=$start_pos" echo "WSREP: Recovered position $start_pos" fi set -e rm $wr_logfile } galera_start() { local failed if ! test -x $MYSQLD then echo "$MYSQLD executable not found" exit -1 fi if test -f $MYSQL_PID then echo "Found existing '$MYSQL_PID'. Please run '$0 stop'" exit -1; fi # if [ -n "$MYSQL_LOG" ] # then # LOGGING_OPTS=" --general_log=1 --log_output=FILE " # fi if [ -n "$WS_LEVEL" ] then RBR_OPTS=" --binlog_format=$WS_LEVEL " fi WSREP=${WSREP:-"$GALERA_BASE_DIR/lib/libgalera_smm.so"} if test -f $WSREP || test $WSREP == "none" then WSREP_OPTS="--wsrep_provider=$WSREP" else echo "WSREP driver '$WSREP' not found" exit -1 fi if test -n "$GCS_ADDRESS" then check_gcs_address $GCS_ADDRESS WSREP_OPTS="$WSREP_OPTS --wsrep_cluster_address=$GCS_ADDRESS" fi if test -n "$WSREP_SST_DONOR" then WSREP_OPTS="$WSREP_OPTS --wsrep_sst_donor=$WSREP_SST_DONOR" fi if test -n "$SLAVE_THREADS" then WSREP_OPTS="$WSREP_OPTS --wsrep_slave_threads=$SLAVE_THREADS" fi if test -f "$MYSQL_DATA_DIR/core" then mv "$MYSQL_DATA_DIR/core" "$MYSQL_DATA_DIR/core.old" fi echo -n "Starting mysqld instance with data dir $MYSQL_DATA_DIR and listening at port $MYSQL_PORT and socket $MYSQL_SOCKET..." ulimit -n 4096 # This is normally allowed for non-privileged users echo 0x3f > /proc/self/coredump_filter # dump mmapped memory as well if test $SKIP_RECOVERY = 0 then wsrep_recovery $MYSQLD \ $DEFAULTS_OPTION \ --user="$MYSQLD_USER" \ --basedir="$MYSQL_BASE_DIR" \ --datadir="$MYSQL_DATA_DIR" \ --plugin-dir=lib/mysql/plugin \ --pid-file="$MYSQL_PID" \ --port=$MYSQL_PORT \ --socket=$MYSQL_SOCKET \ --skip-external-locking \ --log_error=$err_log \ $MYSQLD_OPTS \ $INNODB_OPTS \ $WSREP_OPTS \ $DEBUG_OPTS \ $LOGGING_OPTS \ $RBR_OPTS \ $PLUGIN_OPTS else echo "skipping recovery" if test -n "$START_POSITION" then wsrep_start_position_opt="--wsrep-start-position=$START_POSITION" fi fi [ "$OS" == "Darwin" ] && export LD_LIBRARY_PATH if test -z $GDB then nohup $VALGRIND $MYSQLD \ $DEFAULTS_OPTION \ --user="$MYSQLD_USER" \ --basedir="$MYSQL_BASE_DIR" \ --datadir="$MYSQL_DATA_DIR" \ --plugin-dir=lib/mysql/plugin \ --pid-file="$MYSQL_PID" \ --port=$MYSQL_PORT \ --socket=$MYSQL_SOCKET \ --skip-external-locking \ --log_error=$err_log \ $INNODB_OPTS \ $WSREP_OPTS \ $DEBUG_OPTS \ $LOGGING_OPTS \ $RBR_OPTS \ $PLUGIN_OPTS \ $wsrep_start_position_opt \ $MYSQLD_OPTS \ 1>/dev/null 2>>$err_log & else $GDB --args $MYSQLD \ $DEFAULTS_OPTION \ --user="$MYSQLD_USER" \ --basedir="$MYSQL_BASE_DIR" \ --datadir="$MYSQL_DATA_DIR" \ --plugin-dir=lib/mysql/plugin \ --pid-file="$MYSQL_PID" \ --port=$MYSQL_PORT \ --socket=$MYSQL_SOCKET \ --skip-external-locking \ --log_error=$err_log \ $INNODB_OPTS \ $WSREP_OPTS \ $DEBUG_OPTS \ $LOGGING_OPTS \ $RBR_OPTS \ $PLUGIN_OPTS \ $wsrep_start_position_opt \ $MYSQLD_OPTS fi my_pid=$! 
[ "$OS" == "Darwin" ] && export -n LD_LIBRARY_PATH # echo "Waiting for pid file" second=0 while ! test -r $MYSQL_PID do sleep 1 second=$(($second + 1)) if find_pid $my_pid then # process is alive, wait for pid file echo -n "." elif test $MYSQL_STARTUP_WAIT -lt 0 -o \ $second -lt $MYSQL_STARTUP_WAIT then # process is not yet alive, wait for it to start echo -n "." else failed="yes" break fi done if test "$failed" != "yes" then echo " Done (PID:$(cat $MYSQL_PID))" else echo " Failed (PID:$my_pid)" return 1 fi wait_for_wsrep_ready $my_pid } galera_stop() { # check pid file if test -r $MYSQL_PID then # check if corresponding mysqld is running # if ps axc | grep mysqld | grep $(cat $MYSQL_PID) >/dev/null 2>&1 local my_pid=$(cat $MYSQL_PID) if find_pid $my_pid then echo -n "Killing PID $my_pid" kill $my_pid # wait for pid file to disappear for second in $(seq 1 $MYSQL_SHUTDOWN_WAIT) do echo -n "." sleep 1 if test ! -r $MYSQL_PID then break fi done echo "" if test "$second" = "$MYSQL_SHUTDOWN_WAIT" then echo -n "Failed to stop mysqld safely. Killing with -9... " kill -9 $my_pid fi else echo -n "Removing stale PID file $MYSQL_PID... " fi rm -f $MYSQL_PID echo "Done" else echo "PID file not found: $MYSQL_PID" fi } galera_restart() { galera_stop galera_start } galera_status() { if test -f $MYSQL_PID then local my_pid=$(cat $MYSQL_PID) if find_pid $my_pid then echo "mysqld running with PID: $my_pid" else echo "Found existing '$MYSQL_PID', but mysqld is not running" fi exit 0; else echo "no PID file: '$MYSQL_PID'" fi } dump() { #local ROUTINES="--routines" # don't dump routines yet, will cause false err. #local OPTIONS="--create-options" gives false positives on AUTO_INCREMENT tbls # --flush-logs --lock-all-tables # this blocks waiting for all trx to complete # thus impossible to use with -STOP/CONT local DUMP_OPTIONS=" --skip-opt --compact --quick --order-by-primary \ $OPTIONS --set-charset --skip-comments $ROUTINES " DB=${DB:-"--all-databases"} #set -x mysqldump $DUMP_OPTIONS $ROOT_USER $ROOT_PSWD --socket $MYSQL_SOCKET \ $IGNORE_TABLES $DB #set +x } wait_for_last_committed() { local lc local new_lc lc=$(get_status "wsrep_last_committed") while [ 1 ] do sleep 1 new_lc=$(get_status "wsrep_last_committed") if [ "$lc" == "$new_lc" ]; then break; fi lc="$new_lc" done } checksum() { wait_for_last_committed set -o pipefail if [ "$OS" == "Darwin" -o "$OS" == "FreeBSD" ]; then CS=$(dump | md5)" -" || return $? else CS=$(dump | md5sum) || return $? 
fi echo $CS } # to use valgrind or not VALGRIND=${VALGRIND:-""} # write set level, SQL, RBR or ROW WS_LEVEL="" #DB="test" # use 'test' database if none given # in 5.6 the following tables are non-deterministic IGNORE_TABLES=\ "--ignore-table=mysql.innodb_table_stats --ignore-table=mysql.innodb_index_stats" # to use innodb plugin or not PLUGIN_OPTS="" if [ $# -eq 0 ]; then usage; exit 1; fi while [ $# -gt 0 ]; do case $1 in -h|--help) usage exit 0 ;; -d|--debug) DEBUG_OPTS=" --wsrep_debug=1 " ;; --dbug) DBUG_OPTS=" --debug=d,galera,wsdb:t:i:o" ;; -l|--log) LOGGING_OPTS=" --general_log=1 --log_output=FILE " # MYSQL_LOG="log" ;; --valgrind) VALGRIND="valgrind --log-file=$MYSQL_DATA_DIR/vg.log --leak-check=full --track-origins=yes" # to force deallocation in std::string and STL containers export GLIBCXX_FORCE_NEW=1 ;; --gdb) GDB="gdb" ;; -g|--gcs_address) GCS_ADDRESS=$2 shift ;; --donor) WSREP_SST_DONOR=$2 shift ;; --slave_threads) SLAVE_THREADS=$2 shift ;; --db) DB=$2 shift ;; -i|--ignore_table) IGNORE_TABLES=" $IGNORE_TABLES --ignore-table=$DB.$2 " shift ;; --ws_level) WS_LEVEL=$2 shift ;; -u|--user) ROOT_USER="-u$2" shift ;; -p|--password) ROOT_PSWD="-p$2" shift ;; --plugin) PLUGIN_OPTS="--ignore_builtin_innodb --plugin-load=innodb=ha_innodb_plugin.so;innodb_trx=ha_innodb_plugin.so;innodb_locks=ha_innodb_plugin.so;innodb_lock_waits=ha_innodb_plugin.so;innodb_cmp=ha_innodb_plugin.so;innodb_cmp_reset=ha_innodb_plugin.so;innodb_cmpmem=ha_innodb_plugin.so;innodb_cmpmem_reset=ha_innodb_plugin.so " ;; --data_dir) MYSQL_DATA_DIR=$2 shift ;; -c|--command) COMMAND=$2 ;; --mysql-opt) MYSQLD_OPTS="$MYSQLD_OPTS $2" shift ;; -P|--port) MYSQL_PORT="$2" shift ;; -S|--socket) MYSQL_SOCKET="$2" shift ;; --skip-recovery) SKIP_RECOVERY=1 ;; --start-position) START_POSITION="$2" shift ;; 'dump') COMMAND="dump" ;; 'check') COMMAND="checksum" ;; 'start') COMMAND=galera_start ;; 'stop') COMMAND=galera_stop ;; 'restart') COMMAND=galera_restart ;; 'status') COMMAND=galera_status ;; 'create') COMMAND="create_data_dir $2" shift ;; *) # must be command echo "error parsing: $@" usage exit 1 ;; esac shift done if [ -z "$COMMAND" ] then usage >&2 exit 1 fi OS=$(uname) export LD_LIBRARY_PATH=$MYSQL_BASE_DIR/lib/mysql:$LD_LIBRARY_PATH [ "$OS" == "FreeBSD" ] && LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib/gcc48 [ "$OS" == "Darwin" ] && export -n LD_LIBRARY_PATH export PATH=$MYSQL_BASE_DIR/bin:$PATH $COMMAND # galera-3-25.3.20/scripts/mysql/get_patch.sh0000755000015300001660000000401013042054732020262 0ustar jenkinsjenkins#!/bin/bash -u set -x usage() { echo -e "Usage: $0 " } if [ $# -lt 2 ] then usage exit -1 fi #set -x set -e OS=$(uname -s) function MD5SUM() { if [ "$OS" == "Darwin" -o "$OS" == "FreeBSD" ]; then md5 -q $1 else md5sum $1 | awk '{ print $1 }' fi } # Source paths are either absolute or relative to script, get absolute THIS_DIR=$(pwd -P) cd $2 WSREP_REV=$(git log --pretty=oneline | wc -l) || \ WSREP_REV=$(bzr revno --tree -q) || \ WSREP_REV="XXXX" WSREP_REV=${WSREP_REV//[[:space:]]/} WSREP_PATCH_SPEC=$1-$WSREP_REV # Check existing file # This is done to not to depend on LP operation, however it looks like # any changes uncommitted locally might go unnoticed as revno stays the same WSREP_PATCH_FILE=$(ls $THIS_DIR/${WSREP_PATCH_SPEC}_*_.diff 2>/dev/null || : ) if [ -r "$WSREP_PATCH_FILE" ] then WSREP_PATCH_MD5SAVE=$(basename $WSREP_PATCH_FILE | awk -F _ '{ print $2 }' ) WSREP_PATCH_MD5TEST=$(MD5SUM $WSREP_PATCH_FILE | awk '{ print $1 }') if [ $WSREP_PATCH_MD5SAVE = $WSREP_PATCH_MD5TEST ] then # to be safe we 
better regenerate the patch every time echo $WSREP_PATCH_FILE > /dev/null # exit 0 fi fi # Existing file either not found or corrupted, try to create a new one rm -f $WSREP_PATCH_FILE #MYSQL_BRANCH="lp:mysql-server/5.1" #MYSQL_LP_REV=$( bzr tags -d $MYSQL_BRANCH | grep -m1 "$1" | awk '{ print $2 }' ) #if [ -z "$MYSQL_LP_REV" ] #then # echo "No such tag/revision: $1" # exit -1 #fi WSREP_PATCH_TMP="$THIS_DIR/$WSREP_PATCH_SPEC.diff" git diff $1..HEAD > $WSREP_PATCH_TMP || \ bzr diff -p1 -v --diff-options " --exclude=.bzrignore " \ -r tag:$1..branch:$2 \ > "$WSREP_PATCH_TMP" || if [ $? -gt 1 ]; then exit -1; fi # normally we expect bzr diff return 1 (changes available) WSREP_PATCH_MD5SUM=$(MD5SUM $WSREP_PATCH_TMP | awk '{ print $1 }') WSREP_PATCH_FILE=$THIS_DIR/${WSREP_PATCH_SPEC}_${WSREP_PATCH_MD5SUM}_.diff mv $WSREP_PATCH_TMP $WSREP_PATCH_FILE echo $WSREP_PATCH_FILE galera-3-25.3.20/scripts/mysql/my-5.1.cnf0000644000015300001660000000145713042054732017417 0ustar jenkinsjenkins# Default mysqld options [mysqld] core-file innodb_buffer_pool_size=420M innodb_log_file_size=100M innodb_flush_log_at_trx_commit=2 max_connections=1024 # # Here are options to load innodb plugin. Uncomment, if you want to # load innodb plugin during server start. # Note, mysql-galera start script has --plugin option, which sets these # plugin options on command line. Use one of these methods to load innodb # plugin, but not both # # MariaDB uses xtradb as a built-in, so no need to load any plugins ignore_builtin_innodb plugin-load=innodb=ha_innodb_plugin.so;innodb_trx=ha_innodb_plugin.so;innodb_locks=ha_innodb_plugin.so;innodb_lock_waits=ha_innodb_plugin.so;innodb_cmp=ha_innodb_plugin.so;innodb_cmp_reset=ha_innodb_plugin.so;innodb_cmpmem=ha_innodb_plugin.so;innodb_cmpmem_reset=ha_innodb_plugin.so galera-3-25.3.20/scripts/mysql/mysql-plain0000755000015300001660000000660413042054732020174 0ustar jenkinsjenkins#!/bin/bash SELF=$(cd $(dirname $0); pwd -P)/$(basename $0) # Where mysql installation is located (by default determined from this script location) if test -z "$MYSQL_BASE_DIR" then echo "MYSQL_BASE_DIR is not set" exit 1 fi # MySQL configuration file MY_CNF=${MYSQL_CNF:-"$MYSQL_BASE_DIR/etc/my.cnf"} if test -s "$MY_CNF" then DEFAULTS_OPTION=" --defaults-file=$MY_CNF " my_cnf_datadir=$(grep ^datadir $MY_CNF | sed s/[^/]*//) else DEFAULTS_OPTION=" --no-defaults " fi # If it was not given explicitely, take it from my.cnf MYSQL_DATA_DIR=${MYSQL_DATA_DIR:-"$my_cnf_datadir"} # If it was not found in my.cnf, use default MYSQL_DATA_DIR=${MYSQL_DATA_DIR:-"$MYSQL_BASE_DIR/var"} # use mysqld server directly, better not have automatic restarting MYSQLD=${MYSQLD:-"$(dirname $SELF)/../sql/mysqld"} # Port, socket and pid files MYSQL_PORT=${MYSQL_PORT:-3307} MYSQL_SOCKET=${MYSQL_SOCKET:-"$MYSQL_DATA_DIR/mysqld.sock"} MYSQL_PID=${MYSQL_PID:-"$MYSQL_DATA_DIR/mysqld.pid"} err_log="$MYSQL_DATA_DIR/$(hostname).err" # Checks if a process with a given PID is still running find_pid() { ps axc | grep mysqld | grep -w ^\ *$1 > /dev/null } mysql_start() { local failed if ! test -x "$MYSQLD" then echo "$MYSQLD executable not found" exit -1 fi if test -f $MYSQL_PID then echo "Found existing '$MYSQL_PID'. Please run '$0 stop'" exit -1; fi echo -n "Starting mysqld instance with data dir $MYSQL_DATA_DIR and listening to port $MYSQL_PORT and socket $MYSQL_SOCKET..." 
set -x nohup $MYSQLD \ $DEFAULTS_OPTION \ --basedir="$MYSQL_BASE_DIR" \ --datadir="$MYSQL_DATA_DIR" \ --pid-file="$MYSQL_PID" \ --port=$MYSQL_PORT \ --socket=$MYSQL_SOCKET \ --skip-locking \ --log_error=$err_log \ 1>/dev/null 2>>$err_log & my_pid=$! set +x # echo "Waiting for pid file" while ! test -r $MYSQL_PID do sleep 1 if find_pid $my_pid then # process is alive, wait for pid file echo -n "." else failed="yes" break fi done if test "$failed" != "yes" then echo " Done (PID:$(cat $MYSQL_PID))" else echo " Failed (PID:$my_pid)" fi } mysql_stop() { # check pid file if test -r $MYSQL_PID then # check if corresponding mysqld is running # if ps axc | grep mysqld | grep $(cat $MYSQL_PID) >/dev/null 2>&1 if find_pid $(cat $MYSQL_PID) then echo -n "Killing PID $(cat $MYSQL_PID)" kill $(cat $MYSQL_PID) # wait for pid file to disappear for second in $(seq 1 $MYSQL_SHUTDOWN_WAIT) do echo -n "." sleep 1 if test ! -r $MYSQL_PID then break fi done echo "" if test "$second" = "$MYSQL_SHUTDOWN_WAIT" then echo -n "Failed to stop mysqld safely. Killing with -9... " kill -9 $(cat $MYSQL_PID; rm -rf $MYSQL_PID) fi else echo -n "Removing stale PID file $MYSQL_PID... " rm -rf $MYSQL_PID fi echo "Done" else echo "PID file not found: $MYSQL_PID" fi } mysql_restart() { mysql_stop mysql_start } case "$1" in 'start') mysql_start $2 ;; 'stop') mysql_stop ;; 'restart') mysql_restart $2 ;; 'check') shift; checksum $* ;; *) echo "Usage: $0 start|stop|restart|check" esac # galera-3-25.3.20/scripts/mysql/mysql_var_5.5.tgz0000644000015300001660000016306213042054732021134 0ustar jenkinsjenkins
e쇍Z4w?h}C?:@Unv7fLgoos~ЕZg֫IFڀOhB.Y{G>*vԼ D#/pԉ$r .ɡ_B04_PCܛw!LKϨPBS1/Q?gOπ^ 4z(?G:?}?})R܃<3n9~7CN}b4?%䍖,myj;VXZk^!F=zMvNmzVEG轾#xU.;C_Cm  BG}7 ϝD?"7H>DWnnz+mYoCOż`ub, Xׅ?wy^j5F _wn,/"NbgHUcl_KdUܨm{%b$26_"LAoҌ"'zd#0m#-ɤF)+w~ZxyeQ̈́?ƽ+e7jN/h6ܞ@ xiy־W-VÄfk  ~-3%%QN< ?~9yp7G_n LO c9TS/9JrJJVn!p=_=D ?h4puhGQt|ݫ{6E[~<7 8*.4#صfe/Qtrͻ5oNjNcK;[=$:6Wo{M1_p{{wkݯ}Y_1?ݚ ;Ni#*>~E8B1W} 7RP}lP}_C W{?X220y_G}'Cq/3Wحe t̴j '|LOn(`!'Og.B 4}~m!A`հjxW %ObwȐqaf˿?%3R{44a@كc1Qۍ}wZfmތѸw[P5hbQ =J{X`=vG#FlA)C0^(~$/ixԶ [k5e_3ϝzw4-os(J} S9q/>~sDg;6&(3C(HFܺwy rJ!s{4_  >\FޣУ.Z~ޒ.LQ ա+zI yExR׼mީu<:Z\BNOһOGww OEݧcOǥw}zTzIۻO}zݧǵwN}zӓ;NQ>/=G їQ_zt_}3KOa_z"#yO/=?ŗNė;NF}ϊ/=~\|wNnSV 5 )\ހ#G|XHi||2??4GÓ{w_OK>;!FcQx,zg~h^z%/eN#Qϸ>']>}C[sq_νp8 `$N_DIa (Ģh On>CϣV%{odsXhr_=2^V]٩zoRu ShϢL[a7ߩ[bݑc37^I3qbJ>HN 0C CkWWޘ[_\}9H0,p!ȁׁEC+'3:2zr'767~e2Cϼy}e{6:q+77߉:S< c;~*}F'EzOڵolԾE怎k{+ks׹ssO@ n_<knu &v;?!r3Ahhudlud`uJ22ro6v2paZ)Ϝ^jNȃ@X0tCoGD_!r BO|?ЇY\FϏ >Qv=bvڭw*5^)OOf],O/@yDEO3pLf6 @ns="BaJsLfS A77cEGWn/qt|cŵ TTD؏6pWqo﯏~-۫$zlyq}cifPqsK+x3?|++W7GcW][GcB^Fё©S_ -<ȝ@ѣ?D?C0~gYˣ+,">!x ~{vaGc/f a bq p(ꛏ݅;?vZm X\pz ^`0_vm9ͣJ*KhuubxT~ r<1BcE\ƒȘbG/:Q(!t މ8Gq?hsEt=ę7|e2cˋK6f>+xRT̵hk؈wpWh36F }@ ю}!BqbGWY E}B@خ~{vaGc/f a'qyKO4C!4æ_vwZ5Y}JO2 LmǙ33` iGx8j7)`'b]TL;n?N8Pr̎qC 5ܟ=autk /Ly8\ll |6g F?}U:|Cřl3syx'|$H  #'`B)r 0CHN/__Z1 =+/z:p?` H)Pf#ڵmٹPrOo@Zesp=X'<~5>kA|:?}'"ϱ#0>ڟ{"_^Y__Y^ci~-%VH`@?3'|h(pz}Ahhvr?W/nr#77د1tleuqys~-$hAnd, ;V,8!%~P~X~Xf-gn 'ݪ/VTjkڳ{[/F^i;4x5+jt dV.yV }|!_7E8galera-3-25.3.20/scripts/mysql/QUICK_START0000644000015300001660000000732313042054732017553 0ustar jenkinsjenkinsMySQL/Galera Demo Quick Start Guide =================================== version 0.8.x This is an informal guide to quick setup of MySQL/Galera demo cluster for experienced MySQL users. For more detailed information please see README. MySQL/Galera cluster is a multi-master MySQL server cluster. It provides - True high-availability (no committed transaction is lost, no single point of failure) - Performance scalability even for write-intensive loads. - Semantics of a single MySQL server. This distribution is all you need to install and run MySQL/Galera cluster. You don't need to be root or to uninstall/disable any software. MYSQL CLUSTER IN 3 EASY STEPS: Suppose you have three networked computers Node1, Node2, Node3 of roughly the same performance and Node1 has IP address 192.168.1.1 1. Copy and unpack this distribution on each node to the directory of your choice (hereafter INSTALL_DIR). The location must have at least 1Gb of free space for database files. 2. Start first MySQL server on Node1: /mysql-galera -g gcomm:// start 3. Start remaining nodes: /mysql-galera -g gcomm://192.168.1.1 start NOTE1: First node of the cluster is started by supplying an empty gcomm address. When you want to add a node to a cluster, you must supply an address of any working cluster node. NOTE2: Beginning from the second node, each node you start will automatically copy the contents of the database from one of the working nodes. Allow some time for it to complete - it depends on the size of the database. mysql-galera script will return when copying is done and node is ready to use. See README for details. That's it. You've got a multi-master MySQL/Galera cluster. Node1, Node2 and Node3 will now synchronize all writes with the rest of the cluster. Root password is 'rootpass' and there is also 'test' user with password 'testpass' and privileges on 'test.*'. You can now populate your database with mysqldump by loading your database dump to (any) one of the nodes. LIMITATIONS: 1. 
LIMITATIONS:

1. Currently replication works only with the InnoDB storage engine. Any
   writes to tables of other types, including system (mysql.*) tables, are
   not replicated. (The GRANT command is an exception and is replicated.)

2. Rows in tables without primary keys may appear in different order on
   different nodes. As a result SELECT...LIMIT... may return slightly
   different sets. The DELETE operation on such tables is not supported.

3. Unsupported in this release:
   commands: LOAD DATA may result in a transaction that is too big and
             will be rejected.
   features: LOCKed sessions and locking functions.
   Use at your own risk.

QUICK ANSWERS:

1. Yes, it works anywhere TCP works, including Amazon EC2(TM).

2. Yes, it'd be faster if there were a dedicated network for replication
   connections and a separate network for MySQL client connections.

3. Yes, it is highly configurable and flexible. For more info see README,
   mysql-galera --help and the my.cnf that comes with the distribution.

4. If your application cannot utilize more than one server, you can use a
   TCP connection balancer like GLB
   (http://www.codership.com/products/downloads) or pen
   (http://siag.nu/pen/). Note however that SQL traffic balancing is very
   CPU consuming (due to the high number of small packets), so it is
   recommended to have a dedicated machine for the load balancer.

5. The patch for MySQL can be obtained at
   https://code.launchpad.net/codership-mysql

6. Galera 0.8 sources can be obtained at
   https://code.launchpad.net/galera

Please direct all your suggestions, opinions and inquiries to
info@codership.com or (better) our mailing list at
http://groups.google.com/group/codership-team
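A quick way to sanity-check replication against limitations 1 and 2 above
is to create an InnoDB table with a primary key on one node and read it
back from another. This is only an illustration; it assumes the mysql
client is on your PATH and uses the default 'test' account described
earlier in this guide.

   # on Node1
   mysql -u test -ptestpass test \
     -e "CREATE TABLE t1 (i INT PRIMARY KEY) ENGINE=InnoDB; INSERT INTO t1 VALUES (1);"

   # on Node2 - the row should already be there
   mysql -u test -ptestpass test -e "SELECT * FROM t1;"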
galera-3-25.3.20/scripts/mysql/mysql_var_5.7.tgz0000644000015300001660000060226513042054732021141 0ustar jenkinsjenkins[binary .tgz contents omitted]
# - Add the above to any other configuration file (for example ~/.my.ini)
#   and copy my_print_defaults to /usr/bin
# - Add the path to the mysql-installation-directory to the basedir variable
#   below.
#
# If you want to affect other MySQL variables, you should make your changes
# in the /etc/my.cnf, ~/.my.cnf or other MySQL configuration files.

# If you change base dir, you must also change datadir. These may get
# overwritten by settings in the MySQL configuration files.

basedir=
datadir=

# Default value, in seconds, after which the script should timeout waiting
# for server start.
# Value here is overridden by value in my.cnf.
# 0 means don't wait at all
# Negative numbers mean to wait indefinitely
service_startup_timeout=900

# The following variables are only set for letting mysql.server find things.

# Set some defaults
pid_file=
server_pid_file=
use_mysqld_safe=1
user=mysql

if test -z "$basedir"
then
  basedir=/
  bindir=/usr/bin
  if test -z "$datadir"
  then
    datadir=/var/lib/mysql
  fi
  sbindir=/usr/sbin
  libexecdir=/usr/sbin
else
  bindir="$basedir/bin"
  if test -z "$datadir"
  then
    datadir="$basedir/data"
  fi
  sbindir="$basedir/sbin"
  libexecdir="$basedir/libexec"
fi

# datadir_set is used to determine if datadir was set (and so should be
# *not* set inside of the --basedir= handler.)
datadir_set=

#
# Use LSB init script functions for printing messages, if possible
#
lsb_functions="/lib/lsb/init-functions"
if test -f $lsb_functions ; then
  . $lsb_functions
else
  log_success_msg()
  {
    echo " SUCCESS! $@"
  }
  log_failure_msg()
  {
    echo " ERROR! $@"
  }
fi

PATH=/sbin:/usr/sbin:/bin:/usr/bin:$basedir/bin
export PATH

mode=$1    # start or stop

shift

other_args="$*"   # uncommon, but needed when called from an RPM upgrade action
           # Expected: "--skip-networking --skip-grant-tables"
           # They are not checked here, intentionally, as it is the responsibility
           # of the "spec" file author to give correct arguments only.
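# Illustration (an editor's note, not shipped defaults): the empty settings
# above are normally filled in from a MySQL option file rather than edited
# here. The script reads the [mysqld], [server], [mysql_server] and
# [mysql.server] groups further below via my_print_defaults, so a minimal
# option-file sketch could look like this (all paths are placeholders):
#
#   [mysql.server]
#   basedir=/opt/mysql-galera
#   datadir=/opt/mysql-galera/data
#   pid-file=galera-node.pid
#   service-startup-timeout=600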
case `echo "testing\c"`,`echo -n testing` in *c*,-n*) echo_n= echo_c= ;; *c*,*) echo_n=-n echo_c= ;; *) echo_n= echo_c='\c' ;; esac parse_server_arguments() { for arg do case "$arg" in --basedir=*) basedir=`echo "$arg" | sed -e 's/^[^=]*=//'` bindir="$basedir/bin" if test -z "$datadir_set"; then datadir="$basedir/data" fi sbindir="$basedir/sbin" libexecdir="$basedir/libexec" ;; --datadir=*) datadir=`echo "$arg" | sed -e 's/^[^=]*=//'` datadir_set=1 ;; --user=*) user=`echo "$arg" | sed -e 's/^[^=]*=//'` ;; --pid-file=*) server_pid_file=`echo "$arg" | sed -e 's/^[^=]*=//'` ;; --service-startup-timeout=*) service_startup_timeout=`echo "$arg" | sed -e 's/^[^=]*=//'` ;; --use-mysqld_safe) use_mysqld_safe=1;; --use-manager) use_mysqld_safe=0;; esac done } parse_manager_arguments() { for arg do case "$arg" in --pid-file=*) pid_file=`echo "$arg" | sed -e 's/^[^=]*=//'` ;; --user=*) user=`echo "$arg" | sed -e 's/^[^=]*=//'` ;; esac done } wait_for_pid () { verb="$1" manager_pid="$2" # process ID of the program operating on the pid-file i=0 avoid_race_condition="by checking again" while test $i -ne $service_startup_timeout ; do case "$verb" in 'created') # wait for a PID-file to pop into existence. test -s $pid_file && i='' && break ;; 'removed') # wait for this PID-file to disappear test ! -s $pid_file && i='' && break ;; *) echo "wait_for_pid () usage: wait_for_pid created|removed manager_pid" exit 1 ;; esac # if manager isn't running, then pid-file will never be updated if test -n "$manager_pid"; then if kill -0 "$manager_pid" 2>/dev/null; then : # the manager still runs else # The manager may have exited between the last pid-file check and now. if test -n "$avoid_race_condition"; then avoid_race_condition="" continue # Check again. fi # there's nothing that will affect the file. log_failure_msg "Manager of pid-file quit without updating file." return 1 # not waiting any more. fi fi echo $echo_n ".$echo_c" i=`expr $i + 1` sleep 1 done if test -z "$i" ; then log_success_msg return 0 else log_failure_msg return 1 fi } # Get arguments from the my.cnf file, # the only group, which is read from now on is [mysqld] if test -x ./bin/my_print_defaults then print_defaults="./bin/my_print_defaults" elif test -x $bindir/my_print_defaults then print_defaults="$bindir/my_print_defaults" elif test -x $bindir/mysql_print_defaults then print_defaults="$bindir/mysql_print_defaults" else # Try to find basedir in /etc/my.cnf conf=/etc/my.cnf print_defaults= if test -r $conf then subpat='^[^=]*basedir[^=]*=\(.*\)$' dirs=`sed -e "/$subpat/!d" -e 's//\1/' $conf` for d in $dirs do d=`echo $d | sed -e 's/[ ]//g'` if test -x "$d/bin/my_print_defaults" then print_defaults="$d/bin/my_print_defaults" break fi if test -x "$d/bin/mysql_print_defaults" then print_defaults="$d/bin/mysql_print_defaults" break fi done fi # Hope it's in the PATH ... but I doubt it test -z "$print_defaults" && print_defaults="my_print_defaults" fi # # Read defaults file from 'basedir'. 
If there is no defaults file there # check if it's in the old (depricated) place (datadir) and read it from there # extra_args="" if test -r "$basedir/my.cnf" then extra_args="-e $basedir/my.cnf" else if test -r "$datadir/my.cnf" then extra_args="-e $datadir/my.cnf" fi fi parse_server_arguments `$print_defaults $extra_args mysqld server mysql_server mysql.server` # Look for the pidfile parse_manager_arguments `$print_defaults $extra_args manager` # # Set pid file if not given # if test -z "$pid_file" then pid_file=$datadir/mysqlmanager-`/bin/hostname`.pid else case "$pid_file" in /* ) ;; * ) pid_file="$datadir/$pid_file" ;; esac fi if test -z "$server_pid_file" then server_pid_file=$datadir/`/bin/hostname`.pid else case "$server_pid_file" in /* ) ;; * ) server_pid_file="$datadir/$server_pid_file" ;; esac fi case "$mode" in 'start') # Start daemon # Safeguard (relative paths, core dumps..) cd $basedir manager=$bindir/mysqlmanager if test -x $libexecdir/mysqlmanager then manager=$libexecdir/mysqlmanager elif test -x $sbindir/mysqlmanager then manager=$sbindir/mysqlmanager fi echo $echo_n "Starting MySQL" if test -x $manager -a "$use_mysqld_safe" = "0" then if test -n "$other_args" then log_failure_msg "MySQL manager does not support options '$other_args'" exit 1 fi # Give extra arguments to mysqld with the my.cnf file. This script may # be overwritten at next upgrade. "$manager" \ --mysqld-safe-compatible \ --user="$user" \ --pid-file="$pid_file" >/dev/null 2>&1 & wait_for_pid created $!; return_value=$? # Make lock for RedHat / SuSE if test -w /var/lock/subsys then touch /var/lock/subsys/mysqlmanager fi exit $return_value elif test -x $bindir/mysqld_safe then # Give extra arguments to mysqld with the my.cnf file. This script # may be overwritten at next upgrade. pid_file=$server_pid_file $bindir/mysqld_safe --datadir=$datadir --pid-file=$server_pid_file $other_args >/dev/null 2>&1 & wait_for_pid created $!; return_value=$? # Make lock for RedHat / SuSE if test -w /var/lock/subsys then touch /var/lock/subsys/mysql fi exit $return_value else log_failure_msg "Couldn't find MySQL manager ($manager) or server ($bindir/mysqld_safe)" fi ;; 'stop') # Stop daemon. We use a signal here to avoid having to know the # root password. # The RedHat / SuSE lock directory to remove lock_dir=/var/lock/subsys/mysqlmanager # If the manager pid_file doesn't exist, try the server's if test ! -s "$pid_file" then pid_file=$server_pid_file lock_dir=/var/lock/subsys/mysql fi if test -s "$pid_file" then mysqlmanager_pid=`cat $pid_file` if (kill -0 $mysqlmanager_pid 2>/dev/null) then echo $echo_n "Shutting down MySQL" kill $mysqlmanager_pid # mysqlmanager should remove the pid_file when it exits, so wait for it. wait_for_pid removed "$mysqlmanager_pid"; return_value=$? else log_failure_msg "MySQL manager or server process #$mysqlmanager_pid is not running!" rm $pid_file fi # delete lock for RedHat / SuSE if test -f $lock_dir then rm -f $lock_dir fi exit $return_value else log_failure_msg "MySQL manager or server PID file could not be found!" fi ;; 'restart') # Stop the service and regardless of whether it was # running or not, start it again. if $0 stop $other_args; then $0 start $other_args else log_failure_msg "Failed to stop running server, so refusing to try to start." 
exit 1 fi ;; 'reload'|'force-reload') if test -s "$server_pid_file" ; then read mysqld_pid < $server_pid_file kill -HUP $mysqld_pid && log_success_msg "Reloading service MySQL" touch $server_pid_file else log_failure_msg "MySQL PID file could not be found!" exit 1 fi ;; 'status') # First, check to see if pid file exists if test -s "$server_pid_file" ; then read mysqld_pid < $server_pid_file if kill -0 $mysqld_pid 2>/dev/null ; then log_success_msg "MySQL running ($mysqld_pid)" exit 0 else log_failure_msg "MySQL is not running, but PID file exists" exit 1 fi else # Try to find appropriate mysqld process mysqld_pid=`pidof $libexecdir/mysqld` if test -z $mysqld_pid ; then if test "$use_mysqld_safe" = "0" ; then lockfile=/var/lock/subsys/mysqlmanager else lockfile=/var/lock/subsys/mysql fi if test -f $lockfile ; then log_failure_msg "MySQL is not running, but lock exists" exit 2 fi log_failure_msg "MySQL is not running" exit 3 else log_failure_msg "MySQL is running but PID file could not be found" exit 4 fi fi ;; *) # usage echo "Usage: $0 {start|stop|restart|reload|force-reload|status} [ MySQL server options ]" exit 1 ;; esac exit 0 galera-3-25.3.20/scripts/mysql/centos/mysql-wsrep.list0000644000015300001660000000637313042054732022476 0ustar jenkinsjenkins# This is a MySQ L-wsrep package description for ESP package manager # CentOS specific part %requires /bin/sh %requires /sbin/install-info %requires /sbin/ldconfig %requires /usr/sbin/useradd %requires bash %requires chkconfig %requires coreutils %requires grep %requires libc.so.6 %requires libcrypt.so.1 %requires libcrypto.so.6 %requires libdl.so.2 %requires libgcc_s.so.1 %requires libm.so.6 %requires libncurses.so.5 %requires libnsl.so.1 %requires libpthread.so.0 %requires libssl.so.6 %requires libstdc++.so.6 %requires libz.so.1 %requires procps %requires rtld(GNU_HASH) %requires shadow-utils # Required MySQL packages # for PHP-mysql on RHEL5: # %requires MySQL-shared-compat $MYSQL_VER # for mysqldump SST: # %requires MySQL-client-community $MYSQL_VER %provides MySQL %provides MySQL-server %provides mysql %provides mysql-server # Conflicting mysql packages #%incompat mysql-server $prefix=/usr $CONF_DST=/etc/mysql d 755 root root $CONF_DST - c 644 root root /etc/my.cnf $GALERA_SRC/scripts/mysql/centos/my.cnf d 755 root root $CONF_DST/conf.d d 755 root root /etc/rc.d/init.d - f 755 root root /etc/rc.d/init.d/mysql $GALERA_SRC/scripts/mysql/centos/init.oracle d 755 root root /etc/rc.d/logrotate.d - f 755 root root /etc/rc.d/logrotate.d/mysql $MYSQL_SRC/support-files/mysql-log-rotate %if x86_64 # CentOS (read Red Hat) never fails to screw up things $LIBS_DST=${prefix}/lib64/mysql %else $LIBS_DST=${prefix}/lib/mysql %endif $SHAR_DST=${prefix}/share/mysql $SBIN_DST=${prefix}/sbin $BINS_DST=${prefix}/bin $DOCS_DST=${prefix}/share/doc/MySQL-server-$MYSQL_VER $MAN_DST=${prefix}/share/man # Commented out CentOS pieces of code which don't seem to make sense here $mysql_datadir=/var/lib/mysql #d 755 root root ${mysql_data} - %preinstall < /dev/null 2>&1 echo "Giving mysqld 5 seconds to exit nicely" sleep 5 fi EOF_PREINSTALL # Postinstall script is a combination of those from CentOS and MySQL RPMs %postinstall < /dev/null || true useradd -M -r -d $mysql_datadir -s /bin/bash -c "MySQL Server" \ -g mysql mysql 2> /dev/null || true # in case user existed usermod -g mysql mysql 2> /dev/null || true /bin/chown -R mysql:mysql ${CONF_DST} /bin/chmod 0755 ${mysql_datadir} /bin/touch /var/log/mysqld.log /bin/chown -R mysql:mysql ${mysql_datadir} 
/usr/bin/mysql_install_db --rpm --user=mysql --wsrep-on=0 /bin/chown -R mysql:mysql ${mysql_datadir} /bin/chmod -R og-rw ${mysql_datadir}/mysql if [ -x /etc/init.d/mysql ] ; then /etc/init.d/mysql start echo "Giving mysqld 2 seconds to start" sleep 2 fi sleep 2 EOF_POSTINSTALL %preremove </dev/null 2>&1 || : /sbin/chkconfig --del mysql fi EOF_PREREMOVE #%postremove </dev/null 2>&1 || : #fi #EOF_POSTREMOVE # galera-3-25.3.20/scripts/mysql/centos/my.cnf0000644000015300001660000000104413042054732020401 0ustar jenkinsjenkins[mysqld] datadir=/var/lib/mysql socket=/var/lib/mysql/mysql.sock user=mysql # Default to using old password format for compatibility with mysql 3.x # clients (those using the mysqlclient10 compatibility package). old_passwords=1 [mysqld_safe] log-error=/var/log/mysqld.log pid-file=/var/run/mysqld/mysqld.pid # # * IMPORTANT: Additional settings that can override those from this file! # The files must end with '.cnf', otherwise they'll be ignored. # WSREP NOTE: additional wsrep configuration is in wsrep.cnf # !includedir /etc/mysql/conf.d/ galera-3-25.3.20/scripts/mysql/centos/init.centos0000755000015300001660000001030713042054732021451 0ustar jenkinsjenkins#!/bin/bash # # mysqld This shell script takes care of starting and stopping # the MySQL subsystem (mysqld). # # chkconfig: - 64 36 # description: MySQL database server. # processname: mysqld # config: /etc/my.cnf # pidfile: /var/run/mysqld/mysqld.pid # Source function library. . /etc/rc.d/init.d/functions # Source networking configuration. . /etc/sysconfig/network prog="MySQL" # extract value of a MySQL option from config files # Usage: get_mysql_option SECTION VARNAME DEFAULT # result is returned in $result # We use my_print_defaults which prints all options from multiple files, # with the more specific ones later; hence take the last match. get_mysql_option(){ result=`/usr/bin/my_print_defaults "$1" | sed -n "s/^--$2=//p" | tail -n 1` if [ -z "$result" ]; then # not found, use default result="$3" fi } get_mysql_option mysqld datadir "/var/lib/mysql" datadir="$result" get_mysql_option mysqld socket "$datadir/mysql.sock" socketfile="$result" get_mysql_option mysqld_safe log-error "/var/log/mysqld.log" errlogfile="$result" get_mysql_option mysqld_safe pid-file "/var/run/mysqld/mysqld.pid" mypidfile="$result" start(){ touch "$errlogfile" chown mysql:mysql "$errlogfile" chmod 0640 "$errlogfile" [ -x /sbin/restorecon ] && /sbin/restorecon "$errlogfile" if [ ! -d "$datadir/mysql" ] ; then action $"Initializing MySQL database: " /usr/bin/mysql_install_db --wsrep-on=0 --datadir="$datadir" --user=mysql ret=$? chown -R mysql:mysql "$datadir" if [ $ret -ne 0 ] ; then return $ret fi fi chown mysql:mysql "$datadir" chmod 0755 "$datadir" # Pass all the options determined above, to ensure consistent behavior. # In many cases mysqld_safe would arrive at the same conclusions anyway # but we need to be sure. /usr/bin/mysqld_safe --datadir="$datadir" --socket="$socketfile" \ --log-error="$errlogfile" --pid-file="$mypidfile" \ --user=mysql >/dev/null 2>&1 & ret=$? # Spin for a maximum of N seconds waiting for the server to come up. # Rather than assuming we know a valid username, accept an "access # denied" response as meaning the server is functioning. 
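# Illustration only (not part of the control flow): the wait loop below is
# roughly what one would run by hand, e.g.
#   /usr/bin/mysqladmin --socket="$socketfile" --user=UNKNOWN_MYSQL_USER ping
# A normal "mysqld is alive" reply or an "Access denied for user ..." error
# both mean something is answering on the socket, so either one ends the wait.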
if [ $ret -eq 0 ]; then STARTTIMEOUT=30 while [ $STARTTIMEOUT -gt 0 ]; do RESPONSE=`/usr/bin/mysqladmin --socket="$socketfile" --user=UNKNOWN_MYSQL_USER ping 2>&1` && break echo "$RESPONSE" | grep -q "Access denied for user" && break sleep 1 let STARTTIMEOUT=${STARTTIMEOUT}-1 done if [ $STARTTIMEOUT -eq 0 ]; then echo "Timeout error occurred trying to start MySQL Daemon." action $"Starting $prog: " /bin/false ret=1 else action $"Starting $prog: " /bin/true fi else action $"Starting $prog: " /bin/false fi [ $ret -eq 0 ] && touch /var/lock/subsys/mysqld return $ret } stop(){ MYSQLPID=`cat "$mypidfile" 2>/dev/null ` if [ -n "$MYSQLPID" ]; then /bin/kill "$MYSQLPID" >/dev/null 2>&1 ret=$? if [ $ret -eq 0 ]; then STOPTIMEOUT=60 while [ $STOPTIMEOUT -gt 0 ]; do /bin/kill -0 "$MYSQLPID" >/dev/null 2>&1 || break sleep 1 let STOPTIMEOUT=${STOPTIMEOUT}-1 done if [ $STOPTIMEOUT -eq 0 ]; then echo "Timeout error occurred trying to stop MySQL Daemon." ret=1 action $"Stopping $prog: " /bin/false else rm -f /var/lock/subsys/mysqld rm -f "$socketfile" action $"Stopping $prog: " /bin/true fi else action $"Stopping $prog: " /bin/false fi else ret=1 action $"Stopping $prog: " /bin/false fi return $ret } restart(){ stop start } condrestart(){ [ -e /var/lock/subsys/mysqld ] && restart || : } # See how we were called. case "$1" in start) start ;; stop) stop ;; status) status mysqld ;; restart) restart ;; condrestart) condrestart ;; *) echo $"Usage: $0 {start|stop|status|condrestart|restart}" exit 1 esac exit $? galera-3-25.3.20/scripts/mysql/centos/mysqld_safe0000755000015300001660000004451213042054732021530 0ustar jenkinsjenkins#!/bin/sh # Copyright Abandoned 1996 TCX DataKonsult AB & Monty Program KB & Detron HB # This file is public domain and comes with NO WARRANTY of any kind # # Script to start the MySQL daemon and restart it if it dies unexpectedly # # This should be executed in the MySQL base directory if you are using a # binary installation that is not installed in its compile-time default # location # # mysql.server works by first doing a cd to the base directory and from there # executing mysqld_safe KILL_MYSQLD=1; MYSQLD= niceness=0 # Initial logging status: error log is not open, and not using syslog logging=init want_syslog=0 syslog_tag= user='mysql' pid_file= err_log= syslog_tag_mysqld=mysqld syslog_tag_mysqld_safe=mysqld_safe trap '' 1 2 3 15 # we shouldn't let anyone kill us umask 007 defaults= case "$1" in --no-defaults|--defaults-file=*|--defaults-extra-file=*) defaults="$1"; shift ;; esac usage () { cat <> "$err_log" ;; syslog) logger -t "$syslog_tag_mysqld_safe" -p "$priority" "$*" ;; *) echo "Internal program error (non-fatal):" \ " unknown logging method '$logging'" >&2 ;; esac } log_error () { log_generic daemon.error "$@" >&2 } log_notice () { log_generic daemon.notice "$@" } eval_log_error () { cmd="$1" case $logging in file) cmd="$cmd >> "`shell_quote_string "$err_log"`" 2>&1" ;; syslog) # mysqld often prefixes its messages with a timestamp, which is # redundant when logging to syslog (which adds its own timestamp) # However, we don't strip the timestamp with sed here, because # sed buffers output (only GNU sed supports a -u (unbuffered) option) # which means that messages may not get sent to syslog until the # mysqld process quits. 
cmd="$cmd 2>&1 | logger -t '$syslog_tag_mysqld' -p daemon.error" ;; *) echo "Internal program error (non-fatal):" \ " unknown logging method '$logging'" >&2 ;; esac #echo "Running mysqld: [$cmd]" eval "$cmd" } shell_quote_string() { # This sed command makes sure that any special chars are quoted, # so the arg gets passed exactly to the server. echo "$1" | sed -e 's,\([^a-zA-Z0-9/_.=-]\),\\\1,g' } wsrep_pick_url() { [ $# -eq 0 ] return 0 if ! which nc >/dev/null; then log_error "ERROR: nc tool not found in PATH! Make sure you have it installed." return 1 fi local url # Assuming URL in the form scheme://host:port # If host and port are not NULL, the liveness of URL is assumed to be tested # If port part is absent, the url is returned literally and unconditionally # If every URL has port but none is reachable, nothing is returned for url in `echo $@ | sed s/,/\ /g` 0; do local host=`echo $url | cut -d \: -f 2 | sed s/^\\\/\\\///` local port=`echo $url | cut -d \: -f 3` [ -z "$port" ] && break nc -z "$host" $port >/dev/null && break done if [ "$url" == "0" ]; then log_error "ERROR: none of the URLs in '$@' is reachable." return 1 fi echo $url } # Run mysqld with --wsrep-recover and parse recovered position from log. # Position will be stored in wsrep_start_position_opt global. wsrep_recovery() { cmd="$@" wr_logfile=$(mktemp) log_notice "WSREP: Running position recovery" $cmd --log_error=$wr_logfile --wsrep-recover rp=$(grep "WSREP: Recovered position:" $wr_logfile) if [ -z "$rp" ]; then skipped=$(grep WSREP $wr_logfile | grep "skipping position recovery") if [ -z "$skipped" ]; then log_error "WSREP: Failed to recover position: " \ `cat $wr_logfile`; else log_notice "WSREP: Position recovery skipped" fi else start_pos=$(echo $rp | sed 's/.*WSREP\:\ Recovered\ position://' \ | sed 's/^[ \t]*//') wsrep_start_position_opt="--wsrep_start_position=$start_pos" log_notice "WSREP: Recovered position $start_pos" fi rm $wr_logfile } parse_arguments() { # We only need to pass arguments through to the server if we don't # handle them here. So, we collect unrecognized options (passed on # the command line) into the args variable. pick_args= if test "$1" = PICK-ARGS-FROM-ARGV then pick_args=1 shift fi for arg do val=`echo "$arg" | sed -e "s;--[^=]*=;;"` case "$arg" in # these get passed explicitly to mysqld --basedir=*) MY_BASEDIR_VERSION="$val" ;; --datadir=*) DATADIR="$val" ;; --pid-file=*) pid_file="$val" ;; --user=*) user="$val"; SET_USER=1 ;; # these might have been set in a [mysqld_safe] section of my.cnf # they are added to mysqld command line to override settings from my.cnf --log-error=*) err_log="$val" ;; --port=*) mysql_tcp_port="$val" ;; --socket=*) mysql_unix_port="$val" ;; # mysqld_safe-specific options - must be set in my.cnf ([mysqld_safe])! 
--core-file-size=*) core_file_size="$val" ;; --ledir=*) ledir="$val" ;; --mysqld=*) MYSQLD="$val" ;; --mysqld-version=*) if test -n "$val" then MYSQLD="mysqld-$val" else MYSQLD="mysqld" fi ;; --nice=*) niceness="$val" ;; --open-files-limit=*) open_files="$val" ;; --open_files_limit=*) open_files="$val" ;; --skip-kill-mysqld*) KILL_MYSQLD=0 ;; --syslog) want_syslog=1 ;; --skip-syslog) want_syslog=0 ;; --syslog-tag=*) syslog_tag="$val" ;; --timezone=*) TZ="$val"; export TZ; ;; --wsrep[-_]urls=*) wsrep_urls="$val"; ;; --help) usage ;; *) if test -n "$pick_args" then append_arg_to_args "$arg" fi ;; esac done } # # First, try to find BASEDIR and ledir (where mysqld is) # if echo '/usr/share/mysql' | grep '^/' > /dev/null then relpkgdata=`echo '/usr/share/mysql' | sed -e 's,^/,,' -e 's,^/,,' -e 's,^,./,'` else # pkgdatadir is not relative to prefix relpkgdata='/usr/share/mysql' fi MY_PWD=`pwd` # Check for the directories we would expect from a binary release install if test -n "$MY_BASEDIR_VERSION" -a -d "$MY_BASEDIR_VERSION" then # BASEDIR is already overridden on command line. Do not re-set. # Use BASEDIR to discover le. if test -x "$MY_BASEDIR_VERSION/libexec/mysqld" then ledir="$MY_BASEDIR_VERSION/libexec" else ledir="$MY_BASEDIR_VERSION/bin" fi elif test -f "$relpkgdata"/english/errmsg.sys -a -x "$MY_PWD/bin/mysqld" then MY_BASEDIR_VERSION="$MY_PWD" # Where bin, share and data are ledir="$MY_PWD/bin" # Where mysqld is # Check for the directories we would expect from a source install elif test -f "$relpkgdata"/english/errmsg.sys -a -x "$MY_PWD/libexec/mysqld" then MY_BASEDIR_VERSION="$MY_PWD" # Where libexec, share and var are ledir="$MY_PWD/libexec" # Where mysqld is # Since we didn't find anything, used the compiled-in defaults else MY_BASEDIR_VERSION='/' ledir='/usr/sbin' fi # # Second, try to find the data directory # # Try where the binary installs put it if test -d $MY_BASEDIR_VERSION/data/mysql then DATADIR=$MY_BASEDIR_VERSION/data if test -z "$defaults" -a -r "$DATADIR/my.cnf" then defaults="--defaults-extra-file=$DATADIR/my.cnf" fi # Next try where the source installs put it elif test -d $MY_BASEDIR_VERSION/var/mysql then DATADIR=$MY_BASEDIR_VERSION/var # Or just give up and use our compiled-in default else DATADIR=/var/lib/mysql fi if test -z "$MYSQL_HOME" then if test -r "$MY_BASEDIR_VERSION/my.cnf" && test -r "$DATADIR/my.cnf" then log_error "WARNING: Found two instances of my.cnf - $MY_BASEDIR_VERSION/my.cnf and $DATADIR/my.cnf IGNORING $DATADIR/my.cnf" MYSQL_HOME=$MY_BASEDIR_VERSION elif test -r "$DATADIR/my.cnf" then log_error "WARNING: Found $DATADIR/my.cnf The data directory is a deprecated location for my.cnf, please move it to $MY_BASEDIR_VERSION/my.cnf" MYSQL_HOME=$DATADIR else MYSQL_HOME=$MY_BASEDIR_VERSION fi fi export MYSQL_HOME # Get first arguments from the my.cnf file, groups [mysqld] and [mysqld_safe] # and then merge with the command line arguments if test -x "$MY_BASEDIR_VERSION/bin/my_print_defaults" then print_defaults="$MY_BASEDIR_VERSION/bin/my_print_defaults" elif test -x ./bin/my_print_defaults then print_defaults="./bin/my_print_defaults" elif test -x /usr/bin/my_print_defaults then print_defaults="/usr/bin/my_print_defaults" elif test -x /usr/bin/mysql_print_defaults then print_defaults="/usr/bin/mysql_print_defaults" else print_defaults="my_print_defaults" fi append_arg_to_args () { args="$args "`shell_quote_string "$1"` } args= SET_USER=2 parse_arguments `$print_defaults $defaults --loose-verbose mysqld server` if test $SET_USER -eq 2 then SET_USER=0 fi 
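# Informal note on precedence (an illustration, no new logic): options from
# the [mysqld]/[server] groups parsed above are overridden by the
# [mysqld_safe]/[safe_mysqld] groups parsed next, and both are overridden by
# explicit command-line arguments, so a hypothetical invocation such as
#   mysqld_safe --datadir=/srv/mysql --log-error=/var/log/mysqld.log &
# wins over any datadir/log-error setting found in my.cnf.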
parse_arguments `$print_defaults $defaults --loose-verbose mysqld_safe safe_mysqld` parse_arguments PICK-ARGS-FROM-ARGV "$@" # Determine what logging facility to use # Ensure that 'logger' exists, if it's requested if [ $want_syslog -eq 1 ] then my_which logger > /dev/null 2>&1 if [ $? -ne 0 ] then log_error "--syslog requested, but no 'logger' program found. Please ensure that 'logger' is in your PATH, or do not specify the --syslog option to mysqld_safe." exit 1 fi fi if [ -n "$err_log" -o $want_syslog -eq 0 ] then if [ -n "$err_log" ] then # mysqld adds ".err" if there is no extension on the --log-error # argument; must match that here, or mysqld_safe will write to a # different log file than mysqld # mysqld does not add ".err" to "--log-error=foo."; it considers a # trailing "." as an extension if expr "$err_log" : '.*\.[^/]*$' > /dev/null then : else err_log="$err_log".err fi case "$err_log" in /* ) ;; * ) err_log="$DATADIR/$err_log" ;; esac else err_log=$DATADIR/`/bin/hostname`.err fi append_arg_to_args "--log-error=$err_log" if [ $want_syslog -eq 1 ] then # User explicitly asked for syslog, so warn that it isn't used log_error "Can't log to error log and syslog at the same time. Remove all --log-error configuration options for --syslog to take effect." fi # Log to err_log file log_notice "Logging to '$err_log'." logging=file else if [ -n "$syslog_tag" ] then # Sanitize the syslog tag syslog_tag=`echo "$syslog_tag" | sed -e 's/[^a-zA-Z0-9_-]/_/g'` syslog_tag_mysqld_safe="${syslog_tag_mysqld_safe}-$syslog_tag" syslog_tag_mysqld="${syslog_tag_mysqld}-$syslog_tag" fi log_notice "Logging to syslog." logging=syslog fi USER_OPTION="" if test -w / -o "$USER" = "root" then if test "$user" != "root" -o $SET_USER = 1 then USER_OPTION="--user=$user" fi # Change the err log to the right user, if it is in use if [ $want_syslog -eq 0 ]; then touch "$err_log" chown $user "$err_log" fi if test -n "$open_files" then ulimit -n $open_files fi fi if test -n "$open_files" then append_arg_to_args "--open-files-limit=$open_files" fi safe_mysql_unix_port=${mysql_unix_port:-${MYSQL_UNIX_PORT:-/var/lib/mysql/mysql.sock}} # Make sure that directory for $safe_mysql_unix_port exists mysql_unix_port_dir=`dirname $safe_mysql_unix_port` if [ ! -d $mysql_unix_port_dir ] then mkdir $mysql_unix_port_dir chown $user $mysql_unix_port_dir chmod 755 $mysql_unix_port_dir fi # If the user doesn't specify a binary, we assume name "mysqld" if test -z "$MYSQLD" then MYSQLD=mysqld fi if test ! -x "$ledir/$MYSQLD" then log_error "The file $ledir/$MYSQLD does not exist or is not executable. Please cd to the mysql installation directory and restart this script from there as follows: ./bin/mysqld_safe& See http://dev.mysql.com/doc/mysql/en/mysqld-safe.html for more information" exit 1 fi if test -z "$pid_file" then pid_file="$DATADIR/`/bin/hostname`.pid" else case "$pid_file" in /* ) ;; * ) pid_file="$DATADIR/$pid_file" ;; esac fi append_arg_to_args "--pid-file=$pid_file" if test -n "$mysql_unix_port" then append_arg_to_args "--socket=$mysql_unix_port" fi if test -n "$mysql_tcp_port" then append_arg_to_args "--port=$mysql_tcp_port" fi if test $niceness -eq 0 then NOHUP_NICENESS="nohup" else NOHUP_NICENESS="nohup nice -$niceness" fi # Using nice with no args to get the niceness level is GNU-specific. # This check could be extended for other operating systems (e.g., # BSD could use "nohup sh -c 'ps -o nice -p $$' | tail -1"). # But, it also seems that GNU nohup is the only one which messes # with the priority, so this is okay. 
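# Worked example of the adjustment below (numbers are made up): if `nice`
# reports 0 while `nohup nice` reports 5, nice_value_diff becomes 5, the
# requested niceness (0 by default) is lowered to -5, and the server ends up
# started as "nice --5 nohup ...", cancelling the priority increase that
# nohup by itself would have introduced.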
if nohup nice > /dev/null 2>&1 then normal_niceness=`nice` nohup_niceness=`nohup nice 2>/dev/null` numeric_nice_values=1 for val in $normal_niceness $nohup_niceness do case "$val" in -[0-9] | -[0-9][0-9] | -[0-9][0-9][0-9] | \ [0-9] | [0-9][0-9] | [0-9][0-9][0-9] ) ;; * ) numeric_nice_values=0 ;; esac done if test $numeric_nice_values -eq 1 then nice_value_diff=`expr $nohup_niceness - $normal_niceness` if test $? -eq 0 && test $nice_value_diff -gt 0 && \ nice --$nice_value_diff echo testing > /dev/null 2>&1 then # nohup increases the priority (bad), and we are permitted # to lower the priority with respect to the value the user # might have been given niceness=`expr $niceness - $nice_value_diff` NOHUP_NICENESS="nice -$niceness nohup" fi fi else if nohup echo testing > /dev/null 2>&1 then : else # nohup doesn't work on this system NOHUP_NICENESS="" fi fi # Try to set the core file size (even if we aren't root) because many systems # don't specify a hard limit on core file size. if test -n "$core_file_size" then ulimit -c $core_file_size fi # # If there exists an old pid file, check if the daemon is already running # Note: The switches to 'ps' may depend on your operating system if test -f "$pid_file" then PID=`cat "$pid_file"` if /bin/kill -0 $PID > /dev/null 2> /dev/null then if /bin/ps wwwp $PID | grep -v " grep" | grep -v mysqld_safe | grep -- "$MYSQLD" > /dev/null then # The pid contains a mysqld process log_error "A mysqld process already exists" exit 1 fi fi rm -f "$pid_file" if test -f "$pid_file" then log_error "Fatal error: Can't remove the pid file: $pid_file Please remove it manually and start $0 again; mysqld daemon not started" exit 1 fi fi # # Uncomment the following lines if you want all tables to be automatically # checked and repaired during startup. You should add sensible key_buffer # and sort_buffer values to my.cnf to improve check performance or require # less disk space. # Alternatively, you can start mysqld with the "myisam-recover" option. See # the manual for details. # # echo "Checking tables in $DATADIR" # $MY_BASEDIR_VERSION/bin/myisamchk --silent --force --fast --medium-check $DATADIR/*/*.MYI # $MY_BASEDIR_VERSION/bin/isamchk --silent --force $DATADIR/*/*.ISM # Does this work on all systems? #if type ulimit | grep "shell builtin" > /dev/null #then # ulimit -n 256 > /dev/null 2>&1 # Fix for BSD and FreeBSD systems #fi cmd="$NOHUP_NICENESS" for i in "$ledir/$MYSQLD" "$defaults" "--basedir=$MY_BASEDIR_VERSION" \ "--datadir=$DATADIR" "$USER_OPTION" do cmd="$cmd "`shell_quote_string "$i"` done cmd="$cmd $args" # Avoid 'nohup: ignoring input' warning nohup_redir="" test -n "$NOHUP_NICENESS" && nohup_redir=" < /dev/null" log_notice "Starting $MYSQLD daemon with databases from $DATADIR" while true do rm -f $safe_mysql_unix_port "$pid_file" # Some extra safety [ -n "$wsrep_urls" ] && url=`wsrep_pick_url $wsrep_urls` # check connect address if [ -z "$url" ] then wsrep_recovery "$cmd" eval_log_error "$cmd $wsrep_start_position_opt $nohup_redir" else wsrep_recovery "$cmd" eval_log_error "$cmd $wsrep_start_position_opt --wsrep_cluster_address=$url $nohup_redir" fi if test ! -f "$pid_file" # This is removed if normal shutdown then break fi if true && test $KILL_MYSQLD -eq 1 then # Test if one process was hanging. # This is only a fix for Linux (running as base 3 mysqld processes) # but should work for the rest of the servers. # The only thing is ps x => redhat 5 gives warnings when using ps -x. # kill -9 is used or the process won't react on the kill. 
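# Illustration with assumed values: with ledir=/usr/sbin, MYSQLD=mysqld and
# pid_file=/var/lib/mysql/host1.pid, the check below boils down to
#   ps xaww | grep -v "grep" | grep "/usr/sbin/mysqld\>" | grep -c "pid-file=/var/lib/mysql/host1.pid"
# i.e. only mysqld processes started against this script's pid-file are
# counted and killed, so concurrent mysqld_safe instances are left alone.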
numofproces=`ps xaww | grep -v "grep" | grep "$ledir/$MYSQLD\>" | grep -c "pid-file=$pid_file"` log_notice "Number of processes running now: $numofproces" I=1 while test "$I" -le "$numofproces" do PROC=`ps xaww | grep "$ledir/$MYSQLD\>" | grep -v "grep" | grep "pid-file=$pid_file" | sed -n '$p'` for T in $PROC do break done # echo "TEST $I - $T **" if kill -9 $T then log_error "$MYSQLD process hanging, pid $T - killed" else break fi I=`expr $I + 1` done fi log_notice "mysqld restarted" done log_notice "mysqld from pid file $pid_file ended" galera-3-25.3.20/scripts/mysql/freebsd/0000755000015300001660000000000013042054732017404 5ustar jenkinsjenkinsgalera-3-25.3.20/scripts/mysql/freebsd/mysql-server.sh0000755000015300001660000000521313042054732022415 0ustar jenkinsjenkins#!/bin/sh # # $FreeBSD: tags/RELEASE_9_1_0/databases/mysql55-server/files/mysql-server.in 302141 2012-08-05 23:19:36Z dougb $ # # PROVIDE: mysql # REQUIRE: LOGIN # KEYWORD: shutdown # # Add the following line to /etc/rc.conf to enable mysql: # mysql_enable (bool): Set to "NO" by default. # Set it to "YES" to enable MySQL. # mysql_limits (bool): Set to "NO" by default. # Set it to yes to run `limits -e -U mysql` # just before mysql starts. # mysql_dbdir (str): Default to "/var/db/mysql" # Base database directory. # mysql_pidfile (str): Custum PID file path and name. # Default to "${mysql_dbdir}/${hostname}.pid". # mysql_args (str): Custom additional arguments to be passed # to mysqld_safe (default empty). # . /etc/rc.subr name="mysql" rcvar=mysql_enable load_rc_config $name : ${mysql_enable="NO"} : ${mysql_limits="NO"} : ${mysql_dbdir="/var/db/mysql"} mysql_user="mysql" mysql_limits_args="-e -U ${mysql_user}" pidfile=${mysql_pidfile:-"${mysql_dbdir}/`/bin/hostname`.pid"} command="/usr/sbin/daemon" command_args="-c -f /usr/local/bin/mysqld_safe --defaults-extra-file=${mysql_dbdir}/my.cnf --user=${mysql_user} --datadir=${mysql_dbdir} --pid-file=${pidfile} ${mysql_args}" procname="/usr/local/libexec/mysqld" start_precmd="${name}_prestart" start_postcmd="${name}_poststart" mysql_install_db="/usr/local/bin/mysql_install_db" mysql_install_db_args="--basedir=/usr/local --datadir=${mysql_dbdir} --force" service_startup_timeout=900 startup_sleep=1 sst_progress_file=${mysql_dbdir}/sst_in_progress extra_commands="bootstrap" bootstrap_cmd="mysql_bootstrap" for gcc in gcc44 gcc48 do gcc_dir=/usr/local/lib/${gcc} [ -d "${gcc_dir}" ] && export LD_LIBRARY_PATH="${gcc_dir}:$LD_LIBRARY_PATH" done mysql_bootstrap() { # Bootstrap the cluster, start the first node that initiate the cluster check_startmsgs && echo "Bootstrapping cluster" shift $0 start $rc_extra_args --wsrep-new-cluster } mysql_create_auth_tables() { eval $mysql_install_db $mysql_install_db_args >/dev/null 2>/dev/null [ $? -eq 0 ] && chown -R ${mysql_user}:${mysql_user} ${mysql_dbdir} } mysql_prestart() { if [ ! -d "${mysql_dbdir}/mysql/." ]; then mysql_create_auth_tables || return 1 fi if checkyesno mysql_limits; then eval `/usr/bin/limits ${mysql_limits_args}` 2>/dev/null else return 0 fi } mysql_poststart() { local timeout=${service_startup_timeout} while [ ! 
-f "${pidfile}" -a ${timeout} -gt 0 ]; do if test -e $sst_progress_file && [ $startup_sleep -ne 100 ]; then check_startmsgs && echo "SST in progress, setting sleep higher" startup_sleep=100 fi timeout=$(( timeout - 1 )) sleep $startup_sleep done return 0 } run_rc_command "$1" galera-3-25.3.20/scripts/mysql/freebsd/server-descr0000644000015300001660000000031513042054732021732 0ustar jenkinsjenkinsMySQL is a very fast, multi-threaded, multi-user and robust SQL (Structured Query Language) database server. WWW: http://www.mysql.com/ Built with wsrep patch %{RELEASE}. WWW: http://www.codership.com/ galera-3-25.3.20/scripts/mysql/freebsd/client-mtree0000644000015300001660000004223013042054732021720 0ustar jenkinsjenkins# $FreeBSD: /tmp/pcvs/ports/Templates/BSD.local.dist,v 1.3 2010-11-12 20:57:14 pav Exp $ # # Please see the file src/etc/mtree/README before making changes to this file. # /set type=dir uname=root gname=wheel mode=0755 . bin .. etc devd .. man.d .. pam.d .. rc.d .. .. include X11 .. .. info .. lib X11 app-defaults .. fonts local .. .. .. .. libdata ldconfig .. ldconfig32 .. pkgconfig .. .. libexec .. man /set uname=man cat1 .. cat2 .. cat3 .. cat4 .. cat5 .. cat6 .. cat7 .. cat8 .. cat9 .. catl .. catn .. de.ISO8859-1 uname=root cat1 .. cat2 .. cat3 .. cat4 .. cat5 .. cat6 .. cat7 .. cat8 .. cat9 .. catl .. catn .. /set uname=root man1 .. man2 .. man3 .. man4 .. man5 .. man6 .. man7 .. man8 .. man9 .. manl .. mann .. .. en.ISO8859-1 /set uname=man cat1 .. cat1aout .. cat2 .. cat3 .. cat4 i386 .. .. cat5 .. cat6 .. cat7 .. cat8 i386 .. .. cat9 i386 .. .. catn .. .. ja uname=root cat1 .. cat2 .. cat3 .. cat4 .. cat5 .. cat6 .. cat7 .. cat8 .. cat9 .. catl .. catn .. /set uname=root man1 .. man2 .. man3 .. man4 .. man5 .. man6 .. man7 .. man8 .. man9 .. manl .. mann .. .. man1 .. man2 .. man3 .. man4 .. man5 .. man6 .. man7 .. man8 .. man9 .. manl .. mann .. ru.KOI8-R /set uname=man cat1 .. cat2 .. cat3 .. cat4 .. cat5 .. cat6 .. cat7 .. cat8 .. cat9 .. catl .. catn .. /set uname=root man1 .. man2 .. man3 .. man4 .. man5 .. man6 .. man7 .. man8 .. man9 .. manl .. mann .. .. .. sbin .. share aclocal .. dict .. doc ja .. .. emacs site-lisp .. .. examples .. java classes .. .. locale af LC_MESSAGES .. .. am LC_MESSAGES .. .. ar LC_MESSAGES .. .. az LC_MESSAGES .. .. be LC_MESSAGES .. .. bg LC_MESSAGES .. .. bn LC_MESSAGES .. .. br LC_MESSAGES .. .. bs LC_MESSAGES .. .. ca LC_MESSAGES .. .. cs LC_MESSAGES .. .. cy LC_MESSAGES .. .. da LC_MESSAGES .. .. de LC_MESSAGES .. .. de_AT LC_MESSAGES .. .. dk LC_MESSAGES .. .. ee LC_MESSAGES .. .. el LC_MESSAGES .. .. en LC_MESSAGES .. .. en_AU LC_MESSAGES .. .. en_CA LC_MESSAGES .. .. en_GB LC_MESSAGES .. .. eo LC_MESSAGES .. .. es LC_MESSAGES .. .. es_ES LC_MESSAGES .. .. es_MX LC_MESSAGES .. .. et LC_MESSAGES .. .. eu LC_MESSAGES .. .. fa LC_MESSAGES .. .. fa_IR LC_MESSAGES .. .. fi LC_MESSAGES .. .. fr LC_MESSAGES .. .. fr_FR LC_MESSAGES .. .. ga LC_MESSAGES .. .. gl LC_MESSAGES .. .. gu LC_MESSAGES .. .. he LC_MESSAGES .. .. hi LC_MESSAGES .. .. hr LC_MESSAGES .. .. hu LC_MESSAGES .. .. id LC_MESSAGES .. .. is LC_MESSAGES .. .. it LC_MESSAGES .. .. ja LC_MESSAGES .. .. ka LC_MESSAGES .. .. kn LC_MESSAGES .. .. ko LC_MESSAGES .. .. li LC_MESSAGES .. .. lt LC_MESSAGES .. .. lv LC_MESSAGES .. .. mk LC_MESSAGES .. .. ml LC_MESSAGES .. .. mn LC_MESSAGES .. .. ms LC_MESSAGES .. .. mt LC_MESSAGES .. .. nb LC_MESSAGES .. .. ne LC_MESSAGES .. .. nl LC_MESSAGES .. .. nn LC_MESSAGES .. .. no LC_MESSAGES .. .. or LC_MESSAGES .. .. pa LC_MESSAGES .. .. 
pl LC_MESSAGES .. .. pt LC_MESSAGES .. .. pt_BR LC_MESSAGES .. .. pt_PT LC_MESSAGES .. .. ro LC_MESSAGES .. .. ru LC_MESSAGES .. .. sk LC_MESSAGES .. .. sl LC_MESSAGES .. .. sq LC_MESSAGES .. .. sr LC_MESSAGES .. .. sr@Latn LC_MESSAGES .. .. sv LC_MESSAGES .. .. ta LC_MESSAGES .. .. tg LC_MESSAGES .. .. th LC_MESSAGES .. .. tk LC_MESSAGES .. .. tr LC_MESSAGES .. .. uk LC_MESSAGES .. .. uz LC_MESSAGES .. .. vi LC_MESSAGES .. .. wa LC_MESSAGES .. .. zh LC_MESSAGES .. .. zh_CN LC_MESSAGES .. .. zh_CN.GB2312 LC_MESSAGES .. .. zh_TW LC_MESSAGES .. .. zh_TW.Big5 LC_MESSAGES .. .. .. misc .. nls C .. af_ZA.ISO8859-1 .. af_ZA.ISO8859-15 .. af_ZA.UTF-8 .. am_ET.UTF-8 .. be_BY.CP1131 .. be_BY.CP1251 .. be_BY.ISO8859-5 .. be_BY.UTF-8 .. bg_BG.CP1251 .. bg_BG.UTF-8 .. ca_ES.ISO8859-1 .. ca_ES.ISO8859-15 .. ca_ES.UTF-8 .. cs_CZ.ISO8859-2 .. cs_CZ.UTF-8 .. da_DK.ISO8859-1 .. da_DK.ISO8859-15 .. da_DK.UTF-8 .. de_AT.ISO8859-1 .. de_AT.ISO8859-15 .. de_AT.UTF-8 .. de_CH.ISO8859-1 .. de_CH.ISO8859-15 .. de_CH.UTF-8 .. de_DE.ISO8859-1 .. de_DE.ISO8859-15 .. de_DE.UTF-8 .. el_GR.ISO8859-7 .. el_GR.UTF-8 .. en_AU.ISO8859-1 .. en_AU.ISO8859-15 .. en_AU.US-ASCII .. en_AU.UTF-8 .. en_CA.ISO8859-1 .. en_CA.ISO8859-15 .. en_CA.US-ASCII .. en_CA.UTF-8 .. en_GB.ISO8859-1 .. en_GB.ISO8859-15 .. en_GB.US-ASCII .. en_GB.UTF-8 .. en_IE.UTF-8 .. en_NZ.ISO8859-1 .. en_NZ.ISO8859-15 .. en_NZ.US-ASCII .. en_NZ.UTF-8 .. en_US.ISO8859-1 .. en_US.ISO8859-15 .. en_US.UTF-8 .. es_ES.ISO8859-1 .. es_ES.ISO8859-15 .. es_ES.UTF-8 .. et_EE.ISO8859-15 .. et_EE.UTF-8 .. fi_FI.ISO8859-1 .. fi_FI.ISO8859-15 .. fi_FI.UTF-8 .. fr_BE.ISO8859-1 .. fr_BE.ISO8859-15 .. fr_BE.UTF-8 .. fr_CA.ISO8859-1 .. fr_CA.ISO8859-15 .. fr_CA.UTF-8 .. fr_CH.ISO8859-1 .. fr_CH.ISO8859-15 .. fr_CH.UTF-8 .. fr_FR.ISO8859-1 .. fr_FR.ISO8859-15 .. fr_FR.UTF-8 .. he_IL.UTF-8 .. hi_IN.ISCII-DEV .. hr_HR.ISO8859-2 .. hr_HR.UTF-8 .. hu_HU.ISO8859-2 .. hu_HU.UTF-8 .. hy_AM.ARMSCII-8 .. hy_AM.UTF-8 .. is_IS.ISO8859-1 .. is_IS.ISO8859-15 .. is_IS.UTF-8 .. it_CH.ISO8859-1 .. it_CH.ISO8859-15 .. it_CH.UTF-8 .. it_IT.ISO8859-1 .. it_IT.ISO8859-15 .. it_IT.UTF-8 .. ja_JP.SJIS .. ja_JP.UTF-8 .. ja_JP.eucJP .. kk_KZ.PT154 .. kk_KZ.UTF-8 .. ko_KR.CP949 .. ko_KR.UTF-8 .. ko_KR.eucKR .. la_LN.ISO8859-1 .. la_LN.ISO8859-15 .. la_LN.ISO8859-2 .. la_LN.ISO8859-4 .. la_LN.US-ASCII .. lt_LT.ISO8859-13 .. lt_LT.ISO8859-4 .. lt_LT.UTF-8 .. nl_BE.ISO8859-1 .. nl_BE.ISO8859-15 .. nl_BE.UTF-8 .. nl_NL.ISO8859-1 .. nl_NL.ISO8859-15 .. nl_NL.UTF-8 .. no_NO.ISO8859-1 .. no_NO.ISO8859-15 .. no_NO.UTF-8 .. pl_PL.ISO8859-2 .. pl_PL.UTF-8 .. pt_BR.ISO8859-1 .. pt_BR.UTF-8 .. pt_PT.ISO8859-1 .. pt_PT.ISO8859-15 .. pt_PT.UTF-8 .. ro_RO.ISO8859-2 .. ro_RO.UTF-8 .. ru_RU.CP1251 .. ru_RU.CP866 .. ru_RU.ISO8859-5 .. ru_RU.KOI8-R .. ru_RU.UTF-8 .. sk_SK.ISO8859-2 .. sk_SK.UTF-8 .. sl_SI.ISO8859-2 .. sl_SI.UTF-8 .. sr_YU.ISO8859-2 .. sr_YU.ISO8859-5 .. sr_YU.UTF-8 .. sv_SE.ISO8859-1 .. sv_SE.ISO8859-15 .. sv_SE.UTF-8 .. tr_TR.ISO8859-9 .. tr_TR.UTF-8 .. uk_UA.ISO8859-5 .. uk_UA.KOI8-U .. uk_UA.UTF-8 .. zh_CN.GB18030 .. zh_CN.GB2312 .. zh_CN.GBK .. zh_CN.UTF-8 .. zh_CN.eucCN .. zh_HK.Big5HKSCS .. zh_HK.UTF-8 .. zh_TW.Big5 .. zh_TW.UTF-8 .. .. pixmaps .. sgml .. skel .. xml .. .. www .. .. 
galera-3-25.3.20/scripts/mysql/freebsd/client-plist0000644000015300001660000001107013042054732021735 0ustar jenkinsjenkins@comment PKG_FORMAT_REVISION:1.1 @name mysql-client-%{MYSQL_VER}_wsrep_%{RELEASE} @comment ORIGIN:databases/mysql%{MAJORMINOR}-client_wsrep @cwd /usr/local @srcdir %{SRCDIR} @comment "=== dependencies ===" @comment "require /usr/local/lib/gcc48/libstdc++.so" @comment // @pkgdep gcc-4.8.2.s20130808 @comment DEPORIGIN:lang/gcc48 @comment // @pkgdep openssl-1.0.1_8 @comment DEPORIGIN:security/openssl @comment // @pkgdep libexecinfo-1.1_3 @comment DEPORIGIN:devel/libexecinfo @conflicts mysql-client-[34].* @conflicts mysql-client-5.[0-57-9].* @conflicts mariadb-client-5.* @conflicts percona-client-5.* @comment "=== preinstall stage ===" @exec echo "===> Linking /usr/local/bin/bash to /bin/bash" @exec [ -x /bin/bash ] && echo "Using existing /bin/bash." || ln -s ../usr/local/bin/bash /bin/bash @comment "=== file section ===" @owner root @group wheel @mode 0444 share/licenses/mysql-client-%{MYSQL_VER}_wsrep_%{RELEASE}/catalog.mk share/licenses/mysql-client-%{MYSQL_VER}_wsrep_%{RELEASE}/LICENSE share/licenses/mysql-client-%{MYSQL_VER}_wsrep_%{RELEASE}/GPLv3 man/man1/comp_err.1.gz man/man1/msql2mysql.1.gz man/man1/mysql.1.gz man/man1/mysql_config.1.gz man/man1/mysql_config_editor.1.gz man/man1/mysql_find_rows.1.gz man/man1/mysql_waitpid.1.gz man/man1/mysqlaccess.1.gz man/man1/mysqladmin.1.gz man/man1/mysqlbinlog.1.gz man/man1/mysqlcheck.1.gz man/man1/mysqldump.1.gz man/man1/mysqlimport.1.gz man/man1/mysqlshow.1.gz man/man1/mysqlslap.1.gz @mode 0555 bin/msql2mysql bin/mysql bin/mysql_config bin/mysql_config_editor bin/mysql_find_rows bin/mysql_waitpid bin/mysqlaccess bin/mysqlaccess.conf bin/mysqladmin bin/mysqlbinlog bin/mysqlcheck bin/mysqldump bin/mysqlimport bin/mysqlshow bin/mysqlslap @mode 0444 include/mysql/big_endian.h include/mysql/byte_order_generic.h include/mysql/byte_order_generic_x86.h include/mysql/byte_order_generic_x86_64.h include/mysql/decimal.h include/mysql/errmsg.h include/mysql/keycache.h include/mysql/little_endian.h include/mysql/m_ctype.h include/mysql/m_string.h include/mysql/my_alloc.h include/mysql/my_attribute.h include/mysql/my_byteorder.h include/mysql/my_compiler.h include/mysql/my_config.h include/mysql/my_dbug.h include/mysql/my_dir.h include/mysql/my_getopt.h include/mysql/my_global.h include/mysql/my_list.h include/mysql/my_net.h include/mysql/my_pthread.h include/mysql/my_sys.h include/mysql/my_xml.h include/mysql/mysql.h include/mysql/mysql/client_authentication.h include/mysql/mysql/client_plugin.h include/mysql/mysql/client_plugin.h.pp include/mysql/mysql/get_password.h include/mysql/mysql/innodb_priv.h include/mysql/mysql/plugin.h include/mysql/mysql/plugin_audit.h include/mysql/mysql/plugin_audit.h.pp include/mysql/mysql/plugin_auth.h include/mysql/mysql/plugin_auth.h.pp include/mysql/mysql/plugin_auth_common.h include/mysql/mysql/plugin_ftparser.h include/mysql/mysql/plugin_ftparser.h.pp include/mysql/mysql/plugin_validate_password.h include/mysql/mysql/psi/mysql_file.h include/mysql/mysql/psi/mysql_idle.h include/mysql/mysql/psi/mysql_socket.h include/mysql/mysql/psi/mysql_stage.h include/mysql/mysql/psi/mysql_statement.h include/mysql/mysql/psi/mysql_table.h include/mysql/mysql/psi/mysql_thread.h include/mysql/mysql/psi/psi.h include/mysql/mysql/service_my_plugin_log.h include/mysql/mysql/service_my_snprintf.h include/mysql/mysql/service_mysql_string.h include/mysql/mysql/service_thd_alloc.h include/mysql/mysql/service_thd_wait.h 
include/mysql/mysql/service_thread_scheduler.h include/mysql/mysql/services.h include/mysql/mysql/thread_pool_priv.h include/mysql/mysql_com.h include/mysql/mysql_com_server.h include/mysql/mysql_embed.h include/mysql/mysql_time.h include/mysql/mysql_version.h include/mysql/mysqld_ername.h include/mysql/mysqld_error.h include/mysql/plugin.h include/mysql/plugin_audit.h include/mysql/plugin_ftparser.h include/mysql/plugin_validate_password.h include/mysql/sql_common.h include/mysql/sql_state.h include/mysql/sslopt-case.h include/mysql/sslopt-longopts.h include/mysql/sslopt-vars.h include/mysql/typelib.h lib/mysql/libmysqlclient.a lib/mysql/libmysqlclient.so lib/mysql/libmysqlclient.so.18 lib/mysql/libmysqlclient_r.a lib/mysql/libmysqlclient_r.so lib/mysql/libmysqlclient_r.so.18 lib/mysql/libmysqlservices.a share/aclocal/mysql.m4 libdata/ldconfig/mysql%{MAJORMINOR}-client @comment "=== postinstall stage ===" @exec /sbin/ldconfig -m /usr/local/lib/mysql @comment "=== postremove stage ===" @dirrm share/licenses/mysql-client-%{MYSQL_VER}_wsrep_%{RELEASE} @dirrm include/mysql/mysql/psi @dirrm include/mysql/mysql @dirrm include/mysql @unexec rmdir "%D/lib/mysql" 2>/dev/null || true @unexec service ldconfig start >/dev/null galera-3-25.3.20/scripts/mysql/freebsd/server-comment0000644000015300001660000000006213042054732022273 0ustar jenkinsjenkinswsrep-enabled multithreaded SQL database (server) galera-3-25.3.20/scripts/mysql/freebsd/client-descr0000644000015300001660000000031513042054732021702 0ustar jenkinsjenkinsMySQL is a very fast, multi-threaded, multi-user and robust SQL (Structured Query Language) database server. WWW: http://www.mysql.com/ Built with wsrep patch %{RELEASE}. WWW: http://www.codership.com/ galera-3-25.3.20/scripts/mysql/freebsd/server-mtree0000644000015300001660000004223013042054732021750 0ustar jenkinsjenkins# $FreeBSD: /tmp/pcvs/ports/Templates/BSD.local.dist,v 1.3 2010-11-12 20:57:14 pav Exp $ # # Please see the file src/etc/mtree/README before making changes to this file. # /set type=dir uname=root gname=wheel mode=0755 . bin .. etc devd .. man.d .. pam.d .. rc.d .. .. include X11 .. .. info .. lib X11 app-defaults .. fonts local .. .. .. .. libdata ldconfig .. ldconfig32 .. pkgconfig .. .. libexec .. man /set uname=man cat1 .. cat2 .. cat3 .. cat4 .. cat5 .. cat6 .. cat7 .. cat8 .. cat9 .. catl .. catn .. de.ISO8859-1 uname=root cat1 .. cat2 .. cat3 .. cat4 .. cat5 .. cat6 .. cat7 .. cat8 .. cat9 .. catl .. catn .. /set uname=root man1 .. man2 .. man3 .. man4 .. man5 .. man6 .. man7 .. man8 .. man9 .. manl .. mann .. .. en.ISO8859-1 /set uname=man cat1 .. cat1aout .. cat2 .. cat3 .. cat4 i386 .. .. cat5 .. cat6 .. cat7 .. cat8 i386 .. .. cat9 i386 .. .. catn .. .. ja uname=root cat1 .. cat2 .. cat3 .. cat4 .. cat5 .. cat6 .. cat7 .. cat8 .. cat9 .. catl .. catn .. /set uname=root man1 .. man2 .. man3 .. man4 .. man5 .. man6 .. man7 .. man8 .. man9 .. manl .. mann .. .. man1 .. man2 .. man3 .. man4 .. man5 .. man6 .. man7 .. man8 .. man9 .. manl .. mann .. ru.KOI8-R /set uname=man cat1 .. cat2 .. cat3 .. cat4 .. cat5 .. cat6 .. cat7 .. cat8 .. cat9 .. catl .. catn .. /set uname=root man1 .. man2 .. man3 .. man4 .. man5 .. man6 .. man7 .. man8 .. man9 .. manl .. mann .. .. .. sbin .. share aclocal .. dict .. doc ja .. .. emacs site-lisp .. .. examples .. java classes .. .. locale af LC_MESSAGES .. .. am LC_MESSAGES .. .. ar LC_MESSAGES .. .. az LC_MESSAGES .. .. be LC_MESSAGES .. .. bg LC_MESSAGES .. .. bn LC_MESSAGES .. .. br LC_MESSAGES .. .. bs LC_MESSAGES .. .. 
ca LC_MESSAGES .. .. cs LC_MESSAGES .. .. cy LC_MESSAGES .. .. da LC_MESSAGES .. .. de LC_MESSAGES .. .. de_AT LC_MESSAGES .. .. dk LC_MESSAGES .. .. ee LC_MESSAGES .. .. el LC_MESSAGES .. .. en LC_MESSAGES .. .. en_AU LC_MESSAGES .. .. en_CA LC_MESSAGES .. .. en_GB LC_MESSAGES .. .. eo LC_MESSAGES .. .. es LC_MESSAGES .. .. es_ES LC_MESSAGES .. .. es_MX LC_MESSAGES .. .. et LC_MESSAGES .. .. eu LC_MESSAGES .. .. fa LC_MESSAGES .. .. fa_IR LC_MESSAGES .. .. fi LC_MESSAGES .. .. fr LC_MESSAGES .. .. fr_FR LC_MESSAGES .. .. ga LC_MESSAGES .. .. gl LC_MESSAGES .. .. gu LC_MESSAGES .. .. he LC_MESSAGES .. .. hi LC_MESSAGES .. .. hr LC_MESSAGES .. .. hu LC_MESSAGES .. .. id LC_MESSAGES .. .. is LC_MESSAGES .. .. it LC_MESSAGES .. .. ja LC_MESSAGES .. .. ka LC_MESSAGES .. .. kn LC_MESSAGES .. .. ko LC_MESSAGES .. .. li LC_MESSAGES .. .. lt LC_MESSAGES .. .. lv LC_MESSAGES .. .. mk LC_MESSAGES .. .. ml LC_MESSAGES .. .. mn LC_MESSAGES .. .. ms LC_MESSAGES .. .. mt LC_MESSAGES .. .. nb LC_MESSAGES .. .. ne LC_MESSAGES .. .. nl LC_MESSAGES .. .. nn LC_MESSAGES .. .. no LC_MESSAGES .. .. or LC_MESSAGES .. .. pa LC_MESSAGES .. .. pl LC_MESSAGES .. .. pt LC_MESSAGES .. .. pt_BR LC_MESSAGES .. .. pt_PT LC_MESSAGES .. .. ro LC_MESSAGES .. .. ru LC_MESSAGES .. .. sk LC_MESSAGES .. .. sl LC_MESSAGES .. .. sq LC_MESSAGES .. .. sr LC_MESSAGES .. .. sr@Latn LC_MESSAGES .. .. sv LC_MESSAGES .. .. ta LC_MESSAGES .. .. tg LC_MESSAGES .. .. th LC_MESSAGES .. .. tk LC_MESSAGES .. .. tr LC_MESSAGES .. .. uk LC_MESSAGES .. .. uz LC_MESSAGES .. .. vi LC_MESSAGES .. .. wa LC_MESSAGES .. .. zh LC_MESSAGES .. .. zh_CN LC_MESSAGES .. .. zh_CN.GB2312 LC_MESSAGES .. .. zh_TW LC_MESSAGES .. .. zh_TW.Big5 LC_MESSAGES .. .. .. misc .. nls C .. af_ZA.ISO8859-1 .. af_ZA.ISO8859-15 .. af_ZA.UTF-8 .. am_ET.UTF-8 .. be_BY.CP1131 .. be_BY.CP1251 .. be_BY.ISO8859-5 .. be_BY.UTF-8 .. bg_BG.CP1251 .. bg_BG.UTF-8 .. ca_ES.ISO8859-1 .. ca_ES.ISO8859-15 .. ca_ES.UTF-8 .. cs_CZ.ISO8859-2 .. cs_CZ.UTF-8 .. da_DK.ISO8859-1 .. da_DK.ISO8859-15 .. da_DK.UTF-8 .. de_AT.ISO8859-1 .. de_AT.ISO8859-15 .. de_AT.UTF-8 .. de_CH.ISO8859-1 .. de_CH.ISO8859-15 .. de_CH.UTF-8 .. de_DE.ISO8859-1 .. de_DE.ISO8859-15 .. de_DE.UTF-8 .. el_GR.ISO8859-7 .. el_GR.UTF-8 .. en_AU.ISO8859-1 .. en_AU.ISO8859-15 .. en_AU.US-ASCII .. en_AU.UTF-8 .. en_CA.ISO8859-1 .. en_CA.ISO8859-15 .. en_CA.US-ASCII .. en_CA.UTF-8 .. en_GB.ISO8859-1 .. en_GB.ISO8859-15 .. en_GB.US-ASCII .. en_GB.UTF-8 .. en_IE.UTF-8 .. en_NZ.ISO8859-1 .. en_NZ.ISO8859-15 .. en_NZ.US-ASCII .. en_NZ.UTF-8 .. en_US.ISO8859-1 .. en_US.ISO8859-15 .. en_US.UTF-8 .. es_ES.ISO8859-1 .. es_ES.ISO8859-15 .. es_ES.UTF-8 .. et_EE.ISO8859-15 .. et_EE.UTF-8 .. fi_FI.ISO8859-1 .. fi_FI.ISO8859-15 .. fi_FI.UTF-8 .. fr_BE.ISO8859-1 .. fr_BE.ISO8859-15 .. fr_BE.UTF-8 .. fr_CA.ISO8859-1 .. fr_CA.ISO8859-15 .. fr_CA.UTF-8 .. fr_CH.ISO8859-1 .. fr_CH.ISO8859-15 .. fr_CH.UTF-8 .. fr_FR.ISO8859-1 .. fr_FR.ISO8859-15 .. fr_FR.UTF-8 .. he_IL.UTF-8 .. hi_IN.ISCII-DEV .. hr_HR.ISO8859-2 .. hr_HR.UTF-8 .. hu_HU.ISO8859-2 .. hu_HU.UTF-8 .. hy_AM.ARMSCII-8 .. hy_AM.UTF-8 .. is_IS.ISO8859-1 .. is_IS.ISO8859-15 .. is_IS.UTF-8 .. it_CH.ISO8859-1 .. it_CH.ISO8859-15 .. it_CH.UTF-8 .. it_IT.ISO8859-1 .. it_IT.ISO8859-15 .. it_IT.UTF-8 .. ja_JP.SJIS .. ja_JP.UTF-8 .. ja_JP.eucJP .. kk_KZ.PT154 .. kk_KZ.UTF-8 .. ko_KR.CP949 .. ko_KR.UTF-8 .. ko_KR.eucKR .. la_LN.ISO8859-1 .. la_LN.ISO8859-15 .. la_LN.ISO8859-2 .. la_LN.ISO8859-4 .. la_LN.US-ASCII .. lt_LT.ISO8859-13 .. lt_LT.ISO8859-4 .. lt_LT.UTF-8 .. nl_BE.ISO8859-1 .. 
nl_BE.ISO8859-15 .. nl_BE.UTF-8 .. nl_NL.ISO8859-1 .. nl_NL.ISO8859-15 .. nl_NL.UTF-8 .. no_NO.ISO8859-1 .. no_NO.ISO8859-15 .. no_NO.UTF-8 .. pl_PL.ISO8859-2 .. pl_PL.UTF-8 .. pt_BR.ISO8859-1 .. pt_BR.UTF-8 .. pt_PT.ISO8859-1 .. pt_PT.ISO8859-15 .. pt_PT.UTF-8 .. ro_RO.ISO8859-2 .. ro_RO.UTF-8 .. ru_RU.CP1251 .. ru_RU.CP866 .. ru_RU.ISO8859-5 .. ru_RU.KOI8-R .. ru_RU.UTF-8 .. sk_SK.ISO8859-2 .. sk_SK.UTF-8 .. sl_SI.ISO8859-2 .. sl_SI.UTF-8 .. sr_YU.ISO8859-2 .. sr_YU.ISO8859-5 .. sr_YU.UTF-8 .. sv_SE.ISO8859-1 .. sv_SE.ISO8859-15 .. sv_SE.UTF-8 .. tr_TR.ISO8859-9 .. tr_TR.UTF-8 .. uk_UA.ISO8859-5 .. uk_UA.KOI8-U .. uk_UA.UTF-8 .. zh_CN.GB18030 .. zh_CN.GB2312 .. zh_CN.GBK .. zh_CN.UTF-8 .. zh_CN.eucCN .. zh_HK.Big5HKSCS .. zh_HK.UTF-8 .. zh_TW.Big5 .. zh_TW.UTF-8 .. .. pixmaps .. sgml .. skel .. xml .. .. www .. .. galera-3-25.3.20/scripts/mysql/freebsd/client-message0000644000015300001660000000000013042054732022215 0ustar jenkinsjenkinsgalera-3-25.3.20/scripts/mysql/freebsd/LICENSE0000644000015300001660000000012113042054732020403 0ustar jenkinsjenkinsThis package has a single license: GPLv3 (GNU General Public License version 3). galera-3-25.3.20/scripts/mysql/freebsd/client-ldconfig0000644000015300001660000000002513042054732022365 0ustar jenkinsjenkins/usr/local/lib/mysql galera-3-25.3.20/scripts/mysql/freebsd/catalog.mk0000644000015300001660000000037213042054732021351 0ustar jenkinsjenkins_LICENSE=GPLv3 _LICENSE_NAME=GNU General Public License version 3 _LICENSE_PERMS=dist-mirror dist-sell pkg-mirror pkg-sell auto-accept _LICENSE_GROUPS=FSF GPL OSI _LICENSE_DISTFILES=mysql-%{MYSQL_VER}.tar.gz mysql-${MYSQL_VER}_wsrep_${RELEASE}.patch galera-3-25.3.20/scripts/mysql/freebsd/server-message0000644000015300001660000000040613042054732022257 0ustar jenkinsjenkins************************************************************************ Remember to run mysql_upgrade the first time you start the MySQL server after an upgrade from an earlier version. ************************************************************************ galera-3-25.3.20/scripts/mysql/freebsd/client-comment0000644000015300001660000000006213042054732022243 0ustar jenkinsjenkinswsrep-enabled multithreaded SQL database (client) galera-3-25.3.20/scripts/mysql/freebsd/server-plist0000644000015300001660000001653113042054732021774 0ustar jenkinsjenkins@comment PKG_FORMAT_REVISION:1.1 @name mysql-server-%{MYSQL_VER}_wsrep_%{RELEASE} @comment ORIGIN:databases/mysql%{MAJORMINOR}-server_wsrep @cwd /usr/local @srcdir %{SRCDIR} @comment "=== dependencies ===" @comment "require /usr/local/lib/gcc48/libstdc++.so" @comment // @pkgdep gcc-4.8.2.s20130808 @comment DEPORIGIN:lang/gcc48 @comment // @pkgdep openssl-1.0.1_8 @comment DEPORIGIN:security/openssl @comment // @pkgdep libexecinfo-1.1_3 @comment DEPORIGIN:devel/libexecinfo @comment // @pkgdep lsof-4.88.d,8 @comment DEPORIGIN:sysutils/lsof @comment // @pkgdep sudo-1.8.7_1 @comment DEPORIGIN:security/sudo @comment // @pkgdep rsync-3.0.9_3 @comment DEPORIGIN:net/rsync @comment // @pkgdep perl-5.14.4 @comment DEPORIGIN:lang/perl5.14 @pkgdep mysql-client-%{MYSQL_VER}_wsrep_%{RELEASE} @comment DEPORIGIN:databases/mysql%{MAJORMINOR}-client_wsrep @conflicts mysql-server-[34].* @conflicts mysql-server-5.[0-57-9].* @conflicts mariadb-server-5.* @conflicts percona-server-5.* @comment "=== preinstall stage ===" @exec echo "===> Linking /usr/local/bin/bash to /bin/bash" @exec [ -x /bin/bash ] && echo "Using existing /bin/bash." 
|| ln -s ../usr/local/bin/bash /bin/bash @exec echo "===> Creating users and/or groups." @exec if ! /usr/sbin/pw groupshow mysql >/dev/null 2>&1; then echo "Creating group 'mysql' with gid '88'."; /usr/sbin/pw groupadd mysql -g 88; else echo "Using existing group 'mysql'."; fi @exec if ! /usr/sbin/pw usershow mysql >/dev/null 2>&1; then echo "Creating user 'mysql' with uid '88'."; /usr/sbin/pw useradd mysql -u 88 -g 88 -c "MySQL Daemon" -d /var/db/mysql -s /usr/sbin/nologin; else echo "Using existing user 'mysql'."; fi @exec install -d -g 88 -o 88 /var/db/mysql @comment "=== preremove stage ===" @unexec %D/etc/rc.d/mysql-server forcestop 2>/dev/null || true @comment "=== file section ===" @owner root @group wheel @mode 0444 share/licenses/mysql-server-%{MYSQL_VER}_wsrep_%{RELEASE}/catalog.mk share/licenses/mysql-server-%{MYSQL_VER}_wsrep_%{RELEASE}/LICENSE share/licenses/mysql-server-%{MYSQL_VER}_wsrep_%{RELEASE}/GPLv3 @comment "added: man/man1/innochecksum.1.gz" man/man1/innochecksum.1.gz man/man1/my_print_defaults.1.gz man/man1/myisam_ftdump.1.gz man/man1/myisamchk.1.gz man/man1/myisamlog.1.gz man/man1/myisampack.1.gz man/man1/mysql.server.1.gz man/man1/mysql_convert_table_format.1.gz man/man1/mysql_fix_extensions.1.gz man/man1/mysql_install_db.1.gz man/man1/mysql_plugin.1.gz man/man1/mysql_secure_installation.1.gz man/man1/mysql_setpermission.1.gz man/man1/mysql_tzinfo_to_sql.1.gz man/man1/mysql_upgrade.1.gz man/man1/mysql_zap.1.gz man/man1/mysqlbug.1.gz man/man1/mysqld_multi.1.gz man/man1/mysqld_safe.1.gz man/man1/mysqldumpslow.1.gz man/man1/mysqlhotcopy.1.gz man/man1/mysqlman.1.gz man/man1/mysqltest.1.gz man/man1/perror.1.gz man/man1/replace.1.gz man/man1/resolve_stack_dump.1.gz man/man1/resolveip.1.gz man/man8/mysqld.8.gz @mode 0555 bin/innochecksum bin/my_print_defaults bin/myisam_ftdump bin/myisamchk bin/myisamlog bin/myisampack bin/mysql_convert_table_format bin/mysql_fix_extensions bin/mysql_install_db bin/mysql_plugin bin/mysql_secure_installation bin/mysql_setpermission bin/mysql_tzinfo_to_sql bin/mysql_upgrade bin/mysql_zap bin/mysqlbug bin/mysqld_multi bin/mysqld_safe bin/mysqldumpslow bin/mysqlhotcopy bin/mysqltest bin/perror bin/replace bin/resolve_stack_dump bin/resolveip @mode 0444 @ignore lib/mysql/libmysqld.a lib/mysql/plugin/adt_null.so lib/mysql/plugin/auth.so lib/mysql/plugin/auth_test_plugin.so lib/mysql/plugin/daemon_example.ini lib/mysql/plugin/libdaemon_example.so lib/mysql/plugin/mypluglib.so lib/mysql/plugin/qa_auth_client.so lib/mysql/plugin/qa_auth_interface.so lib/mysql/plugin/qa_auth_server.so lib/mysql/plugin/semisync_master.so lib/mysql/plugin/semisync_slave.so lib/mysql/plugin/validate_password.so @mode 0555 libexec/mysqld share/mysql/binary-configure @mode 0444 share/mysql/bulgarian/errmsg.sys share/mysql/charsets/Index.xml share/mysql/charsets/README share/mysql/charsets/armscii8.xml share/mysql/charsets/ascii.xml share/mysql/charsets/cp1250.xml share/mysql/charsets/cp1251.xml share/mysql/charsets/cp1256.xml share/mysql/charsets/cp1257.xml share/mysql/charsets/cp850.xml share/mysql/charsets/cp852.xml share/mysql/charsets/cp866.xml share/mysql/charsets/dec8.xml share/mysql/charsets/geostd8.xml share/mysql/charsets/greek.xml share/mysql/charsets/hebrew.xml share/mysql/charsets/hp8.xml share/mysql/charsets/keybcs2.xml share/mysql/charsets/koi8r.xml share/mysql/charsets/koi8u.xml share/mysql/charsets/latin1.xml share/mysql/charsets/latin2.xml share/mysql/charsets/latin5.xml share/mysql/charsets/latin7.xml share/mysql/charsets/macce.xml 
share/mysql/charsets/macroman.xml share/mysql/charsets/swe7.xml share/mysql/czech/errmsg.sys share/mysql/danish/errmsg.sys share/mysql/dictionary.txt share/mysql/dutch/errmsg.sys share/mysql/english/errmsg.sys share/mysql/errmsg-utf8.txt share/mysql/estonian/errmsg.sys share/mysql/fill_help_tables.sql share/mysql/french/errmsg.sys share/mysql/german/errmsg.sys share/mysql/greek/errmsg.sys share/mysql/hungarian/errmsg.sys share/mysql/innodb_memcached_config.sql share/mysql/italian/errmsg.sys share/mysql/japanese/errmsg.sys share/mysql/korean/errmsg.sys share/mysql/magic share/mysql/my-default.cnf share/mysql/mysql-log-rotate share/mysql/mysql.server share/mysql/mysql_security_commands.sql share/mysql/mysql_system_tables.sql share/mysql/mysql_system_tables_data.sql share/mysql/mysql_test_data_timezone.sql share/mysql/mysqld_multi.server share/mysql/norwegian-ny/errmsg.sys share/mysql/norwegian/errmsg.sys share/mysql/polish/errmsg.sys share/mysql/portuguese/errmsg.sys share/mysql/romanian/errmsg.sys share/mysql/russian/errmsg.sys share/mysql/serbian/errmsg.sys share/mysql/slovak/errmsg.sys share/mysql/spanish/errmsg.sys share/mysql/swedish/errmsg.sys share/mysql/ukrainian/errmsg.sys @comment "wsrep specific @mode 0555 bin/wsrep_sst_common bin/wsrep_sst_mysqldump bin/wsrep_sst_rsync bin/wsrep_sst_rsync_wan bin/wsrep_sst_xtrabackup @mode 0444 share/mysql/my_wsrep.cnf share/mysql/wsrep_notify share/doc/mysql%{MAJORMINOR}-server_wsrep/README share/doc/mysql%{MAJORMINOR}-server_wsrep/QUICK_START @mode 0555 etc/rc.d/mysql-server @comment "=== postinstall stage ===" @comment "=== postremove stage ===" @dirrm share/licenses/mysql-server-%{MYSQL_VER}_wsrep_%{RELEASE} @dirrm lib/mysql/plugin @unexec rmdir "%D/lib/mysql" 2>/dev/null || true @dirrm share/mysql/bulgarian @dirrm share/mysql/charsets @dirrm share/mysql/czech @dirrm share/mysql/danish @dirrm share/mysql/dutch @dirrm share/mysql/english @dirrm share/mysql/estonian @dirrm share/mysql/french @dirrm share/mysql/german @dirrm share/mysql/greek @dirrm share/mysql/hungarian @dirrm share/mysql/italian @dirrm share/mysql/japanese @dirrm share/mysql/korean @dirrm share/mysql/norwegian @dirrm share/mysql/norwegian-ny @dirrm share/mysql/polish @dirrm share/mysql/portuguese @dirrm share/mysql/romanian @dirrm share/mysql/russian @dirrm share/mysql/serbian @dirrm share/mysql/slovak @dirrm share/mysql/spanish @dirrm share/mysql/swedish @dirrm share/mysql/ukrainian @dirrm share/mysql @unexec if [ -f %D/info/dir ]; then if sed -e '1,/Menu:/d' %D/info/dir | grep -q '^[*] '; then true; else rm %D/info/dir; fi; fi @unexec if /usr/sbin/pw user show mysql >/dev/null 2>&1; then echo "==> You should manually remove the \"mysql\" user. "; fi @unexec if /usr/sbin/pw group show mysql >/dev/null 2>&1; then echo "==> You should manually remove the \"mysql\" group. 
"; fi galera-3-25.3.20/scripts/mysql/LICENSE0000644000015300001660000000006613042054732017001 0ustar jenkinsjenkinsSee ./mysql/LICENSE.mysql and ./galera/LICENSE.galera galera-3-25.3.20/scripts/mysql/mysql_var_5.1.tgz0000644000015300001660000047454713042054732021145 0ustar jenkinsjenkinsLJ]|ՙ\10 =]K!!hwvwٙ" c/ I68){R 1_r$Ws\p-.wi%wmzX}zo8zhK48ȏ)TT(, RRiXR&srjiwNlvϵ'&OA'O'$8/ .JzW*"@u[/fV"7LvLluucr,xUx?rՐ2ǒ!Cs,ǿt_= U(PO| *pɳeGmW<?_iG=R?jYD/'NRW:E~ZRn){)H_1>>4=፝~ƸECټ#ȑ#G9rȑ#GY8։;r ~cѢ>ln!ypZT?uJ{ (+V(eHsȑ#G9rȑ#GG|NS.o)P^Up~зg g+{n-E20my\8 +Gmb ɑfF kjjvBΑ#G9rq\ph87'oho96)PdmmAZQ)٪5ՙE ˯:byoRrK;6ok9|jZ6X-?2%jqyk{rjj^isg0XTlI_Xtxo wb2eLhaqkקNU_Ԓ'$zu QfLXf /P^9r|/ە8] ;ڰL'%(7&t6h}6IV ^}Pת^Vk|;V U(kBz(0% P`h:7Yî5ApH D|N庖dN%t k`!Dd\LPv #;h*[YSX3V5%=K4etmR&;$fRĘdMH/ɚԐ{NB_dM%j&/kMi&l5O(X0&jLvµU p!p\iT+ytU Ȭu[NaJ,:5!g4j{sz֔ˇ]Ϛ^hMM)k)0kjONĄTh&b˂5 \k+RCi&m+h ޢ`҉2Sִt \hmR&% Z3q)$j \Fv-z(pfnք\ &Jxd<hV3Aqa5t: rʰg$ē앍gX,9i&fΒ}حȗjGcƬk(մǯdM%Zضk>'VHĤ59Ą뇂g -fPڬ XE3㠙XMu[} lQǵkPvdMIIhZ#X|s،On{WZ~@sdS|Lh:顰H7['w>bͮN$Za x#^^:7IY@%qݘ?aqTg%@fF#q4ur8`5t-d$ɲW<0BLAuVdrfHb;86kb<;5+rc4'xt;c!U/Z5-]{T$R bz{2'd:9R %jQ>҂-YS%0F#TXGӰjH8؍/"y0MdwY'7Rƍx>;cpWl,ʹ;ՅgXe6x!Iubk`=IWqe:M'fbCR*Y0:'{&4uYSp+ź%͂ȴ 6%G'`3V-ya)E$;KڡqZc٭oiikh-[p+o-wEh&%xd vb"b~ִt -VE3q;R$xQ`s*a,X31V˚^I(JʵZ$vaΤ6fCr>٥QeM207'/-\K:I#A&;eIS &dMH/h:#7ć5G-joG5#- |v'׷$xSDٳaSe;Xք|RM_-7N~]AE=}\/VNdӬV*-7 LK,|'&#>ϜzΕlЈ9QI=BbƴȚ/ \|`ײi4;dճa |VEHxV3b'&'+<1# N+K>+2*pFkbܳ\70>D!7x:\f⛔8. |Xdn`m\Xvj;i*4*Y39Yb0,:5Ϛ=$v8!:a'9Q4p4k:z~ N(7@?dǣ O،9j/:86  Ыصx&{ ߳'~mLu#;^$tbǒ?Ț?N:+Iw5}`aGu.ቤd٪ ǜsKTә}ܪZW 9!;?K$M\}vNU;Ŏ,9 \ r/S&B㕣@+&`?bHBRjyoQb  (ogsIYlldgP `M, ڏCѲ|N-9#gX,XhJY ׵&]WZ& .2kBzt$LdUp~CWY49dkveKq8E3Q-ɡ&+^8U31lGc f2P2 k9r:'Hk,pr8"KQgi&F-7\U,v@txî*(*ɽkS3q>Nu@3!0q!-ʭh *nyPx^4훸Xjv֤t\l'RbP+ \n\5)W]BWI@ii5)WƱB5O7b x9Y*9UGj) fSf0t[eY6еFV84[e?D X_x"3U&4PLۖs0R3M ѕm28btAb3S2,5-]אۤLL\NFrwg&;9fg_͌9=$:Y^KGGritźqMr5n -Aۑ0f\(Ϊ,3 -M+ɆVĊOXk?ɁM6a,k5BdWIhVf핀8I,eMH/(O'7%!R2F©`cEEAܔJJi[|Y,Z5H,^`p8~sPU-2ce)5P/"Kر^31b]E ?[XZ=6PXD>'6&E$K&tNH?pkj˜"kmtN{q`F,lf?vD3/oXK+nvc7k[83^t\}*58K+=\;hww-وH=E'#/<;Z6^[ldX_9õl,w<|b2!:UIz6HeN,M}VB7'a)IQ4֨&^G]5k2z^INCwB3b"+7e%&<*meMLH^͜,f-IܓNoe!z(d$'~N S3u{~l"vKՈ4iCxOZ*xo{*"t'Ĉ.4W3A^5U~8kb>LVX 1J6> Z@n>F fMI97h8%egۖ&[$vr/'S쌏@Yg^;y5)T)\W& r$>Cx]4>2Q|fMF'8T4b )<d;/-$Ҷ/r'ys2%b%gkugMFo$ //wjgMKWʽcr8cRy0ff <&LڊU{#]2jKz·!AJGT</xs~v`~Vm#8VuQ=RH,ڟLw+y'UQ&X4t IU5R4FDT}XmkݜO %O^%v(_oRϤԋtS!oRH ?`~7󞥀D-l\M@VdT67mogXaPQ5怋`Xg,&s%<0.3<p9bx993Y9೙>9s>9_W92|1s0|)s1|9sW0|%s_c*怯f怯eȫ99-m]=}?C?#?>cq怿?S?3?smx?s3|9~9s/3M怿sҚ$zn 6I6)F #"7L.noBrh :|#pz:HSmO ~ ?2cJ\|j~uU6eU6kiﵷ97eJl1ʞxȗ`ЦT|B"mƈ(Ǔ¥=u:k룴C:Sy҄0ʷ&pWUDV8)CѤ6=ɖ5~aj~7~Ҧ8#v0-?JUy2;j߃%:]GZF d2Nұ=}O+`S7]v0UϏz*9[nɌMOMfʌzʖUGSvx;+m#/l_WТJ' !OeG] Ct*mQ iM04;%:(wJM⮥@MT.˗u!B[Ne*RTt=E>Why,!vhRn59EƇsC=ίp%|v:{Srj:F;{vxk'vȒ7bW#PT {l;Y&W}"~B eл5;i9 = V3);) ~0`IhDRwnh75uJ&9حfݬ{a nIIlTܙoRTYcT=ZY=8F~w[ EXms^I*rvtNr7cK([Dh&ETks.gxS!dM AaRm_-._9﫪?``?d#*,0_?fVd"i#lJJJ6%%%%%%%%%%%%%%%%%%kP@i2G@@@@@@@@@@@@@@@-u>EpV! 
w'Jw~ vdXLWiRp.݋b:䵼/T MJ@N§[8N3SKSt鑩$%hO;Kx/^JSjrR/a,#d4,M:W^aǡ8h;A:aZ*=*NX4< >XGEEEj?X]{%qZyV-?SF䔛AmIrh:J"X%&W.3/%1MMxr·:t+63]'c%K%3MK!gsɮXT]-Q$bNSnɬ6^jݼ\׺ob7r-GPwp*"FUI^N6jCJC)`La7SԽJE} Ϫڭ/hmkIuJGyL5IF'8G ~i}pJDICa!WmIgK+ʟ%pORH:>mw!y|ޮO8靴w(;IVJjl6qm.{_t-Ԃj mI8[mA˔#(iG}RH-tQϤ^~3x֜JJl'7&0iEBU7#rr#[ᐫt|U}w= Z~êB_Vv;%x_;\4읂-[ߤ=w6*sP}A}A}Azv#o%%%%;,s%TUK}bX1G-tRa@37Y9Bɾ@57 P<ZRMH}AoPiz" Ԯ4+(\ T(((oeP/(P/(P/(P/(PP>0P>0Pb%%%%%%%%%%%%%%%%%%%%%%%}`}`}`}`}`}`}`}`}`}`}`}`}`}`}`}`}`}`}`}`}`}``k.a~C=<- gsI%qԤ-3Q_?W4|ƴ)Y~ A^~Jl|wI5"E7HDs}Y|g!c)ś.Q)gc_i]gik#^svD&=Xŧ;ŖL:u8dX's"%mpu:t!&\-.- kA=)dv]ǧGd¾`@ɾ`@ɾ`@ɾ`@ɾ`@ɾ`@ɾ`@ɾ`@y/sB`P/P/P/P/P/P/P/P/P/P/P/P/P/P/P/P/P/P/P/P/P/P/P/P/P/P/P/P/P/az~_?O;|zi~ayyFBЏ-A!vH>@K[8;|W:aHs"h.O>O>O>O>JB" WgïŪj`9M|pĝa M#+O>O>O>6p'S;<*!' 46 z\H:LK",0l_tW x?r>~L^cKgvZ<<3_ .iMC#a/?_W!N<9os?z=7?^{uzi}GӪRRZq|':MuGPoYo +K'|'|'|'q$%>w߇|Z}G/ ?$}'a|'|'||<߰fɚ-W fV}$EHi!bK*(ɠ6}"TK-͕EK`? J[J|W} 'Fwq!a WKU*VX-խB읆[?Zeiژ^Ol\:mox =*Wڳ #';InOh k:[@]}N:DBU8w=!m㡶+O|'|'|'~*4 }kȷ\O>O>P3 rŚRtg: g 5N0>CL'&)LŚ況=bGuEhC}'{CCm4:{k~O@϶m6:`qEz|m'!Cx*J*ܫnW=%^Yb*=\/p1cZuZ'Z_MCq\qAt9{πl I>~0~w# DdS!).ۄnO). ]#.'O>O>O>O~Z|ݷǷ\j?@ 9_{'|'|'|'q \'\8ρ~ut^6kuϧ HZ|r'|'|QJ;(G#"TiK3V+)EwBF[/jE*zDd^׺E@3e ZchŔ7/]G_qKMY YSjTf ;¹ȠnF]ω^ {uSJzGG{xC' ٷ=^a rx{i)SOyLbj"K\%.u;'#4f t(=V*fX\k]:3ջ,M'~%ya'}K:*z-VU(ZN&7wSVzwpx(yoUzhT"֞FpdnF&Ϳ"TeX̹a`J^ snDM o5Z!;V26_^Pt\`Uo4М/4@cR.TY4feUڂEa92 .vb(IwĿ}s:fai\ƕ}̴Zdܱ := z)N7ijmUOFV,6A3JXTU-<&)>s'bsssAhlZ\c(}媕a7lGF̑^Zi=i`c$Ʋ0OSgO9Nj.Xg2Tn:Ldsɩ_Ϊ&IӝAƮ&ur?$V]YPFTg P$Zk:YlRnTOv<}R.<ϥ3+Đh5-+L4V^,Ԗ<M|zyf&a˵鱶T)EzԷ]wC:ZMnRlhmF 2ʩ,]PIdu+#=XE3h*!&p6X-P(L.kGlUWU7tI%Jo{0^4B*-b?_xGW3)3'O 6Q3q$e&L2VQU->rYfIq tL(y7500$*ŏ4 G7S4kLk 5G7zKZi:E\GAUEMr rt=6+Y+h"DsF|敢ST~g%އ2'&X&AZ>ѹc@Ay 0bP_\4ۢm>ឩPlt˵fMKGIԜ|f|rr5A9&Gl:K8lvu. %5j$6B,T?cNv T~iI`b6.ZfcUPY8p`$288 +:m! ڒ睽d +|ᄅ 4ۘ~rua7D,3($\[eiNXY5&p3;H+ U/.X6r[EX-m?Q+#7ȫb\\YS@Mkbl>ZŌfx4aHwvmEx_c ;? yUeIpGs{wwd-tF@d,L2G$bQS|#RwU D0x6˵,ZE6s-ZNb3==>I z s" Ⅲ֌3])"O *N\i#WI~Y[Ujseҽaa԰54m`~pnYM5\ B\{q\cyӀ*D ϖh٥lkEXRQzemVA愽JDV뮷i[*wD-ހpSg0q!Sh%QCűnAzVYco\[*6J;.e>_ oP#Ӭ?˺Pԫwtz AjBOA-.OSxcӤPZu5#k%ISQq {٪G9m˶ʪiMQ!ThX;ۆ.*B+:&/,@x2\/r;E!KU!<*UӪLRn>[#ku_6(~-K'10_W6YI|*ZK9Ort*+э$2rN`IB~Տh\⾳szTug7XӻSÜQuv!4͖ˋk".rWx쵶`Ũ",D) n2j3&HxBNYXj^sz3%a\'?sж Ok<ue)N f۟e&u5cFGHfW'%(J䪤(a%*|MgDwBnSEFGG#\3ݿ\!5MN~У e"#nOVuS?:o+;^/TXgachߋ%D#͑>u\:\˃3WFQɁx?09y 5#b,5,B)h 4kr :UNfê̪C!iQ(\f2(Tgs>4k̖Jɵb%NN&-6f\}z2y' QwW@m4XgmgmXS[Q[onkod>ֹ)3)e;'k0G7҈^ߒ/!8+ee[xnm787br{sY/^[ȥ;Jr ./=7tT69Jĉ%I&'&Yg)Jw/y4|+<>\%Ү}gGQS%ل!H2h`{֜3Y(fa.xJMRZOA>W\;bl&!P.˩ޜ3\28)ݱT( ]~~OYEVg-*6oyQ+=@nLl:~K)0bIe7Xvml=#Wʙ73UvfoZ6^n 6)ZL*zu6Pz<<5㼦RtNWf(2-G.yQu@k;ܻI_giW;~qp^\X'SkXޜ3S8 -NFsFBtd7#sQIl,'eyzAvhȭ% p"AU5 pq($1l\8Sqɑرcy}qN'c=ǻx8^Xϱomko{/"XWSE{{KJQ ۲Y j] =zƼhjS+m{'rC/ ZjQE+q)H hht^Vekހ5Li׍<ԂU' Cf-8hTM2jNq=C.*%^9GTlǺ:b;߉=Oj K>v-jя<>Tk乷7^h[൴ڱKwfylCl7 Xbw<wjXR\2 KOI R݆ZE%ltYK( *Mx \uR?P%X,LKvL!_(Ank1S%aSQz5Ogp ء}ƙ]X}Zպ(BUQŶ0jV7/ɘxKv=LR.@r丅bn>=Cmt<" -jη H@eh6*ڷR`I5ުz')YE*ow<|/P_v1.2 CdS{t JO*)N0tqbD w* 4u`S|KH a@3=2׺::;I2CNv`?K(1<6G.k'8h@ǃ^ǽ C14-PrővBJWO֮Q7 jG:驚 af[yU߯֔ܳU zN9|L9 ^`.S)K6u;F3i' ^%t=25FjAfzfəzѻgs\swmYG$6[N49),!LEb'W\Φ~ș6mmk{o8L#A q>5`E9K+M*I< UCE_tJTGl/;9Ao{jR~z6kWHc)j2!񍠳(AY5M!_ N3Fq„WzDPxQ z;a*Tdʕξ)Ϋl 4O;?QCWfAJP<4! t zj=V(}<_adtunGy0l) hS9j$BSͧ=~KqWvU2Bvnۇt':L]+Seo&:s09F̹Y(gr^uGDYܨ#p^]&;hZ2 shA-uۍϡyh*:hpF. 
-A %`H矆!q$ /n3$RǷG,G ?rw8Q"F^RW7Ss 7VX_%nF5pYTC %@22y\/&)%3!NWapDh!C(K B2I5U* Y/.;e(?muBg%ykrZƷumBAC=]NyrGyľZи@OXc']Y?8r0FkQ𶒘@ǡ׽ lz.&}?Gx<\K$H @3#}io(hT"S3fq#<ax_8ҫFNR{YzKgߝƧO:{t_p_'v4C1Au_ kr_y|~q W]˱dBu^]ETR4T崴zNjkݫ} If_lV.xu[Rw\ Ow0rw69NBg8Pd~=\_Q=A{<"1;_6v K5EV@rjQoDpcôҁNaU]Y5` :"Җy(ֆUu0V-=:T&:O~5oqiZw'zcutGm(q,z/d E=Il7/}I6I+>pʃ;L$ z!nk6*VZF~QsIȰGr3 ieJDuB5 a2탎K.2d4zP-rЃ3,g?N1+诨.u-y޿,NS!JjB5j΢aer,zBKeaL(ZJpu[(6n$ ]H/]qxʍZv?;xȣ2R5R0KU,eҕͦ&PPux.[H8TT,0GE夨+_l#GGJ ´ ȠZ삳gJ߭1ۛ;wlo)@io|a&~9+}#X]PCz,o6C˝k}A-[PYV>x7[FnūTlT1dSe$"zeY,xY|^zkzv,W2izΔݨlxE?[fARR 9c?3i ]G Ηuu Y!20r+(=ƪFYWD(x{rI0Uh]tgvK D*5#UD JlǠZ0qh|P U01˥%굈L|(t6]XK 38e{:1h)vJ ۉO.nzCcc}Q#¬ۅZ2vTcQXsNóFO0#@7 @j4(@;@V$FRPWO<6DsX@c ǹ|I YDooHii@ ]#vMAǟrw&m 30 ͏z ΣOLM7~6ޭyAH0&^n%uw7* oWXޤۍjQm=*cjv\'!n3_ nddZ5#%Txx:C{ B0`Ywr{qweƍ'M;U cWR{E5ѳ%2!,35iظnaүfgo?TKi,̦~&'?c/mҼ8%^ 䈱QT=yH7}xP')} pe@ݬWoAQ2}dUnV\kkx2!.2ɾ a8 zYnVl2&u(&aaPy9 (;SSYqÀK)/]XQCdKb9nx ]( ;w9]ݿbMY BԿVP{~Ȅnz|S=7 A}*F\iR[`'֖X@6,`ӇKU4$FFD):Zq%EPNuf~n|#'QKjv秼 /2h$-^wGp>K.C9D+lRo ol9s=+2&?[H zUpFtZ5c6?THȬFvPLkR˚ []8FFB񜎓z}؍NFۍw_fGc #cf_`h0/E8G»*qA2>L]"a \#%WT̙=QGF[7ڀ.H_\>!@'C`pu++t^ֱ骇/h9 ,W-vP l 1m] *KD..V+s>/d"g5yiILׂ$cZcH] 3Ӧ1-RX[A@RJlA8D=|u5PA4?Ľ0Ja8t/d=ăL-$6GB{qB)ք@m\ uYvx4p[? G\4t;>IXDچYuJVQ)iYN- 5R$r.s# #G99ٙSϫ}hgfnu秢hw+聻}W͆@KUh8= j **!SbNnŮ/KR޻Di.Yۄ˝Tcub LߑJĀ~ W AByUn%ܺQZ\YXBv.,,ޜ9ZFmSNR?\JinmBeza1 %n a|ᚄ9bD0|㹅ILl#7юӧcF] M(0WGP# F "*瓰EG>h;!N=¢fSs3snUk;οmHIqnC*{Y ʵnmi!?jaZN37Ni^F՝~ q>鞣[/3Ỷo: *. O&xv7zB 3_оVkOG̼@BY,?l0]ڳ ʙaעD__ܩ4U^k^x w6[_ pսR Ҏۉa72ĢTH& b*iVS ۔'kKɑ—Q)t Ƙ3픋SLg #ƝlFY_3=EL h2n9,qzN:qyN(;]ބ=/* 5.r~3fT5͋ Ą++7A7ItRc_ ix$b)1HRaНLJĦfw׉ y[ZKTpeWp n1W ?Tˑ{LQ뙝 4Ҵ%d>Kqn={wS K-;/"x``sH;Փ}KjSZB'~\`O;;ՍO; ~X"]<Qns^[0vwnCb5X&ӺZ s^ 3}rˬSDn;{M-q:>uc BDuh2{qe军9&I7 YĞ>}0h=?e]UnUybNT_ K'.o:)L3~Ƙ6C YGhɱ#/n[[|BP/@e`ɝIFϒU|K7Յk$ߪo74>Ůd@)㺣FDM%燐;Prm'Qrc Xx:Q5A'Boux'7{YAaaשd~T `?Y]zW)Q tMs(xRx7 %5 24uԇB5{GYAC|cԑ۰PG:+01V\^q*'ʚ!ckr*RgjX 8 )!!D-5J 6O-"S4X|(EWR8x)n{8f}sfSZj]I^*'ۯn|)a4lHQ\ghy%E(gUVxXxdC55BS80ƾ=J\p9ϯՙ;S*n$ם?j9AQ bH/T?TV^44 L9}_ SꍍJ[{UmlO=j+5) {v{ 5=.#Hw4EC"sqk(tF09OsSmVҿ w'0 KdW\pD@E|`D j_1_P\ WZ!24Ujʍ6 QF&}I"j/*0Z8(>0XAez5 BΪw ӱY:´2J'ݑ;MgXᑥfLZ4Yu((:t>óXE['($—FEu׍MV;Xp*RxŤs%aޚ`X|f4HTQlAPV(ꕵrBUl+wr=0dS!,qm xe,G^YvڢaL47q* ]BGXcec%zFz{A6+@OK!ī'5 6ԖsO˃ p>O GG;B_ox25Ěu#rq| TI¯C 1:GA_Mc#!R-w?΂0}"$M@n$?4dK`w(ɑI75j`pA-_-ܒohU*;wPzQJ:{ᤢ0ͤz-Pn{)Q>CiJ+j}ΆS@׭ŗ| Mz'!_[!v,P F]{ko_H^v2թPƂƂnZ:t,fG/[*/_'5V_ oK$? tXuHB 'x:\Cr դ٘|fIݩv&(=G XOl;cH2t &wdfA +I|U9&U0sGaH^(eBn !=oTjUv/gD,V2?#2Ъ>L[fqa:ϛ{O+ikW4/2~wqNȯ(øjk;0$Ftjʍփgo.57HPGxa4bl,IiN%ja3aȔzjt/2_@+>ˌ ze(_@bZ~Y%teqNGr383RD !^Rx=wJVo:hpxf?L}^.64A޵a/ɈCZ, (f}Z,- Wj |Y,{н]0Ƹ+X+CiS[ e"uY_FfցN"10 Fd4 z@庎T댗z;އ} tP;C 2}ӌ}پzg^g xKẸ̆UGOnk+{VVM Ϸ66j6cØGo<:v!-( F=7ǠMi"`knhRo2w}x mM;a?Պݲu_rK "aZ}аuzczm1St+ˋRq%y S"Յjq%~:$X%(X\( l݈^s-!t|ֆ֖[շBg7ڜk'Bq˖VtL}L `/솯9JQNgݓG#0Y\޸q#[*.yRiyy5GWXcV VlP&ىYrI Vqw<J4/nN[83ߒ@;p?'15LTCJNyNx鲈 mBq1Kk1hm.c1;O_R軅Ҧς00uaT(ԾVz٨ ywpx잍etD(k"ab}P1DrBC wen>5L$gLFF K{;7 \}K7!^r CE @PWNV>rݺ:9P>=A}1*ER$G''"|ۇ/y7zQf$)W-@&Q[ic <mC,?Da2U"uB ]aܱGtNGoDp;|R'fjvPxBq€SuicE Oǖ/VBIA(~NX.jb—A9}i܏M}ə[آ?4ۮ[h&zADP1i7NI./LPRc]C~gjnlJYy5GЏO ?e N/s76^ƴЎO~Il ʦMXs~zJMt~m6b6"N-@ҽ)_zl=U}H:לO- ԐG%1L |Gf i"OܸPg ]8-2Z&+_}}.Jۥ?WčrmW^s~*WP<:n-58rfʃFת#f=sW%Iw^ i&KJ]MhHR40f 1 xoj,̀CBT!4;&< Jvb {Sf4A\+= ću,qJKj9Y0[gzo3dߣ q0drRd {{jc%58IpdN1V7*VNyfA祅5'ѿ.bJ JL`* Ҵ;B;7C٥K|dfH0&X ؔ ,ȕg#K9b#EiC YMw5#atiR+͍ 9+ЕFfݬoxN_Y4IZX\Lዎ6EbXȗb'roMac ]FŠUru=?JTWɆk*bK;J#%E)%21m.Iv84QTw.3( m &k܅P>ub`gL Gcd`O,+jWS鸇lj--~V؟# \]c3S0N bc^de&قsh~:D\93sNf>l8q~g[ծd Yr}"-!1Pc?c01B C5@&@\"0T[xPo`c͊fܨܩ`3AXmvKl Nq}ZSh!"W[B :?~"NYDI`6؍mXC얋JNXX\\4G1ӑ&}-řR29C޻>@3) ꤷbS/zJ׮Rܩ)`? 
K̹dkG%|T2)_?zQu.:# rPb)%~C%5m*͹.PuP\-zٗh_e0ŝśG^Dž#⾘Dž NMqi y\xee@S]ӻ]')]ZVØ ',SB7 YZc.ākz5I c@}kXtQ +~G7:+Td.J: *|JIV@2Ltئ@)lFV3LT}h#)AH\\ќ.)n2|4*ȏk/bRAcz)MS XTj~jjS&|!K@e_\ ՒWEITk7>o8mP> !@-3,la@A@3rs~t ~QBF?[R@wF&u\&ۃDU.Ⱦ[=̜~Gtu 2V&6|Ekǹr9B7!IE*ЈO,v9Mu6| aRhD zA".JܫwZ!ZlF.1>٢c и _nI}r::scy4̀X@]Hw4f6sD.TF]>S98*[ܭpl=>F|ׁq٨<ēn+;x;̻TFe}< 0{[ԡO".м=EO#^OT,8{nѯ!vC u0Ki #{:" PԖ7"-M6ZnGFH38ef< rs( xŰ@@K Ԗmz$X;Xn$B9x`i_;(7iƋx Bnիg8/sgTVwZQO^G!fC~5-#s8pk'Bѐ qOuUMPՑ :@^| 4񟀒Ѓؼ u< |<Ýud m;"DZӕʶn结ځe]ه#v(Ht MtK:f.(%!' .Y&XQ=E8[Fv. eK_eՍ};\_&l3`׍睟t^j;w=DO>Z(QSlVK(V#~ 1XYTL]3if D[=|\5[o: S̪U* =y,ɕQq*CpξD ox.Ǔ=?:AyDse.ycJ7>QWnd0`_;fi$E~$,}ϧy,0 È?9uzH݀0qd\diTI}#ȊCt+NJup3dd*Px֓=t@ydJOޕw#?'d:t?֠u#ZyBP5RVUhzԮ,S1+`<I^njӜ ;VDC)MQLho Bw" ]amb]:kTZZqb?1LtթV"#6:Ω ڧ0ڝp&\1/y̫Rо.=(LB4NH<5\ "TC"q8{yc@\88 SJ^Dm횴T 榾Twz}Ry[fZs?6Pę-A=!&OKL %aʸ_j&7GjrsZqLnde6r`) b:k SA 6M30y/1N VZQ+vMo\S͑a $_TOe`(O[;4JvP[ K%4 zz=67>X7_v"7"I"j`xPmSii'E١Tȥ-j|+@ Kdl[?mH7!ATOg!Z$$߼jԷ~yUC<PMN^gjV5tq "H[%Z(]G qLtDGp yv~=Axk=^ \4jNMXְ /6!7vaMIyVH rvKa? aoSR> \W[@xܽȚb@_;djL YU[ﳱE Zy O/c#m5U 7O8ҩO3[$zkխG>Q\? zvCht9ժnl`K|*y}4o{AMAsa=Md2JRF<#&au;Q_' V[YXb"ԌI^}AM 8ΈB0R.bwv)4w:Zl G㎑Ԫq5s1CZr Wwmp+u`rgSOf:oa[q6фcR׊7 ŅKśŕo' 1anqxsiiaybiBqiP*,X.--'(.QG7Hs{B:SSyUZЮGr@dvZҥZXq |7$̲,ҊlUfM@4 y`7uua\A֨5D4p "u`BO\gņsu.Ib(ēM=fqlT}iTZքO̵3Q;(DRJ%ٻGJ0{\v׆E,> 0"\8-1^G[:*n.[p#>`*7!ub62mG7{,/OsM~Y$,:f1ƆIկa -D6kG5$yAғ~S6:o /"n,yl7>jS§2,f?qׅ|i!ckfGY&AD^ye/g/*ivΔFehoI8}4*~KFϭz Ԩy#L'YE"MmK9/,6 jêMA8N֙];6imx2=e `Z)TqPZ uKc8+0…6%"ȊE);t>3Bيci vԺ\Nmmė.3Lzv-{y'!˪(rNbS N}RPcyz5>A| a(d`ѥNp .+[pRSs6F囜TbR酱[q\YAnӛ*҃Cs҆} ^Ԝ)zl+e4#Ꭿ#Qαʻ/S܄Hr症{|yktgv?)1\ugo8F`~> v81uhhX`;9U~GuX*Þ佺8+N֠@nJFE^ztLkN[un^FWըlWʭLOg控mދ)/⽎e9XVq9K"|S:@a#5}f!0fQDջ&0ج֪YGFqtF@/o(Y?||mnŧk87<`F6v7P'>s"f1 <=voMN|h:5[ @d!{YNFfESB 4\50 -@ `4u]5&>)R6vPKAEĽc-L{7lagTiwlNoqNIfbe\ls p,Op~5ۨ紙F^؁g[V DUMdcM6Fb\`ɶQ=KAXxmJm=iv:\ج4^wl)JM~LӨ7l߯s}=SGĄ)MYKyhTm5d|Ͳz*V˕YygvSUQR`{$p!ڱ)aL]äֵ w~fn`Ӭbڅ+BV8:]V<}<2x(;UŤD1–:PCr$@5 ^ 5zIT<*+n @2acK/^"K>. (z]7lvQƌ XbEتX|%ͻƊZ>%,+'dI ɢp׹7w0*++TO_X`'.pnTnk\-A9h BqYT ֯uB b]fzy|0g}~J;~'|9^nnE?;vVFJuimi'_t 8;!Ǭx%u`^^ꀨ;-^荤ڟ={^SB -#Wd Y/ _,)}[3]ƅ\K0WūvBy7_vBKk&j5vȍNX` ^8 ]r\t^ȺOGAK@ޚ8ՔO-X.Kt}ɩwznhԋ2eL/]q:gxzxA!%g#2=H[,Bf1:nY*1tjtQ(zAPIbdȐ3t wt q:E T (YNN\\2Tjg G8?c$Hbeq s4I`ԏ #rLof} ڴFUoתݽEݺy $Y ЩHPA( ca]Z؏Bӹt"md$ۯyGl;&rD1`N_^/;!3iΔNQYoT_s4guF9' 2Vdh˻&Ih԰9hq\\8s}8p߻,+E\w.O|tH5bC_uHWqAs.m9PG? /N~~&y"eۺۺM:U~m^ޠh *7[`QZ&#jRA)>J s"O<Ёe:"(X᪍@BljqJu8GҖiRLĮvgBHඅ!:Wv{A#}Cj8z; Ď-*wn`"'>Kfk!@ؒXLM}eACrp[H)ǽDC 08<KL8VGg|rxx0jNDGJ'v0 "7Nfco66V b 8}̮g}~. 9 nO\-"HD' vO7ꀖlBAp61OĿY4z\kţ^(/1}7xϫ;rihhI3LCR_9]LrU RšW2& g;VE(P?B"ౠMg"V×*۹ 68G}T+FpKAaXD'~Xl? +SMG$hбMpz>t;Y:ҽL\fus"G'jUz:Y0A sl4@K=y\|5-{c"&T#@<zM#Z#x8xc+"<4]9e rβ7Zbq;Iͧ~tXHoG ? Aq9q^OMKo{2M-"(KI dZe4Ra,'`3[i)O;߮# 3pɲa]ښ'!mR^*sX(D]=&a(v*hHm <YV=P,A*,-}.3킨SVW;ǒ$P3O3xi#&s WlʢGe^bC;`+TM|&p$KR dF>,Djj5> bi~˷}s֤f1ƈ4(Yb"[kOe&LEu=CG]ޫiK9 λSEvn#q}8 0ǬsTZ؏O`NU$='zI8J],[80;hxnzژhU'An/V$vzt%p6>Tszt2Ypt<Rg;'|#Xn)CQa{Sx0 6qsuwqN { גqy Y a9_`ߠ 8g1ihX/4Ww8Gҋx"ˁ:M8) 8@7h3>Ȑً~'/Ɣ"VW˩mSM}$GFm7(2.uuU-&BVk#=oPPvfC>\yLq(f]~UCGb(ڲ0~f|VʩT1+}|Yos @.1`":=:.Ms s!g :Hy= běCIYog=,C 1"ȏ`3`tpwt?)۝D΀gN1R+hSRV)R|Jzt<:nA KOMTǙ3+NnYO{źACTY"zDᣁNU\1CV H *i ЁEͬ*kjc;"}!;vOaW"Ea? 
.ѿ nǠkUrQ.>(K;|̗|&ԥsb} ;bʀThʱ,<麚 eF́K[ }K eūAp%V&`i\|Y+w6ADp@:DV"h\ 1Ţ]2&y0r#|BNckpUᑗ抋rx} GT"n-GP%u*溍 ŲDga * [sbEe!Apw.B U>Q2+KKT@\4C"BHʨFU˥b*`Ժލ+7J7c)BŅ待zWɠGs6oKB?!@ MڦD(q4u+{_0P{7 N&AXG|X`Ĺsg)RڃE[ ],dE6`bQI$B%4%v<ۿG,F NM.̳>ggj3+i$<H$ivO[j*SW&׺p[U`Y €'݉ZX=oϬz^C]^oN;:qʄ˙ #2T9^iN%CDI]hFDڳ^w~{{̻its/T8ƨ  Sv:q lczjqせ֗&n Z}>J@㰻dɒ|DfO͘|DS5{cv8eL18y )᠏W=ƶ [G!0(z{JszjըlpAE=.7ȗ\Wf"0zt .X;qNA'GLvKO}n߬xՕd}OH>YPfQ bidOԃBBv h݊ 1`߁6;F5*.d? ߍ-i´\´\+"Eh \4=>i_o$86~Ծ^fsh!kRnR^cWT=TP{ 2U/&EPoGrDՃH[7@Cg ]t1@!|yMߎNaۧ7+֙Ȭ1@m%{u:vfό[e/Mʽ0}nyF,re^OyZD,?Rd_feV2Sqewd^5s?0wꍭrM3rEO2ȨCbȍ~3oo7\ښ6\5Hf/K'-u @p$v'i a,HaLyk3eL1wKPʫbS8'?{^B!/,eʙ~9ljfk{l!덬n&yIn; ДN [v}P@ cO8av-gR [.a%5 {=J=.'^ # "),CȏiGR$L T։z6acZķlKF33 L3E xs<Qob+ϑGj5.'%Ŷn[ϧ.+7 Eu.z]/zea ~]D`NSK&%|*{!;"CuC{ĮԆ#@ԋ@wÁ+!wFPummp`S`ťZSػbK90ÝsxqYA{J%Ṳyk0#i1uWdxQJP=d͌ 4ozFGfkJ J}T]}48Dax^dZA] > ƙ7վ3rZq77}Pmψfgf֞ZAtdK߼rmۭcZSs͵Mq-S+aѠ Ǚr(ÅO9ؤ˂BZ ZE~I 7Eڹ+NاF vDr_ , @ox hQMqpFbc(]*߈B8bKI,#jǨ-%'4b}bz#cv%^'5)44ur"r 1k&'D8N D(H =fRQw w4V'_'E jp{nE$,WCH5at# YZ`fI#Kh飏AI~˔AAj/Y >53,F#-NA]r1ͷ9t G+wQLCMA(P8,i! QpOQ8a2[0ڗjm8u7}vqPyX H4_KU%ԙPq¨r*OC'+ #kޒ6&ƒ u42^mpn}s %%ge ڄ)CYI|e͝L<^no ޕȪ1VbXR[yo65p^T*5o?礴 Mp10;BX* ـb|H*Qϲא5P Π<]&sP{l/Fꁪmռѿ.r@<ս0 s`l`7hj Qw=5piaB/T!&J/J(eJ؝ jti>1GPQ\Ր^2\,GbJ?]ַ9رPZߥ,:@d <\}!ID5<1w{XӋQ=^4 '| {U3s_ 绵Ez,S$9Drz$@̀?\;7RF8V:3vUY2-nTA <+Q־{[[J a$"WC'ZrkC jNQGQl`ˍXcӇ?T]&{Il8qx䭗UiPj$Gct =Q8rŨ̂mG+f. v 򎷉'e ([-1D(%Ƕ2Q l(ߦt|3T %4]oG~3h=ڮܔzIQLgRO˅t'yy8< ?$ycz9e%!C+{)C%U%,/ =&H:+X^q=/O 8oX wk]{,BgQoID'@/lgsiޜaq<3E IUh,0lVpD@49@]'M1@=+ś˥B2 ,ť7oba1p< KK BaALqś7o&/ծk]/9]xw9~' !sy5唂r-V ;A>\r'JHJ+Z>Kq _Be'eyYya3ҿ3L2+o>nH*@[y;HWyXm{BC|}o_w 2rr4BX"]I11Y sاqT@آt=ǟ YeTv4pb$.uYK:ֺi-9]MDnpˁpnJnG! N*tNae bq/TL}w{d:{88dd#Oiח9?5Sr߮oAQ=tp:fc>|! z:jH Cw)ymг ɒ,{R*kȏMtN9r@ykf߶ra2ogAqS 8AUa AvYs 1Rsʦ~l2ڟW Z`5-Q bH&X0+~jy޴`z[EKqgx6jes"85W}b:2_cUݑGzwS%,NQ7*NS=t`jVԫjJ*ٞ{&!4΃͆PU>6OIM&ՁPrsF8+2[^"`|ˈ oAfxCvOԶ>$7t?Glą.DSY^v?=6b7m/ݩJ xIcE'W)[ȷtWo3PMt4S+qb)\U:g{%etR?\U.|ugu6F (hkf2 ȭ wc٭9&R!D r.7ʝʳ .kE Gy0gq-.46UVgYwICbceGո8I4Ys6q>ja9(-#:FD~ o'Eŵ!HyVbLUjzx1^=GSsZgOkP$>^7AeIo%F}F/I|A5ѪkuyN>Èߤ|RݪQ$h˨ ܙ6~.)ň H ռQ݅dR=G" y͗s׻Մkrp;IttV'K7+m!)D6_y1/ؘ s`j_%판 V:6llH4PЁa XR$‚VhbbS}hu]5\SS29ĆQ] {ս~Lq>)/"9fֽpm6& WUocL`@ZrK4c2--Lx" !SU)b|٥?=)ZglƙRnVk8?E1=0m-aI]3ĎiJyE( :(b31lu)J򥸹T(.,X)n:$ f^0Y ^&N3jnl[X7R @_[tVa5%laz:mvϔ}g߰Y%cŒu-fK+ٕ%wF^סkU|gp1Bi _P⚺fymUQ!wlhw7:sBV]h.0Ot8.>- cA8&>fPBd|0I87jg A *u8V'4!&ǖZB~{SVM[pð(3YH RDQ `Acd^ h7:qw`):6%i1D 'm 0@wܡˁݎtpx"[7p zVzC.Mb>/ؗc 7JRW<;gZ.|Q_O!hbD$(>?+*}`;*<}sX1yggәofY=_-KX@S K|߫D t\vԶ`D7oZɂ -Wτ bNl( IpdF,_ZSLOL;4&!,ϽrmcSI0Ѹq"4훎$@>KGJ]ay5 %Gxy"f̝(d 9M Ҕ8-6PWw9T]5矆S `F.aB.F Qwgs!_ -ڼq9.b95 }?%Vv6>f1h #}Z;jO4%uIG(F"K&lIf_:"k@@Oۗ # \`K\Mi)> 1SOԄn!v}do~X&Lpsb!^X f-yZ=V3eg@MNQ,Z:%C*ӟ z4hpe. 
,ֈ.o$HYc"%j>׋rHw}>b\wZ_p~;ӶֽZЈkzLI4#N}%3Y}r=[,Q?jWHiZX{ ȵO~12m7shEmzxı3uٿw;v4=i ~K G`8vԫ{WVn(\+o>jVyE2~, 2'}Ӿ\'쌇8L:,& T\KDu$JB`).@bQL* Jʕ7z_DVbuw EF|7U!S'h0!M eῴ,"ڲlCMUGvm)6s+ya(TV #k6 tN^>.,_G*tc?e|À }xd6>&}5޸r=M+jrT:UpZkTS$\cAL耆|s8vR3tG%I6ʄoXND\K9N;a nFk_w|Fco_7uߘ(< e_6WCMB'"SA%"qz ɷRHhz =m_gNH&)i[hUʭmqN; 4;78J6mכmJ6wLӨlEq'm8V ~]Tou x{z =Dj0 $O6XOtCB\1AkL㾄H)ȖK8\6fĥ:-595aV@6>Vwdu㘍W660qqCvXT(ȖAFDGfL^\S?trڢy5!Xqs0Rzx&`J8˓@9>Zԋ7O>X~mzwGw;l6|Z.R86PcDSPυbD\z67myzG.кlXx@axۙgjrljNy4#i3f3 I\W%r39#!K|"C칠dW&AߛHH:(j4ҥ3ŔR uԽXI ZWD5eII;Xu+y%L ;C7 Lj -.Q1>h#-POupkշ)-5ǡy ̡JvZV#}Dʵ)pŻ?(3 Ⴝe@ɺ0gZϛE#kFiFL(45jm5(gdX Z~W%l:ok$K mr,EPI!,ei(΍M` %.ߟk`լT={IZۈˎ%g)DdK{[I JĢhJjdBD x7^J`h3^A_6oQv*ѡ̔P^ђhfgl +'v®8wjkr~!hMɸD,G0L"2k8P=pn#OE_M}b\aShE478WA-&u)B{bb@Kbieqeq]wVWE5G!)(-B1IC1=A ˞%j5,Թ)YM}@ c⍃nQS0#F#%G2SZ+6(5ǰ[Uf}ɜ{݊ž,F̛1`GQo(-0Yrп%לEZ4|s} K8NmkBE($́N#xȑf 1Q>UobFbk 8JenJ-uӄb"8g{b8N1X)Z(2ZJnZ-̃)j\PW8^C b=iH6}Tސ2N'份HDGҠe;BĈdħ95|hc՟zE8Dqn8 [:)8`jݘb"/J,oʉC>r7SXBz={7J pT͛遃~8pGHR>73,I5%t3בeI!. ͲKY6"CQ/qm QT#TZxrhh;AAtG Ex8yՍ[k L֪ k/Fl$^pGwo}T-QTߏieampx*p ]LFKoȶuXWn;y7}pg*M 5Τ;-i.|I֬~7]]k5Ŭ#Æ0WmM;=lx&B̚)hLh4Jn$M^SAHZY%)#/NpcHuw3"-3eeRv {Q;۾>f(" ^/#_jC8ZsYu2fsSMÖZr[ޝz "K-vz, \0Ն'F n\\4sa=`ՈT;C'4}|hugLju~ӄ@ 8pɼtTXicE䯰˾|wKۤaRۺ+"9;fI<Jѯ8_}BAp)3|5I{W2`i7pT[zv8Y1c[F1xGCl뇖|,ATԨGh=}g|s֚`edҦC%˜?Hks:qu ;h5]& YI~+-6Z>em$WlV@炫o0= Fo|Fz@"xR8˶'dysnT+1nkAKv1{(J,AD=k,L025i2hk' s]2 qյW-7>ExIhwWCsik4#f 7~#} A πM >Y'@{AʝD`(&F>qtk1]8:,@4Q @55P9TZ/!$%B$)'Wn_PUQ oƈKÀPe{P<#]9ƠrE q+WDSǔ'N}|w>#VQ||9`%ۨl`<< Ty؂Ҭ~ C}fZU-o$*i;:a݂B)M@wZj 5^pj\omspqak%EG?R뉯I % ;|MhYџT{ĪAdEmXgllٚMxa;mrf>upf +L/#x}'R1#pCGOv`UNڑElZTOC@.; խ dfZ0_y7fL },51R6S8,"UȖ@I %(ƚ3R]Pfki++G6̥8jd! L&QaCp.BC<ViJw`=D(쨓R B )~}ݦSy3nCMrɘȑIR{aS'd)BU@}Yujyy<]]e;nM/zy^ \V޾T!w=%p 1ӭ{5hK^Y%5;O0bBܥ $Cg#) )S!u 9'e:F A 9Z ~YG) ܯԒNbߚ_ E9EP4`#x9yܲ3"$nJq E…PCfE"l  ?>-q#ǥesXT`nn&rF1t/ɀ ˔I5\Spi|" B.8fn$G[n[e퓪6y0`Gpx~OPW'tYp<-_/ [E,;}d4|l! $ySZy0_:,f*3Xڜ"VΕ2 8"N.ĎMX񢺅mNMj\rݑ&t9 :oŻy76>zR쉻(_ XhE/:#Xw6y|Rf82k&P{BДyp0 t}MU]0AyqܝOa޻KN{gV[͠/T .;8qRܥG]y'sb-d=2k\OQC *=Ml9|`ܧȫ#N}fAY8Mн#|`ȵǺ0Xlݙ1"Z#!` F<ϾS[Vu!g6zh~e+m!F,<; WxgvEK;_^5/EsomP7}+YK,|554 [2 \%vXu l:O4ar%.צ9Hz^!Z|]'矝3s|Q,o:@[hUC 㠚(3A+~} aA;,J?"TK;DGHYU,5˸*Y=XL'󈊅۽P#ctl*&'*p$2 ;1i/|_}y$_!b@[zh8}q9G$% R * 6%M^ȵX_(,@zr:&Ym_]Յ>\;*[$Jo" ?BSW_" Ɗ34џQ$҆$8.ex>33#˅BU͸`Ȏȃ)!U/Oԅ&mvOO #Ԣ93tA)7fJ뛏~NT?lmzgzyx |C;>'@J|mR ߭xڑr2>Νf1-e7̩F$)^*k>%ADXͧԠF}X2D%{|jfOB4:Jj5a=}~յwݷߞ*;cy*L呗J^_ßZuVV7*VN=v{b1_"]`w6*ZVQG.sRakVe+ì2io\سWx ;k wZ]ah{,}fٵ~fcN6SHk#wvF1#}R4t^}pZHنs6c/iS }:9% qJd> PđJ+oůd>hpu,p GROĺƻ1بܯXJȮ*V`7d -uQpq]Z*[=3, `" k 31!xu?lY&F@.`K1hN0(G|9i 1A @jQX;s \TzjJwA pN.Ĉkb|= uY]s&-  H-jY5s|ZV>;  8> :6ȜpD>$Ʋ&u;_φWc 0 ըK Z^X>P=2b; m)XO7C(A|(8}:y0dGM "jm3gY|NOԿ=E8"LkٞxS|!%}VSٸima8O}/dQʤ qyy@߭ >g k; lhtHfUZ0(F4;ObWC>5ع'gv2Ĝ}tXXl_ Ikt D,"H =ah׉V->ipu-˅Dl:kӍjAA㺅 QE0`>$2:)ZC fny=k/?^UNfƲQiUk_ؤ<*$S8`B//soOK8Ew:ͯ ZkވL!HhyJ={ْ !4"3MF-ڧSېðBm/ }=f㜘e*L[*5񼢢 ,Djb?O1My)SXupk*Dh@b]4lg:?9תzAz"aۂt\$@b~== p"3kջjet3f0>$-K:uN!B*pZT.(H֋=t"gONwO1Q<tOwS>CS{+X pzEy1݉c̚aNm]Zpdn@ \͘uVer9µ:)a![Z} z ]I9w?c[WM 㢙eZy"H5k_6E̦3s~ g`0@%|60W$|+gqSɺwʛ ;kKSO}oY=Ql 8?dƷZ%pREO`}x`((E\!P(J+++K +7Uq[\rt/1bo/b{wÏ~@Td.G'~ cZ0M)gҢ8~_SҏFeU|K3nfk77Ԛջʆ*`J"oN= % }ے"ʜPMhJzA}SWg! Ju8sY/׍x򛷛!#isu6$TʼEpl~uը[rD_&ZJaf}CC,DgL+bض `C! 
Wv#扢o[)kJ@H~"'An(Ut !`v͠ |b#$wd*ڐ!c .jV5ݿ7?̧7YPJ3AAC3$qnQxՆGH;h4['Tu sn$U<77q5#UW"G:Bc (T3@pLx*r^P1C"QҊ9NY(mq`U̓Ex饖2%UlG/6O{m>Gt~Tvh %j\ L#1yP4;SE?=њuY @Vh!c_@j.g$Ѧ̻Gvg{N) ,ѳD +5"0dq1~x@o ,/`8' VrUp80ݏ說3\l9ILj=HLqC*;\&}Rjb 2]@ _6 '!2!2_ JIQ =2'nObfedME_lo7Ye`Zh>&G '-y#*P 'bv7_@_O1?wkguv^Rm= 6_tltns% Qs%أ\ޙK=H@<9df-lӬE  rP(uKD`r`<4ht0j=mG4/:L,HUb?yO|d&{;;W׊p`w1DE9Z8gVb-Մ 4XIKC@(JLSB^ȉNE/`gSJΝyoqk {n{&\@ `Sy/8rM0ǾW.l!Y~ee4[Nug"Ko)t"2f JK2-ϝPsyK _qhhzK2ɱUڮ7M"fy1!!@]C}t/<DFZZ.:Y.㲓`y39@}LEPO(K=jtΧ%S;A[f ;|Unȗ=5O`o? [K|_%P@'<QJK;2+H(RGb[:Jqm]į mǀFckTr88=Jp~o2Y/ma|X1h DґEp'8j?hnrz5_(cP?BI0ޞyE?aCz+;Čj/5F۸j]]hG:eC Cvڤ^$6?'z$ *,/.J8K,D乑+s XX](WC)jZPpn3瀷e~g^k{z1Oe6"tu/;"̃bp']+'! Mh= Zr#Qղ\Q) qR~3\Vt>Oo|I21#F%Q6]L1烓љwE'$jj<˝0"G"zӞƄMb_WtTγ+^2T3dW_mS:\P4rfހ$wT;l|'Kz?&.-[?F/D)G `=>nbms1u0h7t6G ., w8d*F#Se $1Ca J%w[N_fWx]9uÈK.ox]yKrϋe8N 9?0SC~uH۽!ܘ֦H@kjs# JLB !-Wޗ`z>18^ -<ȃHUhX5Ct}?^1 Dup_7SV~w<T?[lū^'QbVri)2~*;٬zG`p VAXKf\XM7j#Th{/$ɢ.icNzUEkgS{2|ApB:gAţa ,cJ$;j:Xe'J;?PPnd_tЁ:I^N}xjs_"SV3Iۗt w}\=uGYsg`L0Yo')JչN%K ? [ $셗0R\ˠVެ¨hz3jG>OD{ h;Lubӵ~lzvhW4/p6g?c;Nj[؀ i&?"F$_D(FT[㟅Mtze nuBa~"x~Le|\+U{-7>xJ/O-"qoN3DM%Zo1x#D틨6Bl5$ pCP5s %  JC9d bV)ʤ톺4k[3]ٍ: hK}JN\KVWABT5J/7l {!Hd<驄"6 4 39s lJ{lQ2zyu DUƤͺ6 dfPv`8B⍮1ltk[F)kw2~j%%Ǜ(;A!8~-h$LKa8Ro$lqui٬oWKUԵ78 2G1̌^fk1Mڛ}Cτbl9" |r0"0mո!)p -:rЖLfZ{詿a#&~T rt~6|wR1єuG4blM^i'ԩ$%v~y:@3gBFqPbcT1 aZ}I7lJI~O~.i5PLE{TS#RXc~﨤Dާ"%F\uwա6/߸0L e&L!tڥTO9RWs0 s|햚3{X3͔ ?]Uͦ~GjoZ˭zwXQ0%$P ;cۣp0|  H?i)# 2D r/ouMҥJT/8zT(,oe:3Nmf')"O3*bpQAѿL`KkG&τx|bؒdhUIWF׵|R|fB%$w_Q^}nj =Qup8] k4 ^qG,*N1$Kj;?eU߯ (B~RzTS@aM؉HBd~ݎ3h>rhJ<}rx4ȩthkIh:*՝-1RRn0K"/̻y\N]7ٻ_ܩ4=̝IwjܲtOѸW?s _<={4npm2qM.4{ATa٬nmoϛM,]+D0yoQ+Z|Ic5FZpJ sꮂ࣡zwR\l I>R|̺"A0E<jKOh݀;`%$tC~BCd~D0 -r* E,`V۠o1a¥7Xig|N,!vsUkppݡj:~fo\1p4&`DSPx|w|XEaCpGQwS۠PPi,pr$%b 蛗~l":2U;pAK7*w*|Ȭcx=PX>D2*yWtXG&қ' h(QyՎ@S2yT"gNC|z]ϳ@ ITc9sRtK6P@O9Ÿs\H% LK d]`=//yŢؾ0N/۳bf "wg2/LfEΙ,ν3u&sv&dn̙eXˈM ےނW\[qoqP]P,2#Ft)V)H98f})0]/0 3x{gZ?LB}JguLEoڧ~u3Η4gf xƞ^: ]_X5Y\P. 
򅎯pq#1Wn?Ȝ&vVfwz3-ijǃ2>bL116&|=*_粊C< KHZtp[Zc F(Eba 5+d]ĺ HaIhZtm(SXLC"FxLfjL4L!GnRBI{8}z.]` !6sk+7D)0Di& LJb{Mp K|22*@5nx˴"=Uny1g(E(b'BEk:l VQ *I?1Q6DO]Sn1q{c}b6Fߋ 5e3}z B[;:ۙO4dQ@$C~cYQeDad:ۏ8`Λ &>z!@ꮸV^3ȍGv 80z[dݤGin1B$$ف:gdF\2:&,u-SHѰ{tG"(r!˛V@}.3:JFRSF0JT7Z"w4VQ&4 הAJB"w9  z!FM䃉J<9:Uٺ#$mX/]9Bj=Fs"Rz&J-1r锐Uۆٮ z7ǵA3MIn,Xn_\ăW˗{t3n^<77)(7Ws Q ]-Qhhl|۳6ϼ[uOra+6Dͅ7  jb(|ƍRMtpj!+/ 8{jIi%sz1EA-5ǝ.L+]G4w!]ըn;>2 *K1CcBv`)C մ9PXRP$E堡+)*7w:?Wk>QuvǻJm=(--T7t w4">#VިzI<"虠RGkf %'DU!zHI'l՚˃ّK( J*11~C1n\Qqp0vRUjt[5FDCixvWer`< tQQYH+Z fqn3+ 5 ˙%p2I r wvg 3WC甊ٛQ^Tڋ 꾓Xpq126t@?Q\7FNf'2@ӗs[-';"7Pzy\KKzDU{휡pp:.q(V36qNxMIfL i{Ӟfq E]/NQv7*g<+MX"uM"?Ɍ4+>]cdD+rQZi#x>: j]n~[9SWyS(_e;D2l UNIkdݑMX4:VQ, {%f =~z#7v Do"d Fܸ~"ޅJ/nXQ{qq=~W HN`E瀂'!7XZȅ_O0?>Zr;8Jl QEwH_u,F.wD]*s P:hOu_c aVJ`an\TN(HAԩVH!:X%Q߇` R ;C j l`b V0t;Yı>˞7GQ۬nU[^֥^9z2Slm|y[o}W>bt1j=HLps!>XyaDz3imlToׁNR]Yg&jkCCqiT T%>g_ק C Kuޜұ=ڕ5ף?Bj ;ίJ03KDQ0@.vjvDA~[ +Ai5JrM9ۥ0SpZݻƷ:OJϽl51lRmi^  'p"^Ęg^(ry1sRxEqMlk&{U 635E>| t6ȭi'%aLu(㺿,݃+YV {Dـ!b-!z[Kܨ+Ls4ȯE؛źm2} 0#=W 4Ts2@ENdMe QVr;ј2箻NUTy"q c#@gpM `XR(pOL|j*%}:jK8Z+Ώ.R&/r^ؾ tz?rayai)߉KK7 7nK+פ L3gftB0iѢ+&:.`~D M+XucP0M"s wx}/⨋[K+ n2;R3)siY] oXY(q̥x*wUʍ7< KqDzoXx8A^Z{VDH쮣AXGpb=ZAክ#ɅGƲکKq b"Ψ1I]cs<Lj]x7yV/_>&ⅾg_HfSpYQYo;}MOGcT (rJa0 J>Ds^e'r*pڹKoڬ*-xv,Dk~<}|`\@h8&#nlP|SIѵ|׮%}gS}Bu+WhR \dr%h]r'^3oV9rLЈK$0W,& ǮLy35IOl #cYhkKr%s$ $̘ ?:OB2աӺj?/hҊ/t+TZ]7i] ʫK~<$^FZu]7y-EYmF5u쓒<#~O8vw~Ir}#E3k& Mcr"Gҧ@$-q|W#*]};,eI#vg8-^}s>_/oT\Ba(ᰈa=v m#0Sk2٪ w3㰪VT}7H&Pe!2_7$݇,%Ͱϰ4 Zт'$mt.5n7bp tqݷ'^%0X|m/OvNкs"kViVSKH7e=kj;7LfQy4Hv6W|'z'vN/-ZسV jw3yfT/4ҚxDeuv:;B8:0,YN  '(6./TzY 8UX`֐GNꒈ>\˭(iy vJ*A1F^-J\ndfynP\o&u D5!L*KK#7JgSԦz^#A_Z@w" btB$}Nȇ&x DSie }q.T\.="2uY GP}+-j *\F:6@~ֆ`]*oey~D !bO2˅hJx t.)>tQ 6[3oNyu緤1mJbHB^%Uv`㤵j'^dʇ*b]Ya;ӆ)Jupg@C1d8 =bLlQ;*(tBFobS>QKsvŕ0S2?X@_ggY՚:t^87ieA-EuNv# :e (#'ŝN C|ę,#sAx}7Ϯ% M`/YtI_|pgr$:L}Z{zT1^Lr)Z= b #'[Zm6ӮZy|kkc޽f3??Ȳ.MH.rc 5ZԣY}Zj'`˿kUhp~ji9\jg\ cT\*-,Jl.ïqJe'tL4_w"nȻ=Ivz3!jq 1o:DA"fE 0T;*;"X/h?Fw~ Bs(ڬ7ⰷ N#O_hB>,0A/!6m85չς ۟skۮ{Gc75/ 0R\XZXX*\(-,/=Y!K!zO9ϥ^Y_{)h_0arԻg1[ZY̮,P.A/]2p ITFi!r=D2fX)q.mb8@C0Ƿd0X"sp0u!a\o0;s+5-`ֵor~k^t-Z[X8kJA7ꆠ< _ië́?=:[|+8E]:, >[&DQs/ 4{p4x fJ , Ρ77f y0p%Ebj W E8W@uAq 1mya)7o &3|JW(Mo"gapqt>9n.Bpfw#i|BgbM54k ;4w>ܲ[ݍjUI3Ip4CT'AO*n6-DQ*dMnd@%_)s0cK`kZUK !ou'CbW;$c:ȧx{0jp P;рf7Prꉦ vg̨_SmrY[7O]2FFlPs%I>, "CIk$Mm:p #DklV;Oտ3;?;oWgwj`@%Ey1}&$G܁>Vf .v㖓>dF}hKJi҄ҠA@O_jɃ lbJ]ːdOKPȮ^49Gd֩aTJ-%)lɶdɭ=]@H.!a $l@HI ɗpYnHB.…ܛ ]Α䲪< 3:>yyTW&Q(5,#=íóCL•?8PUUzxoGJƃs;U_r+%ýz^uP%2㜫( ݔL8\ސ ׉<:2e0^|[Wm;^rB'^"nwprGhΞP"nn˰^WMIdca|i)=ڕ8M~aPR(#,۰pcϥXc<] 箪jEQ#$)zgj>q%ݼ/i[,*gZ2LYG!lv bf!0Fmj A02=QLNFƃ>w}΂$yhm4u1߹Pj+l\ YVg'uoB%5\dӥ&J^TZmA"6q]&H^DGoJa]cɫ3pk,F5jKR(o7&)Y~,tH5 /-em1ʊ_5sRb93y؟sY"%M6G]\J IzlЪ)[\P~ 7#)hNbO[Qe sRU7l.G^{cfESy 99=6g~Q\F/zle5JT׫l~E Si45vg5˙xBSkWm"aCtP*NEP5Ԑ-q.%i;AԡO7AL !!:Pa&drrlTDbpe"jmUaXBP[>o0`)a9,szaZ2P8hr!U{hb6K (3\Rc0=BiPyB dZ}Gęif>7I}j$9BW{/fg YjEΓHMgǻth4uշh|eR67z:t)ų٥'>~! 6jisz^ma[XFO6'yt]C!? 
C5Z ՛M"1f6U}vcҚD}iN4>%1uz}~,KwCkAxaj!=kEl;};cijU`\!DY7`X80ژQc\.r\~;N_ 6;wAsڸ-l,f㭤żPllh{P$}BxY|"'&/"SJ~u;1^đ/+)M}`7+[3i˚pBA0\VJh}I7В Gs/5i?$q:FGgصa׮<ڭd9o&VϘl2EOVҪq=eak` lBj1)e5T[f2GZc[-%/Z{&b]I|7ۤ=)P;XrJJ(^!B]'0{1[jf9(6|ںRV=HVG% {-*֙9҄ԉ-RK$uMEtWӖߺlZm@(1\bߪ wث)V3 jӂuq(:| 6кpfF@aEClx?.4r]%% >?\x֧Ni#h%ox {'Gq9?Z#o7iR'eQ2-p ~Bl>Pw3yޥ0duZ}1RݬQ5rN܊ ]zZc-q5W2KWwԱnd*7kMx1 v|"46Ԏ|E$hii/ۅ=k%<Hp..+-Zz+'WIƥe2|>ouh ȇzm jF'_=},CE.$[VMn'UBGyS@T}Api_=NQ~]#WqVnv6P-RL8ge964 !GQ.N FddNȌ8|j1{Z3#Xamgh@}WLBT%S  gĪԇx?9Y&>0L8p.0תMjdf)㰞L8N6-=Sm_.W)&q d}N'=,ov=3cN^Ƥx{?bFP#zuVpF➇r$ :y~IKJ^X+򆰞67E $[$Y{Yak.7A>~ĝ#6y@'P// hVjZfnQ~Y#Q/Tǿ|(~Ji2Gi?{35.v#Wf]ڕ(rz9/:=܅(ב$ܷt*}.snqq193l{d k[/zehI#I![+DRञe{w ~RXGŏdC s3Ԍbw8p}Ć6gE~2u`pl &ػ)UZ~jJʼz[ թyuP @}v[-BE-GcҘW %A GQ~Һ-5m쑺kLuuc<ξIwxsP3\`Mf_qo}@ 9o骍j6/?x_%o<870Yc>C*_Hj`e{7ⵏ-Z!NjLʿ+vڤ~-yٗ!-nom7#y,|#~j,fI8Y.ʒUX F 46Y?HJIL7n`4p92} RǼM*kFkU.q|~{z 8?ͥ#wz?Yj2xu(f@:Q{!Ç(?vL'$=#>uY!; 5qJ6"cdF?i$2Q W"_;)JbV+k9u+/y]9o7{5:{Rh /d;8y*TbZHvV, ~ۄ!@!?W֮o7` ~5#tMbPj)1t Ѭ}\: LOw7S=~%7)DmcĮ{ŮqTe6OO49c#Uψn^luCZ4c|@R6ܔFHV S[KF4"iBx4d{R'D€Mkyo>.vPZ:p ja&sjކhW>ktMFCK+ v[&gd6Qa8SQcl(dO=0>DZiMPIN't8!b2!4G&?$G֤C ] 25AbL4%N4ISkS5 R4N_KR;췏o Z1ܥG͛*cSLboGG}5V0CSM]wHJKŬIf-=%!u.h.fU=H DTh뇇N|. wYTud7ޟBjyNv{qQKBݙuX(dI`a ^ hսi-'3GK̓ndvH`2]Kxod&d໑!C#W%>ISMi9h*! adGYa D#OX>vf_˾jiS z=WwfRV̯ˏ˥5v@Nz"ʜ>! :\C洿,2 .;{a1tlv1|.bx~',SYO&&#ٝYe[lOM=9X-M{ Pÿ30 ၣ&:xjhsP1A܀gs3a:IV<+~ٛˀteڲo9!j+kuڱ4C '0QaGP: Y_:sl=FF0 Ƽr^9&#= >OϘ}7N}w+qʾ[!q׀ ]di,'Q( C| ŅťQ[1p`2RD8ǤX="l|$Uʥ_2Nx.s{b#,h- /~i+" CLn 0$I'muoY7;v6oWwe^ }HUi|6N&Eg&)S ):x08|>5?˞iQY\qϾݸ<'wіND[$&[d!}e\up冷#n TB-ap$.L2B/@zz;S%B]I/rųh1i0^lWK:c;r-]` KlFOÂSBQNݴSG``x9ְS1V #Q:G.˜տv-gfT+Wy¡=O2"&p#O斡r|,Wd}R?j:,ٌ 6Q m;MJ['&vgD%NkjHCCA/m7Su :P$g+!ڦH( FTCA~Qr&`axnY,=2i3_e/CZ:Զ;}U;="%aءס^fVYHSTZZjA-ժ=LO7J\ίnO> ;J/D~Zw[}-Plwf+Qx$(р4dX˱ImF88h\r Î\AOBV.Eٽ}?i6A+Gi:I }%W`(ڽnT)p[.L}FSϐ#8eaO ׄl0!=;@oC:}6!PI(4 phDi -$OHrN5n~څۚ#qhQt\G{iI1Zh%II% wٗ!Fa/;F\wnwaVD2xJً(R)y7x"eo@߰G@)7\eqw|~XSj`WAkΏAҼCB=<@)tG4\ yR۫Étc]̀/LDj/IqHR0d'цfyRmr;pWN]&ؒ|7Jʼ\⠒Ex[qK<Ǎ_ eZ$s 82@K;s#[}oIx,DJ4ZRMrMTdEZ~t?~_W6+ןȚq08+9h,ZTR(*A2v~MJhrlޜmya Fu}>C^*w,Ǘd!_{nC;-[ˈ׷%Diؙ.x/mYKߴo<0@o'aF0Nqtx}Zb- g .N+hG0B]'NئYA"Xq$x~B  IPƒNk1ѵC}P|w'ʐS u~V%&6AyVc.j*8W|4o ŀS |1{p{#zGQE\SEޅ%j"KֿThmTX{爛 ~h4D$B-<_s&X_s*g雷$]$~$@RZ$eӞ x 7 %$)#&s{Щ ؈W dKe$=8bWdj}tݏ|}w\Y,.@d0T]2%L9wuX3z ޢ8# $/ dg$Ȇf:x-e+aj1> E\8 a`ǒԈ#Ż!>PK\'H֜CaӔ_͇[A :$<0fc! QUPER #6ctS +Wү!Kշ%{mѐdгi*Zo47ԡBp++•zO%M}xZ-cF3G[톒TwW}^Sxs GimsX!WHi2|{Bԋ);'Y"q5He)8jWx9ZAiTneE4MTTEڝBVA.@⻳3V(-{+: LL QqsT/hxCP=G!]&( OXRVP #g'z~h!+ ϴ˷ ] NO"ڍ /]Q8ςO=J+\n<" hq P#$(hyM>7PP 8O!A-PDFDEHz.S]VYtH,f+7O.G(?bR$y89hgǖ"oMΩRx2H-Fi/'h1%ZEQ{C/ B%Q==+HER*ybvO 8k'ˬNBgG)[?С+)aqyϳEKc,9syU}7{D"vA浽ݬ 3:Ӧ~?͗#\:5eNO4"gc_^řPTrhE}yޏy\;.\Z-i,}NM|.Q7֛^FlSX3ʜ>#uEwEYOWbaH RODDDirZ䃑ÑXogՇS_zsrZ ۏP9k+U=K)nfl{6|g\brf_q:;@M⨄+AnJhb2dJ  յI,PC qNl/Y@Am&$HOd6a/{WʫԺ<^XA_cCdP$2TcmʟJ>\8s .;̂w <85AĢjz h%'KښepHR:rKz&G0YOc:la~"_'O4R6'Ib8xZW2bkS8.i19^I-CyUZ$&Er$廦MuTj%xSʹR;vib2Mvzr;3o(|2DҾHxY>\m+aSǕzvg FNjh=7I7I %nGSȑԺI{M̄vsw`εx%_}G{ܧ|~[朷upDX3f r蹅NEn(S||0~Ew楉cKEo<1[W+۽|bJQ&wveX:zÏFJ`#yMh!527˚6ɽߕ\>Y_ (\u]ƿ ps?mފ|M tCedXcbb`x Q1xLC'zA!1x;/ ,E-djr =6t3Q3xx;[M ,֔}Fji\!DNUV_2י$1iVڏ\|ij3[ϗ?ecQĭjdح2 VhUMp,+|:0. =Ւgrw9oU:[_4]GJŪVt鴂TsiB6𚨛Qb= F! ?Լ}٤/hpQyʓ77;"b[7r/x,İ$騧@1@!xcx50Y|DZ! ʗO0m,&5x?zC>/3m7BQ~j9RAy'ZkL. 
M\/mjv2Ͽ\ nϿϕl)nª/K3j Z/"> M})s,Yv!HQ ]ĴdȠc9"X0A$7<6IWs\ԪB-dS,&@Lu$h6SL*jn \LBg(Ox!~J%.SxޛyBc'1P 694& lZnC)|fnR2Ɲ%nmֺ}8X }|:o<'sNʿNW6P<͸>bM:l rrhУr5V =B&9b;t5*㫟/^+8ƭf4tEPZ~KHcqƣCr$i&a=H2W!+ N*KWt"*mMhm!+ntJQJeP}Hn-kk+4c?ƣLqN.2LOrȕ q)NR2[Q٬)aܱϞ\KampR Œ &$EfZgu|VkB1WM Gh6I:wL0bR!bWH(* 'P tx581@`6 `O^(vw)nYۧ|w@ kJ< WIbYYyGqxn Hl8h&8C>ѷSڒLدxs8^-R}E1Ah&IdVaҺc[\ۃ`B0SKc {`Lf̰yR"H0Ū"e韎H 0z4o..-e߹" ֤+d<[ +SS}ڟɾi f}BNǭ'9gۊ m#}VSQQ94(!ꍃVM e=x|t@(UhilH8-gA@G1:C@`{'t6wR[_L/Єj/ 2JO.Ϲ=kPMH㨓oƏ(@@2R"{x;f4%ŗ)9@_*< kz dfKws5_~9#\QR;ٕ9ԩĤb -#sKF9]n'5ޠ[KjnK}ES]S)C( S?۽~Uu ؐ/j#%XExa,4AOMU,57;z`dRNcӲ>Wاqا9Xsuy$ZIIKJ jzy r5rYpjWfU.[<^^xFl g\5W̞Ơ[ɀu꽝EMk>i[t8}-ZoƔln^|z}99nbx |mئrnߨRd@J/;x|sKQUjBb~B|.}s.m 63hrbi1)av&ogE PN=V1$[&$SB8{r±kau1aCdd@1A&2 +Ǔ*Yu.ITظ̃m.Ɍ'ln@ ҇m `Y[Ki Bb 츘,E:M5GZa]nNETm:#nK*^퇱dGZG 5iDv\VHH#sS<3=>=BZ(˲vdw]m rz%jk׫5ڻ**Z9_\n-%Enl^/{n@+K~"d|Z%[j('֘c>?/)Rk,yݹV.mnWno:5o*+nE_VY׃rimmscKr+Wܖ]@kz^^ o%iör0-VWмMΙ*!g(q7ʾ;snoDP5Z!WUr<3<4LA1E?(& }$*ї8XRYl8[uͷmQFSom:[~~ s]#``l7'LEr':!Q뒾!$P+>Q,[j])Ux,HvinG |==?N@KtM97)fVi#^{XcKӗM0 %L$ Uy^ F8:p R\rv֗ φ0:oASƔ#0URoSj btRr@7rkY'f/B;Ji#:b2*L+z_ȮcFV VKmI &1PԈ58Z\GTC&OZPG@u/TH3}EDZ) 3ُJnUu ~fWʠ i4Z&8:jU.56!d]D\Evp*rDNN'6:9P̲5{~w=< (bunAj=.,3;9}BhM2';3}Q=$_t4wYr-i\-oH [Xʬ{TN^,8 |za=ЁQy&-b+w ㆈaL IZ*޴< A)xC;I1g>Ek̈́17@IBf3&tM(_dzЄ$g"` 8h:pc(4>N롌,ُyNB5Z0 >܇>ڡj^HPaV05 jω\!>#mfi4h+*̓gVS@M8:&,^i3Eħ4n[8.T37V:U51C&D2(Plj+j_6֎jf&aB0Sp)8ҬĄޙNe+[b aF&ePMغl3o&xaZd|t ǧ ѭ7XMaDd7Uo9uBT nc#j`פIOSw9Za$-Zғ:nxs58@x]-so7 xv$Q.. EZk8#.Gxm&nYy!IKA6{1`dfz3|e؁GDI@hlf4s㲿~ŗ#\HGtǀi5x(x^:^^XKjeH2: T4dϝ{D1.Vmr1/ ^*/KW94lQ*rP ^z|}TMShSe}Gr}=W~9czsawf~`yi.hPOZR5Fqhd- x@(mz%p99{f/^EGz>Ei~tT-b|ǛڕB5Ȉ*0lDj} P481#kC!`6`Q`2fQ\<ͦ#X'|ac&!00,-[bBMzl7UoY"V#7 zC-1} *?9Jt{ͻ=gP+;DN?}焬' iRg,gI=r1`W8 Q\\?†YT&yC 1q}G12uXc2|m8q84"!6jE:9E TTWE0]¹44`{~E9N_^U(fge7A%ўM#xY>{"1:h=6r YIE3dp",&EntG'Yycjǒp"_6K2YLR۩88Ljg9@^( 41:- JG >7x7xOWF(iUF;9>ME55 A^ӠvCŃA/'#a B wj>'Uٴ ׶;[$#3tcO{ 2~_ %Pf)r sŖL X2C'!]*2" h(/ 0y9^sW0vz:~:5$T$m7Zn;NLh^jsJc3.N:9m;"D9 ұ״(Iުn ޑQʾ.؃oxHRHO|9Z:) &FodӞYL$_2l푛b ~W4թ<ڢU 7@X0}0ҴḮCLdҥ<$ާPDU@ CpuHF] JU,꼖/~ܟz9]mGYpCkmF wRc*$$yuw/g3ٓ\JPI%&gVd\8LVԢf?asH󹥥rigOs zluRmq9[4r] 5xde^#kǴF˜||Ecs ՞4rϞ?q+N56hw'<:Sf ;˸5xy B˴7^,//=7XhYZi ڨq^ڤVB{=7$?ȍ  ͑@АAvN$zξ#j^J99HZ 7UdhC dx-Gqd턪\/9 Ok+bƬ$>&iHWnxXov/۲cGU1_C2 }jiqѡF/Z=HUOG9)CVgBzF}1Ŷp^,ި T%Ky-)$Ѵpl>d@bpUƙEêf뢉QewRUf9ݓ{Nwb֩rZʘ}\)[arK՞4PݺJH)#.f@JQg.i_!?g36sXJZs#_֪y֘ wAAWFl}:CGa1nt~D o#^{eS9 G}(["tc.#8*`kZr~l$tؽ*d/ OƯW|u y}Q^ T/ ]LFV':X>w ֪%X)e"〗h=Y:S([E:%ZX6JjU%#"ٻ `k&dwwNZ$ o:'M.ILBMބ+g^#ϲu Hus$I TUT_^hPŒ M9 3q0!L Jн:t{ 1 gguήRvo"/H!u5ɊafLG&C%Yp&^KD#?!Ҵ`BTWE3vj."+d1NxFLsL:ꅛ̨=)oc8<֘v{_i!F/*m6$[ |Ej _Gf*Y&ذ?"vEK) W8`fY&]J`]r4qh!VoG QGL@)GG\ B6ĺyҨoƾ36wl`^Nh77`xn1ݹc$BeL((b#*`!NgяO/23_\l&, 65OeWw!Ouɒ}M٣^NG_3V~9X_V8ٌ *y 3U39_m|3#3kӟNR@Qm08U,?)(NlRT1\ yE#_ %,3n%IqdYzL.d#}@14ٱrԨ'.Zc1hF收,o6v-=@JN^âS\%6%ދ;^]@h>K~U|I:Z|a'3oP$8!58R봚zEDc{ޯyjI>^4)ZSf!mDYn9k[rFLOvF}7E}X](V W=g(F&:)vNJiu`66 1-9V=u}{}R۸ZlұP %||BlDut6&Q~%6;$^w_s"CBwF>3K o r]=:"U/8jy3?ρoJ?-Z eo &a8dM…#kf%zГhbݾwta=Oi~ZYzT#|Ce}+]jv,Z޴{M[ > =3L^hV~W'#&wIU}4 L څ+"q9{X4 ؔx7ЃK@v8&bx^] +uA%Lh^[l wAg`I-C=,S Xe`41`ƍ`Wr弿YZedNbsen\{iVyZv%J$QUFZ|B4a/uQc2-{ tGy'`̐TaV%SciXW+' ~ŋ!m32znﱔxo )͎&k,OF\$'`7k[{Kin;);0Ir,LBi]1KtRKx$Ri@#`9L@SKI=M+5ַsM攊<$e1!P|wu&t2)&[2ʦׇus+dQׁhEbLMFvKЫV#EopM zogD[9&"/aDǺvjekMn7>oxV[eP$?VoCK?CL~:Ś {9G, tr1<>bGNw@]7IFN/7 &Yùգڰt7oρR(@by|S .Zyt;nB#! 4w4y!6QhdqWW/5DD(V'w FCsW8~bޏ=:l@i٫3ُnPZ_W?OJJk7ۇָ؇AX&k#X}Y|U+@٬*gb` V{&9ZwNgI2R$>MV+s\LUGzqM̩}r)dr27Q9MێI֮:m*`рLo7P&4C\[;.jNU-FUU贈7}.DM'j-낣-R:ֿWW0{Ǻ.s+bϹBxR Okx-ZVr#Ƙ:$)2OjzcհR[*]PMtU=3Cr. ׫v Nc' Al{-PEboF/stݪwN:ظ覍hz=UH'ݝ™n},@ ӷ7[1uȲ'DmC#! 
8PNjGZRIv=+Ogt4r2SB` t oM+a8pu2 hKjqL\8< u?[p(U R|6V^Xf:fƒQIsé^KO foA,T #J߻c6ޗ#~0 C#p*b4S]I5 9-({ioOX(na |GG #Ea>g&ۖ<$𝈻fRw{jg~56%ŕ۽1G6FwGUwBk;;Ce6Pͯ^5GJHܕ r%pZĸm {Qv~@'>k {ziInrPT@>2K,)c*|ttHEFU38R)%ߤԋ)zQb 6'~~?e06n7m+sD&{^IJ-4{=?5*_!3$GO-6gAsswwI&낫Iw˞o fgo`O֧ҌQ;uRBoy5 q>H sQ7ª"4$4Z :&#ErOx-4 %!$Hdt` +h#CǨbMSH垦w &WJ' *eБOJiq7~Td-JT඘@ՏXn.pz~W6+)W.mñ.#`bYƲo7_X;>뷫&q#G~xmj,@bkKU^U]2B41VꕅU\ ?dn rIO'QG88<}_9^qRvR] nLZtuj#o,αJ 4^sm8ɿ]cun -"QD,(ۜHBǹ 7¬=^>pn[N_:ZYVyleO-DG I,ݞohF$+ `䑘,̓ zJh7渪"C 4:tгuwV[m>KN#قEؚxLӠ{s@͋mfv0xV[&*bpȆFw6XE[043e)>h?i׋ja^ݻ|v\4zNiTch7`2*q)VO=fc~u<(D\ҍL{ڄTw?C!N̴Ҵ.&t=~R? x3_޵r=jes]/1H14w6 +]V5_\ͯ[~4ak|ң p.e! #b(tuU7|vBL퇳[VUvsaX \HaYzSɣ/\ i:d"d4|t6M _!)5:{ZG*J\ہ=ZA JU*c>K؅Z;w;ӿ#$egikov'։(R*Me?2'`产N=Bvé$HP oE\7mbG| :; zseI0`n%-&* 59pWC4ŸsKaiFl}Z]\B\VǛʌwVWTJ>֒WU h`}2>޿5d&&3H;D` rؕOn'%gA#ϛQ;Ȋ3޿f(K#c쇇F;AV8JS[}Ϝ1H ޘA<&4xx_ohbyi1U-ךxFѼ2 9uv,A!ި?N~%'gxcs/icVMRd mn!wWi3vpvk1 n8f!H у-TLd&F=[3JXhE?+vՋ #̼h%5g)Gr[%PHׁa{H:y+>gZ8T`UD!T}>5vI1RcdD1\e(!&+Hq+:=s]Eutu5Z4v)޽ODv^\:|\*̹ q/Ds->*~4lc? Qn1kbĘîNƀ4`S_nyRzðD6 ZBKETR+Mf$c %Q }WzcOO R6WnƁ7}eTPk5篜+GB4gX? ":6wm- ӕwx69Gu M^m`qXMqI}!\LNn-AGn<眏U#GCIwdj 9\'lü-1|Ye1<$ZS}]W/:5˗NbT5].^X>w~b&|? ; ׮W_ϬtBNDx#_ ]8MB||;d 8M! zkJz&W] mqJN&F/CJEKg_ni[j붤[1<'?>aL5T) Z4%/# 7;%q\Zɯn/yə4}]B&*zU еX}TAO/$Il X"M}l? (R2MWDׁ<(i3VCgLM"j?/=SN!vnp"O.H:HV-pt4_ZGj校وu!zM>ULNݤXm.W|kKlGsv2Zf=U*$1eN+z-tv/im6˯ WL ͸h}*XdGMO[yI28 whEw]A갛9U U܉LN$|4B_CE]dBbfbg'sڊp4``a˷h}Pٌ/[&O'\5J!?k(9B>9\q>|:*=D0^to|{&:^I|'g̗F"ME&H,>I'vV;%eE|bg^L@g%z}~ Sa9r4~C~&`!psR#;2VI(66dYOQdbNwnX~_::3y/yRThVc6eFZ$8 gD}./S\m';08uԔH.oJ12cLGftDa=c?a~&Oa_p9L'I~GGgRNQ >}gh}3VYۀzzRQl% ıѴk5(kRYhw=Wέ0el3jq=ks^]/&} H/aL8 ŕRE>&Ռ</\UCqW+dCE({yr/h` 6Np%x,S\RlD=AzhxUTh] PijsB4=(Wq R'kt>40JUވ*;[ vSR P0I[ }a˲A )'H8]KlbFoBW߆w:C[댩X)tsԩ P U@!@o`mM}(2أ׉p)PͶx9J.ExFd|Wm6%6vr8t_%YC_3`gOQІEw)4 W0u-k6GLmTjzkȘE׊,49VԩOۢOXae\|{NhnÕa>R*Or,1W GJ094;4B N#R h` kT8>RTU{[BeqIB̢!,FLʔ.EA_Rp"UB R(I*D>"L8Y eۼA4"2rpt:F2cPD?D+n[Dnjs-lhQ(`#iKS[T'=hږ6)͇lDe EmZ$_P r4K4aw[&tg$SJj75ۄ<ڸ_/o'|Z~L5k/a|'sm9Zb='tGvV4`eK"5sz-U)GB3[7"`SfN :iN텬6] @@Bp5ӸT})FPeTy&Ŏ31%-UKj>݇GsPhکt]A1Oa%_R&~+.9gIg^H{ÇGn U'!튆0J̬ZhBM{/Գӆ٥9if1AZ`7eJ}v>pebr\VfԤαvqH~ɝB9#hPUӗƨp7PZTrLCBКmwnÁUT [5F/9r i-NFjJG`\M!xbs!n%1vQ3RU]}ƴnd. \E[N)q9HD+b- E ^H!:3F(<6OXLsP]Pc(No4k3EZ)qKQwrCŐ-Ɯ4tiЁ#Vn_PAI6ʩ>XPBo p%43uuq=Nsxx7hlgeƱ۵ir6PcFYh"m lL-L-8%`1˸.VUTj i@C?=W>IDaX& &+4cb4T@. C>FAO~%Ef_ض/Q8R }x9m%C$φ# ňIgZލB61)8Α4CJ7ӬVK#U*f201b>HkxK7?% ?~YDm~['_V`wlWc?+k+)$|?Q9A 6gˋA"9")c Ynu̠k!Qa(sE'񖹎Ro(d3F F~Z9O:0s*D}ߥ`1AbV_r¹ш x J_xb̡7gߕ۽b ԥft$L9<[f8X[sKcG œ4$3FL_ݢf7XJ7ҿX'ۚ(!>3iu fHH9}pC*.r{lxa29 O rS (!ʒ}qXV>y kE?^މ#/\\)F4uaG`QM_B!iw 'ʻjkQ 0L(^di;dD塳GW[mOrIYQq5T:teZMP̭BZ]U^ Si{fc.6mspA|2 CZ_O-64:httz1>,œH,̷xsUu*xުz<{iuY ^Ğ$2 2g ~Dbw{ \Rmʴٌ۶1/d[#w݀ d>2=W48WJ!ܷyiSmW jR},# $vCO [8!f•1ksNFڧ?INQHeyYP+İOj L;8^=9 9|Av=|PvZv~ . `"8g eu|rh^ m3V<,ׇX$P`Kو2luå}z)k,)Z~]o~밟`=ZM=?YޖNxSt(o>0ق=kemCB= Q_>rӉO[fsL{w҉ʽD1Y$dqQS >ħd4eHIn g@@^n E3h 8iиJVo(FB{_DJ!";jJkSsH.Y:?4SBz?ejZԈ_aʽ/ЭqLS@_ܽFܯTs/xz`Gk4YD/кA(őQT'Hq*.)s>}>tOg ߻\?h\I nۙ\ԝռ7̺v)FTFA"׸W'͢"<;f\1f0݆PX0 Gi1mh ,n:]Zu! 5*4e=٭2[6 c%aPD cl3 ~WxZ^1QZ{5{*8,ǞOdD'7'lj%I*w"@+'R Fv*dӗS07A"Ϝ *}3ID>O.kvFG!:NFC=f!6jl%PE֍L(L^s^kupG踥NĕPߘ5b!Gq, QưG=@ Ô.xGo@6dLbd\I'|v8bs8/p2ıy*ys0&98/ϟB?Co{ A4._zV "75>Vb[MX:O/zBz}X8(I.rg ϛB-.mlf|z{a!>.-,O~= okVJ3&NqWت [r+$LҷLvK;SӚKQ#LD5+ށyV1F)|.}s. M7647*5?}sFKw6+0mrXAФSG5U7ufԁ^V|{}[Z#x{og" Tz' SiLy1쵻8P> kJm1 l7q{5-Dt,r[ z踇y#YoT U ;`Fet@WD4vhAWhJ:k'OMÚAklhWttWӗ1#pNR!vf ]?kݑ퐄։U4e<;٥Lv~{A0:P&F ٲxXA> Qhsodh T5 U%v5վ5%{pڨ*ؗ:Mc tbl%άl !LnRX]}'497y HUf2iW,cNLг{~S.j^9N:cW]_lt4*G)8~&z!" 
[YqԨ=lFB gekaR5Z=hti $Ά;nmȴ΂iPiy+&S9(u$c'hKʫJxCk9|B|jV|CS&U>h2E B@Rư/ TB/K}3*}K};kKǻFPE!*gTDatf=MecK|يuLᨨ^M'v E@>0c8jj@n- KtLsnh~3P-V9Ѹ-m0˩L(:sGJy+f:jQH'C <.OTIq4CĒ ېiB5#; e[IaY00_}tޤ$莙X0i: w5ciZNꋴGkhG4hU4 5$}opzo8`2a>^G]2/-RpGnJq4za8Ţ>10ʂl u(+5!X:pd'鴶(4K  uf%Dkg ʘ0qE 8iq4*7R2 TLʜ$M)V 3dhYU ꍖlf4Jj*٥鳋ˋU39iri0:ShXl8 ޯ8#zټ ! v!*HdAEIdU]V1 .`[|ۉݢ^7JWV '72T: giJcfR W>1W&ݶA%|HLEi|Tc0&8~v+QzT LabcW|?HE NȪ퉵7$>H^sQԁ.4EuұƼ }hz@1%l͍NxNIFz(XI=7k3!!`8|~i]jagyP53vG#ިcT2'= N>(6a8>NK?n4~ߚoWoow4KkUqC35G},6=S45+eWHw&ƘѕI8`NioNpωDτ'BXk:n,'x( '2ӎQ2O /z{9odH8*\[4qRmS\ ߒTimVw!/]kyx @sQwMUJcb)f$ƙ !::NQlMpfك%zیDi(fU Q%5)hv>g,+%Ҏ`гbD-B Aȭ452ѵ&7r MY)dIl6('&ioծYm@PÅ>x|xa|wTwdllӦ`z[pig-e&:f.gB3}pśpӈV}uL6=&!Y )YhM7C:MAOAyqnH1Tu~R_5̨o5GP lIA*3{?&,t&GԉPtLK C-24^X| h꒻+ȶ:XC^xT7tПci"!TW #%G'R6XaM/Ժ:n on 9S I#"?_'8ٗv9e qZ >ڑ&s}x5'=fݓ>a:"IqAT;aih ?Dseѐ1k{d&d 3" 0^9y4: &fbMH(j&;fsXIÌĔ9|< ㉼K]n [fQɶXI_/ i4( me!;c?ޡqx@;DjYlV3SN>/eu1* Tx] jKqdy<g[O>J&=o*ɕ3| =8.Y 4SQޚ(3O&Ԉs1M ֲg 'A C"ϳ@:h%# #9Hɐo#\Ԝ4fylz(-FewLތ2θTSSljq͑-$+=yNhDRh<Gb.!DHaiKVqWjIP']a+6O ^W(˲R~r9etqdɻlCm@Z{0S|T^U쒨&|̐aɘHI: &ѣaho,aqu5}{,:1DW5&&•"vn|Ǹ#JQ|eHae\/T}M^;~NSn .Iڭs$ O_]DډpҌ% )}+*~lUT>cl[u6J},&rɘ ‡v ;ZnJW u0ޗD :KYEPl 'Զ?lƄzJXz5\Dz!m޷[5^ordt4=}z 3i_Gƹ873&NOP\RY+B hr\Cp/S*b"tU^ǨqCzٝ-X,Ǹ1qS !ŭÁZsjCW$`iu(y}e_tTKklp&5~w}Ql]cEUN:`G70RT^Z’.f2I0iG"+%ׂuϊFz+:c eM n߈ rF2'Fū-algrĉۚ7bV{<5cexF߷לDOUGhH 1,!c!\m !P\Jı aC N%&8H =;coJ\bs %LBHt.jT6I[sQ&)iRD#ȄCNюpBԦ 7YgQO@_]N>턄{kfhk 6tT2hh Bf mȍ [|Z{Mޥhle"ͩܨ -@S|HSn}Pt/K'Q"AHw1Ǿ M>OecK8f6iB8UDO /QՅ9tǚ hm8f|^:moG22,pb[ ;u3(@ZI^n)L۠~MlHOL *qJƍ 4{ \'"Rd"%ёҌ whco`2>ځl2|whWo;fO ҩcǪYS_A,f֍m?TI9a"4B*sd Yzfm$ -IoQnyLRHk4VCɒDK_AXp_|Yljhd+\eCF @X˩LB_͝,),8 "A uj,b7%lμme%$D9#"H;Tci(Gs>U 5*$C:{ '<~&ۇ@nuR&ؽtmdEB:%^E&k~,Z'> Ok['$j1N%G- 3:EґX\`ufjq\] HS~PE>w)bpW&k7pe|Og=rbQXA1Og<}$Oߤu%eq+0 2h݅[~;&nIϰҋ06ۮt1=X8_' PZMpv);כhm֖r~!WKu+-(ؙaQ \w(9_HW'ʰ >jHD #}WwA(mARȀ/^-B 'j^>l>v.a\O \RDq"u9FAi5_E bI$&^N )DG0P~w9twKp"ۺu$1>?EVjZIf}H?kO#V6QR5+z=6Q\wPrχ-瑇vGris#rs7 k"oAqq%s fªܮ6l.*qf =~s@AyGdO[du5>c$Y+Gf)Gڝ%,X嫎 BzP?v#Qbx`f|Dڳ1⇁>NmH "$3E[RwkGGjH>@W% tA hyrfԱeAb+2NFD0eS_vGe{VJZ> WAq%9Ycs\}Ն;ĐHLL;>fx&3=2y*R)Wo\1#HFBz-w+;#,cf6 ,e0x^7#xW "jA ~G@ HjTU#et&c4$,(@<8:V~Hqhik!gp/ vo v3!D} [6* #t p/f;T} :ˑNoI  n 4.lz!woXvF^ rRM5Hy:RH:',K: /$Й~!rfo0pF} _Y~tM#'ܶgfn:~v%;7KWj)c^ QfGBː4 =z"]4<,tVD^c4Va#FA~ Pש {hx8QMz{]\x {?+Q %XwGo p)jի Keu!m K$쏚֤s$,y¼tW[] 6׋[XkI?NZwʕ*^EeLKϛo[^V Pd[| TCgNY7r[+OՐ?:[XG'3&bͼh]ӭO@M^[ߪ>3\U VS"N^M tRsc?cR]mx>&[9MkFG=W9t S@N<ϞjӘU`$7(<;mnޭqB/B7euc]5_Es ZL}̠QTfJ QQέڤT{e%4*:rGt+Ý[r'crR5^ɕWyVsd%O @FB{E'o&F\-*a_=yӾȐq69 \lfl)^S㲐@+~liW6 k|k#W(? {d $S!ƼZrtj- 4ج@j9zYn՜]B&ci3?I C Ϟ57gCH3~uu,puXU~(D{D3.a&^Oʙp Bp]%i %=A;\_s,SY|%5ۻd{'cB@uS)D> k:~aHRlRM|lfǔli*覵ҵ6=~<}\)%/q^c!`^Wt`j :lzA1o2>q (ީGd(xø1 ˆ',f d*R[)Q+/DSao: zdoeFBҙt?"ҤRl^\:{1{VғrkDzzqj[}Gs$a&zU^ PI( YXyUJ+iU]xjy]qxqNhC[ ?~j2խW v3ZBeIS)mVFqrC WY0f I!'DyOyTn'#⧄?4q%xbj}Z;{Ymߨ],cgr?nRo}5"?{2ca7+8ip vzK E`ɻں>/k<N1I +d46:{8VYNkv۫;إ()hg 0}GHbqE /Gލ1-FL.Hjq9"<[;wcQ2 V EqV.ݬ`U]YsBmzҍO`f,g&HׄF ^W$qpI'QxKdtzP_.BuʇpDU%ݼ@8L瓉8[U&>4;E&Z4Aw[Ḛwf5+QwҭA9]N 2Av^jS[WV/ë҉EbT' } sw ̣V)\o*VXQs-gquZnPYϾ)pgB-TbWR_}z[/<hvʫ%uW &JjWX0pZŊ :|$_.]-6w K/W '}{{p٪kMl4wZJ=6jiS-pӥVUL~E%S;&ֱɊj,Lli0*E[0ͣ\Y[@ʡQw&~xU Bt >q~ {|'P:}#V*^3 8'e?<c^-$Ccg^އ֫f`Ӂ?n;rN V5nS6CAY+י9U BL,9w7)JEo!ƒiGo钃(xP!U`6R:]0|ߕ95։>\{b푅Z9;'|<iI})@&jMyt &_d|R>< UtQ?ayfhS;ys $b4Ez 39,9`kzb/*8otP.7vy34Gn 4D;hu&,x7X;/Ky{nYuVy z#3&<1tّA==GSJ.!D kKl` @0FD[xb%Yvʭ-n/dC>'ߙXA[;jf=1T&Zŋ7*}*Qu7/{sA^~c1gkCEXHސ!/o7zkww! 
v#F6N4mG1扵9r&r4FS2(VSiڔjڔ <<j6$@0OBs%C57 0&X;uP(*ʥDW*Tל0!jKxH(I.~tVDIOZGM3]*:_X"ZSGN96tDav ig''j է5թƎ/:a o Pč|<<@ו79[)~xh' !wc2Q{\/Uo#%OF{[GF9qj~WK7ET6j)/]εzcvjR!P3k_ͯ˪W# .v/PO'ZFI;\|Ȟ&lg`􆝖h7(ĿmՒnwiRiRB_·@`F.qWE$.＀[8{[:(#OhmGKg_vLdsZz?NLKV/ѩG06TIعEa9uuȑ}N BW-gUR;ZVoq3nV''tN ?znѼAp-ːC"ܦPGdO C E uˎ*i~W#&^!hM:,(ZUP#2T28B;w; Q\2ΨcWg`q?yC9Z]3Ш9(keH.st e6Т_aöGVm>\]e^!]#.W@0$zd4Lc*ed! Kf˥M1^nO\.dΞ_,dg]Up<2pZȯ}o 5 ~eK!BJUM r`N> ʁmRlP fnwihސEM+HBo8)rT{M^ɸV?~Kpl'-ht):QTcgM1vt٭Ԥ]qp[E(";GBcA؂&kUbb:hKP_uvGnwO, ,ܫCV%j%Z᠙dVtZ쫽CkHIΪm7CTZj-]L6YN.#/;/դϬlq{ϦE$pY8k o~R86E*_I"dLt^ǑbYi`;#ZS鳩dI DYw4鳙z~,O>pKCSdkGW2-mBq)LL1H'"/Yx/M%3&¾PE}Kȁ,&kjPV'!}@!"yk:os= Qj-5a_6 mmVXe ]ְL0 EphV~t}CQ ֏|OonHzyk!(h1WI+Q}NjttW^ztuÖq^VAK1X rX&7}HĔBQ!+ Pl8AD(D[a$}'kJ2ݎӧ/-; {{qOfA53V:q]σZ{1t>@?/ YSwEHdYJ7&[z&3%BzJ05'i ;V tq&fl9kZgh{wm=ZV[*f^Ad1S;2P3ƫ7'UߍMb%pz>Faq܋'"Гx=̤+!môg `㇁̔|&[)j`ʊ @`H{RqiQ:S  rryq[^R \HdqEfR~"fg^X\,3UrO$;KIC1y@!?*yQ}+ 3Jz-dt Ǘ\4mȒsOMvjWm#o('Uy3&Űzr}dWSU$jye}#F$Sz*_yjz^l&5ϡ׆2Cߋj/!:y]z4.GSrf Fo ђZ>m [ *uvz8[mfAj-|-qQv iu oIJ? ]xb΄b? ~7J73ޛZC7EC#Βdޘھ_6Rc2nKjxb"ރ3gO z g}RT+::F:3_ȤeΟ˜;wa)I$IEӜB~˛}-/ζ o0O^x9A)YFZr3*{ hh,>̹ݶOq⥢MNHw7(QIթǤ$_;zξ3]aۍ�/(v 6(./2|dQUiP45>k 38N<AD%`hCZ}9{{nS 8T_!4aljOoĉMYKFumVpp&.h֏Q Xu5+:;1>l.B cnG|>b8JCQcmo?օXۢ啷QJ^bD*GENMc? Ѐc8sm;yc}@yѐ(2 CM5̉ ?nFKd}#n(29$Pkir ]c8#6Qƥ۠kDXJmM?dvl׏HߗqSmΑJfA R12l'6VLtF1Af9r<@Ű3h! 6">p#ŕ9zDnbRv!$dt")EB0/QĒ]6*FÄ!޷V] ma+|}}ri\7GiZ}$et[晛 eT-V^^Zs 0>9P/?a&2\8v}~=67"4kٛZ-LMYcj)F{Ogѧc` ,ivvP"}jNU&DA cƚu! :EKH@*_No>d4B4"mlnV"fd[_qqmYl48bVo7M#c%Z Lq| :U;3qSS- My))(d"b3C({l}l(6V϶&$!Wdk:PԌo o&kRJYU o6xR1cRE9PrԮc#F'{ۀhXi5?)ـp{^魹O\/ 6ʥf9V{ 8=~>`oѻj6,3~H[+z͈߲^&@48נFs{Ȳ40-jD:J.'.wFH8 Yh5m*9A{ 5ɻ /)uy, )ȋ >) Xⷄ32ӡ?PzUȷs-Q7V$|jUnQI M*M/B=@ONkM~WgminӜ'砺^`YǪq(&+HfxZxXSfC-ЦըjU(JNL0W EG굜zgڈ}5<%58hxZa2%ñ/v}fJꖪkW-΁13-LY]\TuU./N)Y @qÌpZ51_@B0ꄹ :r0wloIEcÃCQ?Qs>$j,C¢ X~Qj9:`ˍZ/ %$C/T%g %ة$hCXh~Gb_:oTwϬxhFp3ݫ+ml˷.0w[2}C#mʞN,-~*ڈ=pbf\|zy…Ѩ~=<9f1.;dXf݊ܰф:1<|M =GyRku T"Qd Rgdrh>IYk=( d&&q>i];Gr=H1VR2|'XS^w橐n/6Y29x(p8`*j!}VXehh"'LKz b>gå_Nq xj3*"Vc16BX>;>I=PR!4A+t< /PʨW'ϼW/yTi*SpsUG[3$zK-}-"!̆pd'/%ۼ>s]V7xFzŕdTwfn%İ>aL^ᶕgcgg~]~cP2xe9= / B:f&Dcfjwz-I:_RO4/OLB?[e@ۑ(ӭF1&~yQg-h9^Y~"(6Q^- ̇3uۆڱ@lj|P6E6v5FCG޸45̍\p>ΦQW-xJ(43D ZʗW W k((ϛ9Dtpkazt7 O%na흸9!F WF?~WR,z#wjbϯspv=ZMTVKϪӡ:αd؉& 셥 ge/,O٭r-59ٙ=֪k<xHh]D$HȌ~R[NDeщS㯵V)~0&9LS6N[}E=avWb\tLIflr1X_ Vך To *cM;LN<)fMdg/eej>:uԚo6rn+Vx?n<5^o=$fkWo3A¸gͧ~Τ:NX|6"\HG}ϥ]8={Nӱ ~Eی9﫝Dn|·/2t,]c$!j\hN#Gf#VJ|e%{L5=/^D`>z[N3crs_C9> 5Fs X0c鸑/Wh[>A`czeW\nv(}iJ0–SE"LL7/'~߈pmoVH>^8_ϻϖ\F'3O?3{տ&?xzͨ~fC1/۾n}~R=}N3O'SY7F].=/_p9yW?~JJ`C!ZE\ q/Cm^W8-՟@)q5V>Žqu8W߆xW~~F_F-E\5|=u2&qWߋ9C=PZoph/S.G_V.B~+)(39 [տFY U\ܯ_pF_&pXzWy)qU5@ Пobq?WWqq{'F&>W7pYCn۸\P[wp{-\sT,-w W/ 厥0:`oj JM\}PͲGb߼0}F!O៬=_ڔ3ӰJ#z>LC5p 9Wsxgb峿nUP8/06sq%)ʙgc‹~3ŕ2fЙ3o׿o&7l=ãoL -4gfo9U޵}ɂ'N%աZj|8fq+y&A9e z~+^&Z>JUi5ƨ*JgJUBmƞ+^ ba >ᙧ+i=ST|"sϼˆj=fN(Yl6Y{srեgJ4DڭFJi}Tx!Q\Q _P#vvY |.1q+e0*s5rc(ɿ˅j-Jw9ZLU41;;JҺ3o*FS7>C;>+/eU5njpT gzZR0fO*Fڝ<};aݛekkS=f\~jF475oՇ3<+~ƞ_ߢ]dTyUYݠt_J ?p:u}ISZn5g7rT3r{̯|vCFSj5@ݷ]G#Po>},Yc8eUF]TT`qISS&u썏ZSb?7=Ԛ[#ZljakmzxrA711{=̿W[ߠVROm=ltY8lL: "%>|/o3çnw&5{vYz&d\Y;.]Su}-W ׸kb~}j!r3©doOʯl]^IR'$UC EbeXΨ&vO(;[*;Y5 <-B-0={"yGJIScmT`CKUnQbi~oc#ߎݚUKUo/u:gw?&XzZb$H?C]_C8yI\7tnV?W!N阂J^PpNPB.8du\mUahVc7NmjyTuW]oPX#IW7=Zobi-\>&~9| ~?cR\}9NW_?oy AW+[<vo,?@ c _{WmոA'n£mL k=p=V ~}'q1z\#~E~O^.W)4]ᬊTJpTGg=:xߌ_">~+Zp#AWFO}3qx;=c&_:W/ytQ We,jW<T=wն~5\u=oӱ']p~-\0Wi|<oE"]\1Q0~WepUňa\Dg>QGqupuG''9F㪇{?>_?vfZCOoWӟCGIWBz?XgH~ީX:W~ʷQPCrڣ-*=#E"ٺ|,(K] p\y>swWʘbGM) L(a$KkKX!l{*Pz8ӸS0mIj␘C\0e0V?C61:{s 𛔶F\F!Bukb p ;"SOF SVQv e MT.?mc zD`k$! 
LpmxY3S1,әٝY ( ѥe%q觠m:r_8Ry(vֹqLjl4̊071قf_G.ҚBp0% (0e"38T[Xd;TkZ8AWE`"ADcPc6wOPŅ|u+ډ;u sNli&;wQefer6rg}Z|4ctN NWC:8s,&%.lu]-c#4AӧL",d#=(ʸ7W SI, }`NyqdfRz0D@R Oh+/܆Keg$i'#e~>9KW߷Yl?GvMȖ"7Cg3m.rpa/QY9s*57oAbds砍{^d3+1Ddѽ4iIl1GJtn`t)[)u_l/2NTz4jMLg@h8mu3sS˓YMut݀L t [)j_&ӕt7 Kά&V`j\HcW^.~wXW/[$Zo-c*-as j cD1W N +#JLz "َa1k6Tz^`3iON60FfK*f_aR[h([fz0QW@CKJnfm{~n2eֆat͒~0i"KC WF(3c±xΩџp|Abw[(7n k].tC9ݸCFdObcq4PSQJD72oS%l~^o%dsˉEsq_屾 k Tf&6/+զZϐWgeJ^z@E9zӪ0:\,݅_+1Un42 s%)/Sq; e71T)%=M&G$q%BO/(r+;lpF~^JR8H&Ci#߀%J?2M^S!=Jf_S AQ(E4Gv5}#k Ƿ(̲*upd$qr X`8ww{JdUwq|Oɨ}Jﳌ>O UǑ|+ 2Oa<foGFC'Zy e`|B׫G3ѣ<@*=@4"{FGH @0A?3=@4"y Oy OQ/{f#y !< Hdxy zmTJR)ҧEG|y D= )CfS3lv <}h e캸AD^;%DOvKтy ɾn 0 'ՐїvWW /~VR6IY;=7LY k7EQQFIs`d1sϼHiTNYx 8eoYSU8VMa*2Jތc~<8rnMS6mA\NR,yln<Ԃ_Pb=U&廸2O<o]%8R)iOQmy+ҫzW Сq_֏%m[? [tpt*Wf`6°.:ZknỈ-Iءg G*P=^n/;?ݨRWK6uq>SP7TA7O3[zm-=`6WL II=Wj/6MBs':brj0脳'{֣3=Y+QD\M)w*_s6rl2o^][sWKǦ}gWxLk_y⑸uiz5ճg;魯5"m吖w?"@t^*yQ |Q8WQ(b4m8011=Ir M Q0ϟ3k *,Jġ0W+Xk:SK?꺹B`)oDi9f"dARVQ S2; *aW%'F5T 7oqh/jVu״!xm!M4Emc:n,w= [իm ;E. *;Qg XÐQ;[i+1! oH.^G\p}[_};%ޱMYg>$raAjPb@bd~CoG$fƁ8Z~L8p1ʀaq`3q`8w q6FqmʾƢ<Ɓa`@b8xc8xe;lofe8E}8p; Ɓ<ƁBb8#tOWّ[ !LU$0+.3"?~xJO$WXJN)O»l`&řDw/ŘLp?RJX b}в˧8`7Y+0Ȍp|2j۷_ݚolξa_u$d$ BxS]RJgmN&1`Ԩ2k"!\x7_j~TEKztsKRU u/-e.Y̨Z%!܊T\/ EA Jt*eG7Qj R*ԘTcmgT@tE (,+v]byg%l.W _O?ADosׇlz:zV3`Wo!*.V㊁e"3o}}TE;k8|p(pCgD!7/aāpKC &P}騮@G9uppppppppppppV9$Ʌ%ˊD2eH$JTQܰJe5* ]_(i}BN҄J.ا*՜/o\xŒ7ʯ~DEA"YW 7dRk5tMՏ+B}lրrO"bŞf{!'L>kx/fOq?C$8߉pk'%\Sǡ'7yU?s#:8{) Qxrf0Ѿ45Ѿ zC~u~v[fڊ˖g2[piǏQP&TZ*-󿿡jkNZU٧-')U3kAo+zY+zTJW\bS9= =*+C0~oϾ7^ |cEABɚi\Tb51j.sk-'ߨKMWכrA:گ傁'4/t ~>}\a.-.-^b#:{䉓k8p?|6S&Or />vߪ~?e;^ߵJ);5ěqp88?lǟ}>)g9?/x^wԐr#A5xÉ{?V\НԐ̒嗮Y&Cg eGOS>H;TZV{tK^״x]{1J,8{,hohY_pLS#XZyD`?ҫ?''Mp?>AqCp\ޡߓboK.I `?5{8_R*//Ev-hhuhOJsrQ?W R;;;IMHV#>j̐  3I&!!!h|4wW]uۭJ]%jEY][U۪vn={~̝I4D )?{syNT-{8ՉlQR]9Ď??KT!<%F1bGGM+KE$2#F1bĈ#F1b8c7IGGLq?.0ɐ,ԲABz.+vc^e2bĈ#F9"iG,!%RFgVa4v>J5w\)҅%^ܽ"+rEdźh$"=nj m]De׬lHS4"EƚVo9"+vRk"kYT䉴H{KYhDhX-_C#2.o "@eбfYQ(::[W7E;:X ZdMM+T]hxE[+NԔmmUvň4DۅEWPYT)*׏B'NԾӼ^* B{%nx D^ yYYZ8 >otqGx7}ns(83bVDor%yiJUbt5ZxK~^[Ř[Ҿ="(<fef֔K#c3 1nb[zSRI+r-k\Q ;M([Ꮞ˸Dە+4Y`6V K4PX٥(@&%c%ldm_' eQ. 6ê@C_.ɐ"T85;%O8Zs,5xdO9+pUo^_%5}KJ8.X#k!:þPx~uW/kj!4+Ա/ 6!Y SŮUe Xn`CMx^ 7Pü:_E}U0}eU]*PF VˠT헋C8W".S$a=*V̯WWUSKPZkh`xA/X)0UTe> ٙt>UIGNEK%jc}PPTUꃐVY*J.ʤ UNJh7 ڶN.Z0VUtÇU?6T_Uߠ۶WX,S%{ZR&_|T^3ujiҲQVam)p4;;4u-yHeb"vnOqB1bĈ#ǘxPHl[RzE@(tQގ1=ߪ\$O[4fiqr.}xW[qQǻKmxP~͘MxWm uwV3FR Vo.;>>/C4Բn ٭6ӫWi?/I}\)嶮(H`yܭXN1{aOGx/T}NCL@@]X+pg9)l@zW-:6+VG q@]x7׊ь.vx{Ia@~bGb@3z7z#NHJwbB2V]r%0bȱ&  ObPO%ӦMO4uz4, ϮÁ*uOx'#F1bĈ#F1bOy#C% ON{RJoW*a=B1P ]Nu _ #F1bĈ#F[ӆ))ZYu,}y GW.kz(IQO%?_4dOi ;"MT6~";in+ڕ9 k[0SdW<ڑ}-2+;;9"kAgSN]Y錴t٪-|m𔣏8'蚵aկmNcD~uNJlPw"wFۺBZݱޥ+ X뢭NDo!;;i"K#FUXԴ ۟#RJ+..Q))[ä?yϥȀ~"\,(ٙJm"'K)t)rG+6"1R.E8xȥs) xإ(Sc bK1@)v9JӥHC9q)F!.Beeڥԥeeey˸m˸aZ&LghrKXmR RQ >ֶ[~4|}O-f G _2@O#Ep/D^z6''!~Z9KNv8a³TVT30C+Pp:5̧33اNfL.15} BGbJsI˼;]L5<`p1;  ѐAi 6T `Ci8C'8&P]-clW2co%KIC0v<})5蝥5c.!bPe5:ПKJR IT0[L~ˁr@M¹d4`hfcЍv~ T. ;ɈiDNYNYfYOڭG@1Zbfp0gk2y6nMҀ1ȵT1jW_kt^gOr<w^e>j }s=5Aj AQv0vk/`6QfE̤f5y2 vUюzX]ϳ4`ԺAJn,= w kzNYi bD)=:F(S ܿ e9țy \717`MY0 D Kt #?zZ4,)؃fOVoX0 ۄK6\R][V7 |'O0v^~#50&AU`_Ed NLip}t4F#<`5E@â&YC>j.%s%Ph4*@MT wzϭlj SA ~V"ϦϚicl ZNͳ>}^bhL`/WHDU,k,_/bbh_:oo0w hpF wzgOj~0:d-s:)ٹi$2. 
=}E|5}6sװqP3Ơy;M2j.!/v3LCzW۳%ǠX~3#d338|qUK5e$/#Hk)n=@=OI s0XNC0Ǖ)p;8n'LdPaa/Sƥw4B@-x]FXFЀ_1Sģb&`Cz&dC g%٧ Lp3Bp]{ Ҡ6q!ůo4fS/At"u.qܻ]!wXi'JV"K$;vnֻkkծM - mZPRJ_۔R~_S"oc޽{Wk+zXs~3gfΜ;W=WW34x.`u021PPD.`  a:l .t.zdI^(4N\eƱ)(buRE)ǁ,iJ<9n+OFEƋO*CpF: ^  >x.`F2^.kTT2uȏ1uq^ŻŸElCCJ4êA{^4pU2B){;\JY?;JbHDnpzpxPW.` -h%zpxq^I%C>"2/b^]-e8*)UO,xCgT<v.>@* fN?M'".vyuj os2T(OG[g)i<+Fٌ2<<[xNknvcwkMy?'r) F W`>,H+QӤiio|Q0NXaߢgQ-!8d:!%dQ=D> P^O_ESM8ύÈ9mh DGn%қ* FCt!.rw!{$)DW3Ԟ2:FG"?sGL11?^D71-FʈN n`4Ay`ԌFF2z  ͌ލh'/"sV" P{ Dm܊[ڬ۔VQ?]EˈvS{sy/ja=zQ }(f/0uqF~AF7YR|%iu'# 2B4Ȉx;D~ aD`q.p;,4Έz ÝVۃ+-V.0tUBtџ!z%!U(ݽHہZUxfLGȸ8a=mId1`ncDy3i FX`D&1D!3VPZ ܮWP{$*4nDS>(T4# #*z,C4͈ʛaD۪,!)0:AF}!z !pJceVmQaDZaB!F$K2UV71y5AШqPzFo#%AU#p<@O@[ABߒ#M)fa<x)5lO@;Èz\͹?7}744 sW#< ZFgDj=s1 9A3D_$ ^W#z0]`YՈ.D_f4,O0Ognj>+h,C:@#9O"OpFyպ:0ۣ_2nGT,a%/9 I*#Z}ÞgDs&pY|~C aIrMB,wзaFIo1'.p0/3zѷR'##*{eDCO` ԾL vʚ{ 9 ]qS?dDkeND<"h]96~^ D?&GoaN׈,3aˣD槌pkx3g$2g#d"F(i`*E1&PQUa]A]EHG\Oal D}΀IwmcQ3D[v5361̈Z3QW▸X@P&ka\Cd¸+hnGZM`رjMZh:aܗk% j?Xpʈf`izĪ=؏PKZhCAhբRD~K;#oxoQzـ/&Ҍ݌n`t3BKVYw1‘GD3oZ 8> ap0^AZ(So>Ak:  Β^kﰞ@|z`Lj*3Bt}?#FTFڻ{a 2t0[{ `^Ht{!>hAFB4ˎ0F8qaƀz>t*ᕌ(&C`V -'`Q=!Іi'p(OX7؄43 N" gDyψʃqf8@-ϱKl]ƀqCLV[@/)F4Ҥ f60#ˈx 3fiF |j{n0'\`D:e[ )=!FԆs#ĺAFh5r aF$?0\@y>o?n߲Z⭌ɿ&F8c4@>Dog g #~NczüFBd270j@Q팈-WW3ـy #50pjº=XY)"XWR`6G9$41eˁ&. |q6a J _83{>A6~F} #!npueXbb61^mnfDZLA??O?#fFR1gDJ[LiuFW1̈́Xv2;Qˀ-Z%0K݁ a-GV.`}ckmړI\Ǜw1چ#"#{ļ/d3"d~(#\󚽌1" <묧 {7,$[zj+ayG? ;$)I_IcR;e=6Y\K&؅ ;Гߎ>Av"ؕmzȈz#z3B˶fDsH<I*|'!`DzY"Xa{R.MャЂjTGk5?H ~F$ɿaL͋_!ia>.pDGWh6v `^#j_goc0!ڵ4?ʈZ#,+$Io`mxQmȽ%Ө F$`B[2 7sYڏORMg-f D⺃ݬfG4#TaL@xTkhdҧ'ĵkXU#jIy@0v,г48'h8"Is4h{l_b96=ZAjt*ۜOJ5&G(&1G{֑0 +S+@ܫ5?1:Y4ˈg1F7a;6a fz'!~*?bD+#%1xV0Ky# O+#^V#m#\s{Qmp>"<0a聱=8t>V8/#s[ģ!D;\R29no?#>F9@C#*Wxk*,>*W9@+QVO=Jo;yQQmVfsaU"ڧ=0* vQF4fD;h!3)=pnaܤ{/#-{^IK|5#ū,yhLQՀ4滬sqFT^y[z"3#>z@׺wz/7{)FT{0yIB,`_=)Fh q$h|iauK4ÈYBaDraD v,! 2z+,x@<.ؿzF8/<ˆzCpt1BaBeD1AGPc'a`mB$MJꁾ+3/~m2"ͯ[A{jwb)>=\z@aHLO4_`JD"Fh1FT1nF)w}ܒ2 #0gK86R}a%Vm`"+#H ۈm#b~7$ĹzS8#y`'ߐx6p 8{JS ?Ee)F$g{XH9OH/=1B ׋z {@v<Ȉ(|#9G4TC+/X`%/'T(TO]]]_롣J[9oy * NglIqI&M4iҤI&M4iZt"8W'ĕj1gEndtP% %![։Cc=M4iҤI&M4iҴ3F5Go?IeSS_yxdS6?>V =uB5b#:`QQ+1[j6|+pBmu\A9Gid㓱dC'Ar~U,?Pn%!+_~_)^W5E~˨8MB}@oSј(4Հ\K_3&M+bp.u[fդI&M4iҤI&M. 'й+5iҤI&M4iҤIӲWO$rB/|N%KnW4t8 %bP&I%Q&m`F0 yDXnhqL3yt*访H :jfkwYjnewgh,?KiE<oK Et RHMgCn* O٢(T(?;Nd{JЙx"[y!*G )Aaa+.>OfBt4{%2 gBKD|2ULB[E_oSUNdm<(گZॊW8+`T>'Zb먧Id0SȠ9HlH^L/9oD<+/R]0#t, TXS#0Owz 9=NfٝE܊X sϭj>5$q$Y΀[|I~yx8NFbU]^aYrv;`"ŕAys`r[ dj`0'/RݸwŸ& efbxf"heVZkeZ'/gM@`-d,N+jD. M*N$BT$rdd-PeTb[ڤ_Ulp6Mج6~lVTYm:`٬6btf煳YmRfͮ gڤjfIզ6w]8&UlV}lVTYm:T߫Mk)j"3]Pt!cQ"Z^p2".bH: JNkqYv('tDN-ʴ@sH:#=}{Fwuvcsx( g%PQpY#¶QG PH4Zb6UFƑ#ۙP6|?,.g"t&Vqr_}ͦaG{*FcD8G<NFD*ϜNu6O ٟ(#,ґe 2϶VO35L~6y6c]VfʦRߡ!GaO`\>sA]: ÛVrt-@riZ=GfP:RT[gk)㪽lv/Zv (ƂT>Mr-ʴ֢lp`M93x88hEK&x*wʼtKfI= b%wXb+Ȱ:<8R\oq.wZRYF[tk~|S"Tqصqۡ:+[0@ɗO  ,-?&$RT)7/S ymמgUe^ty9(|y יoKA^D;@pUzx]v? A% %f.рMDж*%-i h ]H@/BmRZKkʖSȬIm_^Im@R;Tڡ%u(Ԏӎ ĴJ1u.ZLWB\^I,?ORP j*TM^W^Z^i*/UJ[|wi^Һ\juE i iwy!Uw]@HճYUIj"P]x(*ûdp/(*/ʻe[ɖRw/`%;Z[{ Jws 4.ՌԖBkZW[^+|kyuEM~Uou-*j\Es! 5E, g+EVl߫U |&(!adR)l㫔W n}&(!ץXRZX-0%u5l-f]Yu6. 
yj]͵v5׮.]ͫhWsj]͵jWsjb\x Df%Hv5ג v5b:v5_قv5׮+_Zv5_B݊ծiWsjҭ]͵Z5ծ+|KkWsjl R7v5/-+K.۾Io9*l'|盆C~P8,Dj/UCсxhP.Z*aϧ|tt&=HkK\% g#B>Jy:j %(rt8z#E)6L69%霌RyAC2gea#88,`V0ܾ᎕pg w$*`k%1]+]0k%1w$w!|i xA˭Epev͑Ŵŕ,k;M|?XDŌ NZ'9?CY7m/M0/c6 pNW!(mQn)Z~lO*T >/;/z'z'z'+a= ԓ@= r(D_}k~ϝ͆s=vu2Op6-*khe- ijP43F1d"Ѷyx8Nmfp 'v:;}#nrޱo,J./:{Qx;C`[ U`{IX.`{ W`GIXoS.;*ణR;Ka,HH,vVbg,vfkAK$$*`RKؽ %])JkAK$$wUJY] X"!wW"}>n]XoHʊZ1 /eƗFtz >eJ%e>+h4|.<ؔJ|V2*p%Fx^I>t:N`ၧTR泒'{|y9]βSXu}]D֟!%BeRt&eJm]R:/‘i 2ZђJ<nM-Uq"S!gNkCd4e7NWSHqӨp[(a{"ykx?8=Wˠ>">j%uګQԶ*rx2 &ݾr;oJhR "o&)8qNV u[1n7D7[M8cB)mk8 9r{/ʨdGM|b?UKSS?b 8F&BȩZd"J7ER_K*BEW ?,;K䖈հ%%2ٞ5isI[QM*IKMږ&n5i/I{e5iwIդí&E5騬&.5Xtդ&դӥ&KT.tդtԤkjVtWVnt/QMvdWQMvUV].5ٵD5VE5]YMvdcckcغTq]Jz~Fp(?TC~u G۰Xq?:G@Cm,p?6ȿnDL 'ͥ7Tӛj)7˴\oM57JSJoM5ƫ0fӫzS͵&zSMo{kMT+XMRћjYpu"Vgenp}TuF/uSItάQBͽJ~oډ'1a2o5:d)Hwfl:ˆlґ>. VOYw~\eT&ZIeqeDTF F@ptl(,i6+@TVrm)VrܞH%c G&;ήfPŤbImprqD'wKn#P:[]k6 ɋ&@/ ʦ -afeҗImi(>g(JY4vӲ~ )NV.%JSŦj٦Z ˊS}{[okɯ'g@d:ծh(>jyV(bTU0ފQ[+FubT+F-nEx?=8똷9`"PEBGemYۼ ,Ew λ%Έ%¥"Ν6XhM/nqWۺ~rA-`ik&79#JJq|0- 7 f`d#MwrGbs-EjctjXR<*j"@ >?L+oy~] ܑ<`X(h,\{-29*+}.1t哏Au$;T쿹%_Â{pgKYD>[v,}Ey-nNݦȎ)  nww9nLop`pȿn,) K΋>@˓"yV- ͜N1NEn?tl`h Nm tt௼D fkW.>+$'/³D,Y:^,Y Ο_gA<)k#!M;ZW~9nԶYw# bU\^ g> Sx*?NfezaәcM2Mk qNn43'E49H0ЪOzۏ!~|=W/Yud,Rlԣ 2dll_x eNZmF% !_#w ,/}hp7td;OGұp6M;JVfᅢ%-2JjbM >ACX6I!| O%T2I%\=n4k40Ξ%j<ZDŽxf*fFa XxǨzF0!]S|M2Fj amb?BzqM[+P/652dZjy7n#rqRf (`wkzf Rx'm/O\]#9l{[[@O2ZqţX.z<rbNa!9 / ^bE.S–Sc1˹XMrvYr#}ãA8voxhg`h,phF=?ܱ-/  _C=c=㹑RBYP=Cc  Cc#=}AxX?7<4x,`P1Lw`(;;>CՑP,u<?0J Hܡ1mC=G{!]c d]Cdmtъ5h=LJCxBhx(toptB{{; ȺGCiX#+!2)du9,@6r@?\pttx4_\6"d!wϠ"5 h!hѠ6|(8t@> M2YYOu'o$':gWx_Ӱq#8[.]+x} ';<lo=B&p_FVA*\/| bh, G n&L6 Lpό@h8'͢q4v"%et "YB[ECO>ـh06 E.Ʀ7Tv۹ɩP6|\X+SSlՉ ȴjuT"IT7a}YBrq&QM ki[9V:5'VB/`eoN3h `<?yRmL&AzH|jBflgP[ zci%➻z# uۏ pzPXf<7dvuIDMWgNm# u^BoFͤ'M7U+=LbX4#XǢQ:uRXfy`XgZts{>y)u0w'ET<5{vor$)Dp=yyv ^'Sð^+O[/=F,GfOlpT9=WT!`XM:GF3sN!#w!4*d\E!M# rjr'Pmk:)@}]/6 7w6&mIlDw+i 2q^N*?$$5%{KQ{7 o4]2[sƆ] \l(xegu{a5͙u?W- ~{C%`M—yN xɜgs xkחߎ9Ð극6׃?F؟\=glu; o6cz9;svSkTt ;XGk#369pސs+8W`3a sBs=_'[1N.spXǮ{kLcLDzrXxXuZo) _~ )U?# )M4iҤI&M4iҤIJ%w O$ܼϔs ȍ=K'cښV V`XϡgM4iҤI&M^D/OIN"'gU!jD\i 4BGvvsΊ5&ZQ V2NǒقDt:!jjkZ_dfwC\8/sc]]sƗm1r΢ZS\hNHFO6> =JBC˼ZGWkwY Z/ @e, ?g?=JߵFU# 𝇺ZCΏ[= =W%34-'\˩m Ɨ7nh_M4iҤI&M4iҤiՑa! }Kr/RUCIjҤI&M4iҤI_W'?8\0A'\Z#|جE8~K@x0_7VV*xirmUmg_LdSa 5YW}\j"S(t٦ߦ-}&M4iҤI&M4itIaBi+sN} "gjҤI&M4iҤITuy'Uaڬмw\x.uuX}W_|7¿A>cЏ&M+vt6l)+mlTդI&M4iҤI&MV!֣35DvմHGkҤI&M4iҤI"ҝf5bxP̀ >(UIۭ pM"z:}8#ET4eWGc\/MSSQݺ2$p[9pɠ3e@]^4! [m> i )s|9C_;vL0 {aa#ȝI\Qgalera-3-25.3.20/scripts/mysql/LICENSE.mysql0000644000015300001660000004517713042054732020161 0ustar jenkinsjenkins GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc. 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble ======== The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. 
Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. 
You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a. You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b. You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c. If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a. Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b. Accompany it with a written offer, valid for at least three years, to give any third-party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c. Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. 
For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. 
Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 
END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs ============================================= If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. ONE LINE TO GIVE THE PROGRAM'S NAME AND A BRIEF IDEA OF WHAT IT DOES. Copyright (C) YYYY NAME OF AUTHOR This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) 19YY NAME OF AUTHOR Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. SIGNATURE OF TY COON, 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License. 
galera-3-25.3.20/scripts/mysql/debian/0000755000015300001660000000000013042054732017214 5ustar jenkinsjenkinsgalera-3-25.3.20/scripts/mysql/debian/echo_stderr0000755000015300001660000000003313042054732021437 0ustar jenkinsjenkins#!/bin/bash echo "$*" 1>&2 galera-3-25.3.20/scripts/mysql/debian/mysqld_safe-5.50000755000015300001660000006323113042054732021763 0ustar jenkinsjenkins#!/bin/sh # Copyright Abandoned 1996 TCX DataKonsult AB & Monty Program KB & Detron HB # This file is public domain and comes with NO WARRANTY of any kind # # Script to start the MySQL daemon and restart it if it dies unexpectedly # # This should be executed in the MySQL base directory if you are using a # binary installation that is not installed in its compile-time default # location # # mysql.server works by first doing a cd to the base directory and from there # executing mysqld_safe # Initialize script globals KILL_MYSQLD=1; MYSQLD= niceness=0 mysqld_ld_preload= mysqld_ld_library_path= # Initial logging status: error log is not open, and not using syslog logging=init want_syslog=0 syslog_tag= user='mysql' pid_file= err_log= syslog_tag_mysqld=mysqld syslog_tag_mysqld_safe=mysqld_safe umask 007 defaults= case "$1" in --no-defaults|--defaults-file=*|--defaults-extra-file=*) defaults="$1"; shift ;; esac usage () { cat <> "$err_log" ;; syslog) logger -t "$syslog_tag_mysqld_safe" -p "$priority" "$*" ;; *) echo "Internal program error (non-fatal):" \ " unknown logging method '$logging'" >&2 ;; esac } log_error () { log_generic daemon.error "$@" >&2 } log_notice () { log_generic daemon.notice "$@" } eval_log_error () { local cmd="$1" case $logging in file) cmd="$cmd >> "`shell_quote_string "$err_log"`" 2>&1" ;; syslog) # mysqld often prefixes its messages with a timestamp, which is # redundant when logging to syslog (which adds its own timestamp) # However, we don't strip the timestamp with sed here, because # sed buffers output (only GNU sed supports a -u (unbuffered) option) # which means that messages may not get sent to syslog until the # mysqld process quits. cmd="$cmd 2>&1 | logger -t '$syslog_tag_mysqld' -p daemon.error & wait" ;; *) echo "Internal program error (non-fatal):" \ " unknown logging method '$logging'" >&2 ;; esac #echo "Running mysqld: [$cmd]" eval "$cmd" } shell_quote_string() { # This sed command makes sure that any special chars are quoted, # so the arg gets passed exactly to the server. echo "$1" | sed -e 's,\([^a-zA-Z0-9/_.=-]\),\\\1,g' } wsrep_pick_url() { [ $# -eq 0 ] return 0 if ! which nc >/dev/null; then log_error "ERROR: nc tool not found in PATH! Make sure you have it installed." return 1 fi local url # Assuming URL in the form scheme://host:port # If host and port are not NULL, the liveness of URL is assumed to be tested # If port part is absent, the url is returned literally and unconditionally # If every URL has port but none is reachable, nothing is returned for url in `echo $@ | sed s/,/\ /g` 0; do local host=`echo $url | cut -d \: -f 2 | sed s/^\\\/\\\///` local port=`echo $url | cut -d \: -f 3` [ -z "$port" ] && break nc -z "$host" $port >/dev/null && break done if [ "$url" == "0" ]; then log_error "ERROR: none of the URLs in '$@' is reachable." return 1 fi echo $url } # Run mysqld with --wsrep-recover and parse recovered position from log. # Position will be stored in wsrep_start_position_opt global. 
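#
# Illustration (a hypothetical helper, defined here but never called): the
# recovery run greps the temporary error log for the marker line
#
#     WSREP: Recovered position: <cluster-uuid>:<seqno>
#
# and everything after the marker becomes the value passed via
# --wsrep_start_position to the next mysqld start in the restart loop below.
# A minimal sketch of just that parsing step, mirroring the grep/sed used
# by wsrep_recover_position():
wsrep_print_recovered_position_sketch() {
    local wr_logfile="$1"   # path to a log produced by "mysqld --wsrep-recover"
    grep 'WSREP: Recovered position:' "$wr_logfile" | \
        sed 's/.*WSREP\:\ Recovered\ position://' | sed 's/^[ \t]*//'
}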
wsrep_start_position_opt="" wsrep_recover_position() { local mysqld_cmd="$@" local wr_logfile=$(mktemp) local euid=$(id -u) local ret=0 [ "$euid" = "0" ] && chown $user $wr_logfile chmod 600 $wr_logfile log_notice "WSREP: Running position recovery with --log_error=$wr_logfile" $mysqld_cmd --log_error=$wr_logfile --wsrep-recover local rp="$(grep 'WSREP: Recovered position:' $wr_logfile)" if [ -z "$rp" ]; then local skipped="$(grep WSREP $wr_logfile | grep 'skipping position recovery')" if [ -z "$skipped" ]; then log_error "WSREP: Failed to recover position: " `cat $wr_logfile`; ret=1 else log_notice "WSREP: Position recovery skipped" fi else local start_pos="$(echo $rp | sed 's/.*WSREP\:\ Recovered\ position://' \ | sed 's/^[ \t]*//')" log_notice "WSREP: Recovered position $start_pos" wsrep_start_position_opt="--wsrep_start_position=$start_pos" fi rm $wr_logfile return $ret } parse_arguments() { # We only need to pass arguments through to the server if we don't # handle them here. So, we collect unrecognized options (passed on # the command line) into the args variable. pick_args= if test "$1" = PICK-ARGS-FROM-ARGV then pick_args=1 shift fi for arg do # the parameter after "=", or the whole $arg if no match val=`echo "$arg" | sed -e 's;^--[^=]*=;;'` # what's before "=", or the whole $arg if no match optname=`echo "$arg" | sed -e 's/^\(--[^=]*\)=.*$/\1/'` # replace "_" by "-" ; mysqld_safe must accept "_" like mysqld does. optname_subst=`echo "$optname" | sed 's/_/-/g'` arg=`echo $arg | sed "s/^$optname/$optname_subst/"` case "$arg" in # these get passed explicitly to mysqld --basedir=*) MY_BASEDIR_VERSION="$val" ;; --datadir=*) DATADIR="$val" ;; --pid-file=*) pid_file="$val" ;; --plugin-dir=*) PLUGIN_DIR="$val" ;; --user=*) user="$val"; SET_USER=1 ;; # these might have been set in a [mysqld_safe] section of my.cnf # they are added to mysqld command line to override settings from my.cnf --log-error=*) err_log="$val" ;; --port=*) mysql_tcp_port="$val" ;; --socket=*) mysql_unix_port="$val" ;; # mysqld_safe-specific options - must be set in my.cnf ([mysqld_safe])! --core-file-size=*) core_file_size="$val" ;; --ledir=*) ledir="$val" ;; --malloc-lib=*) set_malloc_lib "$val" ;; --mysqld=*) MYSQLD="$val" ;; --mysqld-version=*) if test -n "$val" then MYSQLD="mysqld-$val" PLUGIN_VARIANT="/$val" else MYSQLD="mysqld" fi ;; --nice=*) niceness="$val" ;; --open-files-limit=*) open_files="$val" ;; --open_files_limit=*) open_files="$val" ;; --skip-kill-mysqld*) KILL_MYSQLD=0 ;; --syslog) want_syslog=1 ;; --skip-syslog) want_syslog=0 ;; --syslog-tag=*) syslog_tag="$val" ;; --timezone=*) TZ="$val"; export TZ; ;; --wsrep[-_]urls=*) wsrep_urls="$val"; ;; --wsrep[-_]provider=*) if test -n "$val" && test "$val" != "none" then wsrep_restart=1 fi ;; --help) usage ;; *) if test -n "$pick_args" then append_arg_to_args "$arg" fi ;; esac done } # Add a single shared library to the list of libraries which will be added to # LD_PRELOAD for mysqld # # Since LD_PRELOAD is a space-separated value (for historical reasons), if a # shared lib's path contains spaces, that path will be prepended to # LD_LIBRARY_PATH and stripped from the lib value. 
add_mysqld_ld_preload() { lib_to_add="$1" log_notice "Adding '$lib_to_add' to LD_PRELOAD for mysqld" case "$lib_to_add" in *' '*) # Must strip path from lib, and add it to LD_LIBRARY_PATH lib_file=`basename "$lib_to_add"` case "$lib_file" in *' '*) # The lib file itself has a space in its name, and can't # be used in LD_PRELOAD log_error "library name '$lib_to_add' contains spaces and can not be used with LD_PRELOAD" exit 1 ;; esac lib_path=`dirname "$lib_to_add"` lib_to_add="$lib_file" [ -n "$mysqld_ld_library_path" ] && mysqld_ld_library_path="$mysqld_ld_library_path:" mysqld_ld_library_path="$mysqld_ld_library_path$lib_path" ;; esac # LD_PRELOAD is a space-separated [ -n "$mysqld_ld_preload" ] && mysqld_ld_preload="$mysqld_ld_preload " mysqld_ld_preload="${mysqld_ld_preload}$lib_to_add" } # Returns LD_PRELOAD (and LD_LIBRARY_PATH, if needed) text, quoted to be # suitable for use in the eval that calls mysqld. # # All values in mysqld_ld_preload are prepended to LD_PRELOAD. mysqld_ld_preload_text() { text= if [ -n "$mysqld_ld_preload" ]; then new_text="$mysqld_ld_preload" [ -n "$LD_PRELOAD" ] && new_text="$new_text $LD_PRELOAD" text="${text}LD_PRELOAD="`shell_quote_string "$new_text"`' ' fi if [ -n "$mysqld_ld_library_path" ]; then new_text="$mysqld_ld_library_path" [ -n "$LD_LIBRARY_PATH" ] && new_text="$new_text:$LD_LIBRARY_PATH" text="${text}LD_LIBRARY_PATH="`shell_quote_string "$new_text"`' ' fi echo "$text" } mysql_config= get_mysql_config() { if [ -z "$mysql_config" ]; then mysql_config=`echo "$0" | sed 's,/[^/][^/]*$,/mysql_config,'` if [ ! -x "$mysql_config" ]; then log_error "Can not run mysql_config $@ from '$mysql_config'" exit 1 fi fi "$mysql_config" "$@" } # set_malloc_lib LIB # - If LIB is empty, do nothing and return # - If LIB is 'tcmalloc', look for tcmalloc shared library in /usr/lib # then pkglibdir. tcmalloc is part of the Google perftools project. # - If LIB is an absolute path, assume it is a malloc shared library # # Put LIB in mysqld_ld_preload, which will be added to LD_PRELOAD when # running mysqld. See ld.so for details. set_malloc_lib() { malloc_lib="$1" if [ "$malloc_lib" = tcmalloc ]; then pkglibdir=`get_mysql_config --variable=pkglibdir` malloc_lib= # This list is kept intentionally simple. Simply set --malloc-lib # to a full path if another location is desired. for libdir in /usr/lib "$pkglibdir" "$pkglibdir/mysql"; do for flavor in _minimal '' _and_profiler _debug; do tmp="$libdir/libtcmalloc$flavor.so" #log_notice "DEBUG: Checking for malloc lib '$tmp'" [ -r "$tmp" ] || continue malloc_lib="$tmp" break 2 done done if [ -z "$malloc_lib" ]; then log_error "no shared library for --malloc-lib=tcmalloc found in /usr/lib or $pkglibdir" exit 1 fi fi # Allow --malloc-lib='' to override other settings [ -z "$malloc_lib" ] && return case "$malloc_lib" in /*) if [ ! 
-r "$malloc_lib" ]; then log_error "--malloc-lib '$malloc_lib' can not be read and will not be used" exit 1 fi ;; *) log_error "--malloc-lib must be an absolute path or 'tcmalloc'; " \ "ignoring value '$malloc_lib'" exit 1 ;; esac add_mysqld_ld_preload "$malloc_lib" } # # First, try to find BASEDIR and ledir (where mysqld is) # if echo '/usr/share/mysql' | grep '^/usr' > /dev/null then relpkgdata=`echo '/usr/share/mysql' | sed -e 's,^/usr,,' -e 's,^/,,' -e 's,^,./,'` else # pkgdatadir is not relative to prefix relpkgdata='/usr/share/mysql' fi MY_PWD=`pwd` # Check for the directories we would expect from a binary release install if test -n "$MY_BASEDIR_VERSION" -a -d "$MY_BASEDIR_VERSION" then # BASEDIR is already overridden on command line. Do not re-set. # Use BASEDIR to discover le. if test -x "$MY_BASEDIR_VERSION/libexec/mysqld" then ledir="$MY_BASEDIR_VERSION/libexec" elif test -x "$MY_BASEDIR_VERSION/sbin/mysqld" then ledir="$MY_BASEDIR_VERSION/sbin" else ledir="$MY_BASEDIR_VERSION/bin" fi elif test -f "$relpkgdata"/english/errmsg.sys -a -x "$MY_PWD/bin/mysqld" then MY_BASEDIR_VERSION="$MY_PWD" # Where bin, share and data are ledir="$MY_PWD/bin" # Where mysqld is # Check for the directories we would expect from a source install elif test -f "$relpkgdata"/english/errmsg.sys -a -x "$MY_PWD/libexec/mysqld" then MY_BASEDIR_VERSION="$MY_PWD" # Where libexec, share and var are ledir="$MY_PWD/libexec" # Where mysqld is elif test -f "$relpkgdata"/english/errmsg.sys -a -x "$MY_PWD/sbin/mysqld" then MY_BASEDIR_VERSION="$MY_PWD" # Where sbin, share and var are ledir="$MY_PWD/sbin" # Where mysqld is # Since we didn't find anything, used the compiled-in defaults else MY_BASEDIR_VERSION='/usr' ledir='/usr/sbin' fi # # Second, try to find the data directory # # Try where the binary installs put it if test -d $MY_BASEDIR_VERSION/data/mysql then DATADIR=$MY_BASEDIR_VERSION/data if test -z "$defaults" -a -r "$DATADIR/my.cnf" then defaults="--defaults-extra-file=$DATADIR/my.cnf" fi # Next try where the source installs put it elif test -d $MY_BASEDIR_VERSION/var/mysql then DATADIR=$MY_BASEDIR_VERSION/var # Or just give up and use our compiled-in default else DATADIR=/var/lib/mysql fi # # Try to find the plugin directory # # Use user-supplied argument if [ -n "${PLUGIN_DIR}" ]; then plugin_dir="${PLUGIN_DIR}" else # Try to find plugin dir relative to basedir for dir in lib/mysql/plugin lib/plugin do if [ -d "${MY_BASEDIR_VERSION}/${dir}" ]; then plugin_dir="${MY_BASEDIR_VERSION}/${dir}" break fi done # Give up and use compiled-in default if [ -z "${plugin_dir}" ]; then plugin_dir='/usr/lib/mysql/plugin' fi fi plugin_dir="${plugin_dir}${PLUGIN_VARIANT}" if test -z "$MYSQL_HOME" then if test -r "$MY_BASEDIR_VERSION/my.cnf" && test -r "$DATADIR/my.cnf" then log_error "WARNING: Found two instances of my.cnf - $MY_BASEDIR_VERSION/my.cnf and $DATADIR/my.cnf IGNORING $DATADIR/my.cnf" MYSQL_HOME=$MY_BASEDIR_VERSION elif test -r "$DATADIR/my.cnf" then log_error "WARNING: Found $DATADIR/my.cnf The data directory is a deprecated location for my.cnf, please move it to $MY_BASEDIR_VERSION/my.cnf" MYSQL_HOME=$DATADIR else MYSQL_HOME=$MY_BASEDIR_VERSION fi fi export MYSQL_HOME # Get first arguments from the my.cnf file, groups [mysqld] and [mysqld_safe] # and then merge with the command line arguments if test -x "$MY_BASEDIR_VERSION/bin/my_print_defaults" then print_defaults="$MY_BASEDIR_VERSION/bin/my_print_defaults" elif test -x `dirname $0`/my_print_defaults then print_defaults="`dirname $0`/my_print_defaults" elif 
test -x ./bin/my_print_defaults then print_defaults="./bin/my_print_defaults" elif test -x /usr/bin/my_print_defaults then print_defaults="/usr/bin/my_print_defaults" elif test -x /usr/bin/mysql_print_defaults then print_defaults="/usr/bin/mysql_print_defaults" else print_defaults="my_print_defaults" fi append_arg_to_args () { args="$args "`shell_quote_string "$1"` } args= SET_USER=2 parse_arguments `$print_defaults $defaults --loose-verbose mysqld server` if test $SET_USER -eq 2 then SET_USER=0 fi parse_arguments `$print_defaults $defaults --loose-verbose mysqld_safe safe_mysqld` parse_arguments PICK-ARGS-FROM-ARGV "$@" # Determine what logging facility to use # Ensure that 'logger' exists, if it's requested if [ $want_syslog -eq 1 ] then my_which logger > /dev/null 2>&1 if [ $? -ne 0 ] then log_error "--syslog requested, but no 'logger' program found. Please ensure that 'logger' is in your PATH, or do not specify the --syslog option to mysqld_safe." exit 1 fi fi if [ -n "$err_log" -o $want_syslog -eq 0 ] then if [ -n "$err_log" ] then # mysqld adds ".err" if there is no extension on the --log-error # argument; must match that here, or mysqld_safe will write to a # different log file than mysqld # mysqld does not add ".err" to "--log-error=foo."; it considers a # trailing "." as an extension if expr "$err_log" : '.*\.[^/]*$' > /dev/null then : else err_log="$err_log".err fi case "$err_log" in /* ) ;; * ) err_log="$DATADIR/$err_log" ;; esac else err_log=$DATADIR/`hostname`.err fi append_arg_to_args "--log-error=$err_log" if [ $want_syslog -eq 1 ] then # User explicitly asked for syslog, so warn that it isn't used log_error "Can't log to error log and syslog at the same time. Remove all --log-error configuration options for --syslog to take effect." fi # Log to err_log file log_notice "Logging to '$err_log'." logging=file else if [ -n "$syslog_tag" ] then # Sanitize the syslog tag syslog_tag=`echo "$syslog_tag" | sed -e 's/[^a-zA-Z0-9_-]/_/g'` syslog_tag_mysqld_safe="${syslog_tag_mysqld_safe}-$syslog_tag" syslog_tag_mysqld="${syslog_tag_mysqld}-$syslog_tag" fi log_notice "Logging to syslog." logging=syslog fi USER_OPTION="" if test -w / -o "$USER" = "root" then if test "$user" != "root" -o $SET_USER = 1 then USER_OPTION="--user=$user" fi # Change the err log to the right user, if it is in use if [ $want_syslog -eq 0 ]; then touch "$err_log" chown $user "$err_log" fi if test -n "$open_files" then ulimit -n $open_files fi fi if test -n "$open_files" then append_arg_to_args "--open-files-limit=$open_files" fi safe_mysql_unix_port=${mysql_unix_port:-${MYSQL_UNIX_PORT:-/var/run/mysqld/mysqld.sock}} # Make sure that directory for $safe_mysql_unix_port exists mysql_unix_port_dir=`dirname $safe_mysql_unix_port` if [ ! -d $mysql_unix_port_dir ] then mkdir $mysql_unix_port_dir chown $user $mysql_unix_port_dir chmod 755 $mysql_unix_port_dir fi # If the user doesn't specify a binary, we assume name "mysqld" if test -z "$MYSQLD" then MYSQLD=mysqld fi if test ! -x "$ledir/$MYSQLD" then log_error "The file $ledir/$MYSQLD does not exist or is not executable. 
Please cd to the mysql installation directory and restart this script from there as follows: ./bin/mysqld_safe& See http://dev.mysql.com/doc/mysql/en/mysqld-safe.html for more information" exit 1 fi if test -z "$pid_file" then pid_file="$DATADIR/`hostname`.pid" else case "$pid_file" in /* ) ;; * ) pid_file="$DATADIR/$pid_file" ;; esac fi append_arg_to_args "--pid-file=$pid_file" if test -n "$mysql_unix_port" then append_arg_to_args "--socket=$mysql_unix_port" fi if test -n "$mysql_tcp_port" then append_arg_to_args "--port=$mysql_tcp_port" fi if test $niceness -eq 0 then NOHUP_NICENESS="nohup" else NOHUP_NICENESS="nohup nice -$niceness" fi # Using nice with no args to get the niceness level is GNU-specific. # This check could be extended for other operating systems (e.g., # BSD could use "nohup sh -c 'ps -o nice -p $$' | tail -1"). # But, it also seems that GNU nohup is the only one which messes # with the priority, so this is okay. if nohup nice > /dev/null 2>&1 then normal_niceness=`nice` nohup_niceness=`nohup nice 2>/dev/null` numeric_nice_values=1 for val in $normal_niceness $nohup_niceness do case "$val" in -[0-9] | -[0-9][0-9] | -[0-9][0-9][0-9] | \ [0-9] | [0-9][0-9] | [0-9][0-9][0-9] ) ;; * ) numeric_nice_values=0 ;; esac done if test $numeric_nice_values -eq 1 then nice_value_diff=`expr $nohup_niceness - $normal_niceness` if test $? -eq 0 && test $nice_value_diff -gt 0 && \ nice --$nice_value_diff echo testing > /dev/null 2>&1 then # nohup increases the priority (bad), and we are permitted # to lower the priority with respect to the value the user # might have been given niceness=`expr $niceness - $nice_value_diff` NOHUP_NICENESS="nice -$niceness nohup" fi fi else if nohup echo testing > /dev/null 2>&1 then : else # nohup doesn't work on this system NOHUP_NICENESS="" fi fi # Try to set the core file size (even if we aren't root) because many systems # don't specify a hard limit on core file size. if test -n "$core_file_size" then ulimit -c $core_file_size fi # # If there exists an old pid file, check if the daemon is already running # Note: The switches to 'ps' may depend on your operating system if test -f "$pid_file" then PID=`cat "$pid_file"` if kill -0 $PID > /dev/null 2> /dev/null then if ps wwwp $PID | grep -v mysqld_safe | grep -- $MYSQLD > /dev/null then # The pid contains a mysqld process log_error "A mysqld process already exists" exit 1 fi fi rm -f "$pid_file" if test -f "$pid_file" then log_error "Fatal error: Can't remove the pid file: $pid_file Please remove it manually and start $0 again; mysqld daemon not started" exit 1 fi fi # # From now on, we catch signals to do a proper shutdown of mysqld # when signalled to do so. # trap '/usr/bin/mysqladmin --defaults-extra-file=/etc/mysql/debian.cnf refresh & wait' 1 # HUP trap '/usr/bin/mysqladmin --defaults-extra-file=/etc/mysql/debian.cnf shutdown' 2 3 15 # INT QUIT and TERM # # Uncomment the following lines if you want all tables to be automatically # checked and repaired during startup. You should add sensible key_buffer # and sort_buffer values to my.cnf to improve check performance or require # less disk space. # Alternatively, you can start mysqld with the "myisam-recover" option. See # the manual for details. # # echo "Checking tables in $DATADIR" # $MY_BASEDIR_VERSION/bin/myisamchk --silent --force --fast --medium-check $DATADIR/*/*.MYI # $MY_BASEDIR_VERSION/bin/isamchk --silent --force $DATADIR/*/*.ISM # Does this work on all systems? 
#if type ulimit | grep "shell builtin" > /dev/null #then # ulimit -n 256 > /dev/null 2>&1 # Fix for BSD and FreeBSD systems #fi cmd="`mysqld_ld_preload_text`$NOHUP_NICENESS" for i in "$ledir/$MYSQLD" "$defaults" "--basedir=$MY_BASEDIR_VERSION" \ "--datadir=$DATADIR" "--plugin-dir=$plugin_dir" "$USER_OPTION" do cmd="$cmd "`shell_quote_string "$i"` done cmd="$cmd $args" # Avoid 'nohup: ignoring input' warning nohup_redir="" test -n "$NOHUP_NICENESS" && nohup_redir=" < /dev/null" log_notice "Starting $MYSQLD daemon with databases from $DATADIR" # variable to track the current number of "fast" (a.k.a. subsecond) restarts fast_restart=0 # maximum number of restarts before throttling kicks in max_fast_restarts=5 # flag whether a usable sleep command exists have_sleep=1 # maximum number of wsrep restarts max_wsrep_restarts=0 while true do rm -f $safe_mysql_unix_port "$pid_file" # Some extra safety start_time=`date +%M%S` # this sets wsrep_start_position_opt wsrep_recover_position "$cmd" [ $? -ne 0 ] && exit 1 # [ -n "$wsrep_urls" ] && url=`wsrep_pick_url $wsrep_urls` # check connect address if [ -z "$url" ] then eval_log_error "$cmd $wsrep_start_position_opt $nohup_redir" else eval_log_error "$cmd $wsrep_start_position_opt --wsrep_cluster_address=$url $nohup_redir" fi end_time=`date +%M%S` if test ! -f "$pid_file" # This is removed if normal shutdown then break fi # sanity check if time reading is sane and there's sleep if test $end_time -gt 0 -a $have_sleep -gt 0 then # throttle down the fast restarts if test $end_time -eq $start_time then fast_restart=`expr $fast_restart + 1` if test $fast_restart -ge $max_fast_restarts then log_notice "The server is respawning too fast. Sleeping for 1 second." sleep 1 sleep_state=$? if test $sleep_state -gt 0 then log_notice "The server is respawning too fast and no working sleep command. Turning off throttling." have_sleep=0 fi fast_restart=0 fi else fast_restart=0 fi fi if true && test $KILL_MYSQLD -eq 1 then # Test if one process was hanging. # This is only a fix for Linux (running as base 3 mysqld processes) # but should work for the rest of the servers. # The only thing is ps x => redhat 5 gives warnings when using ps -x. # kill -9 is used or the process won't react on the kill. numofproces=`ps xaww | grep -v "grep" | grep "$ledir/$MYSQLD\>" | grep -c "pid-file=$pid_file"` log_notice "Number of processes running now: $numofproces" I=1 while test "$I" -le "$numofproces" do PROC=`ps xaww | grep "$ledir/$MYSQLD\>" | grep -v "grep" | grep "pid-file=$pid_file" | sed -n '$p'` for T in $PROC do break done # echo "TEST $I - $T **" if kill -9 $T then log_error "$MYSQLD process hanging, pid $T - killed" else break fi I=`expr $I + 1` done fi if [ -n "$wsrep_restart" ] then if [ $wsrep_restart -le $max_wsrep_restarts ] then wsrep_restart=`expr $wsrep_restart + 1` log_notice "WSREP: sleeping 15 seconds before restart" sleep 15 else log_notice "WSREP: not restarting wsrep node automatically" break fi fi log_notice "mysqld restarted" done log_notice "mysqld from pid file $pid_file ended" galera-3-25.3.20/scripts/mysql/debian/mysql-server-wsrep.list0000644000015300001660000000003513042054732023716 0ustar jenkinsjenkins%include ${MYSQL_MAJOR}.list galera-3-25.3.20/scripts/mysql/debian/debian-start.inc.sh0000755000015300001660000000505513042054732022705 0ustar jenkinsjenkins#!/bin/bash # # This file is included by /etc/mysql/debian-start # ## Check all unclosed tables. # - Requires the server to be up. # - Is supposed to run silently in background.
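#
# For illustration (schema and table names here are made up): for a MyISAM
# table `test`.`t1` the generator below emits, and then executes via xargs,
#
#     select count(*) into @discard from `test`.`t1`
#
# The full-table read makes the server open and scan every MyISAM table, so
# the "Triggering myisam-recover" pass below surfaces improperly closed
# tables; whatever the forced statements print ends up in $tempfile and is
# mailed/logged further down.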
function check_for_crashed_tables() { set -e set -u # But do it in the background to not stall the boot process. logger -p daemon.info -i -t$0 "Triggering myisam-recover for all MyISAM tables" # Checking for $? is unreliable so the size of the output is checked. # Some table handlers like HEAP do not support CHECK TABLE. tempfile=`tempfile` # We have to use xargs in this case, because a for loop barfs on the # spaces in the thing to be looped over. LC_ALL=C $MYSQL --skip-column-names --batch -e ' select concat('\''select count(*) into @discard from `'\'', TABLE_SCHEMA, '\''`.`'\'', TABLE_NAME, '\''`'\'') from information_schema.TABLES where ENGINE='\''MyISAM'\' | \ xargs -i $MYSQL --skip-column-names --silent --batch \ --force -e "{}" >$tempfile if [ -s $tempfile ]; then ( /bin/echo -e "\n" \ "Improperly closed tables are also reported if clients are accessing\n" \ "the tables *now*. A list of current connections is below.\n"; $MYADMIN processlist status ) >> $tempfile # Check for presence as a dependency on mailx would require an MTA. if [ -x /usr/bin/mailx ]; then mailx -e -s"$MYCHECK_SUBJECT" $MYCHECK_RCPT < $tempfile fi (echo "$MYCHECK_SUBJECT"; cat $tempfile) | logger -p daemon.warn -i -t$0 fi rm $tempfile } ## Check for tables needing an upgrade. # - Requires the server to be up. # - Is supposed to run silently in background. function upgrade_system_tables_if_necessary() { set -e set -u logger -p daemon.info -i -t$0 "Upgrading MySQL tables if necessary." # Filter all "duplicate column", "duplicate key" and "unknown column" # errors as the script is designed to be idempotent. LC_ALL=C $MYUPGRADE \ 2>&1 \ | egrep -v '^(1|@had|ERROR (1054|1060|1061))' \ | logger -p daemon.warn -i -t$0 } ## Check for the presence of both, root accounts with and without password. # This might have been caused by a bug related to mysql_install_db (#418672). function check_root_accounts() { set -e set -u logger -p daemon.info -i -t$0 "Checking for insecure root accounts." ret=$( echo "SELECT count(*) FROM mysql.user WHERE user='root' and password='';" | $MYSQL --skip-column-names ) if [ "$ret" -ne "0" ]; then logger -p daemon.warn -i -t$0 "WARNING: mysql.user contains $ret root accounts without password!" 
fi } galera-3-25.3.20/scripts/mysql/debian/5.1.list0000644000015300001660000002113713042054732020420 0ustar jenkinsjenkins# This is a MySQL-wsrep package description for ESP package manager %include common.list %requires mysql-client-5.1 5.1.41 %provides mysql-server-core-5.1 %provides mysql-server-5.1 ############################################## # Below files sorted into paragraphs # # in the path alphabetical order # ############################################## d 755 root root $BINS_DST - f 755 root root $BINS_DST/msql2mysql $MYSQL_SRC/scripts/msql2mysql f 755 root root $BINS_DST/myisamchk $MYSQL_SRC/storage/myisam/myisamchk f 755 root root $BINS_DST/myisamlog $MYSQL_SRC/storage/myisam/myisamlog f 755 root root $BINS_DST/myisampack $MYSQL_SRC/storage/myisam/myisampack f 755 root root $BINS_DST/mysql_convert_table_format $MYSQL_SRC/scripts/mysql_convert_table_format f 755 root root $BINS_DST/mysql_fix_privilege_tables $MYSQL_SRC/scripts/mysql_fix_privilege_tables f 755 root root $BINS_DST/mysql_install_db $MYSQL_SRC/scripts/mysql_install_db f 755 root root $BINS_DST/mysql_secure_installation $MYSQL_SRC/scripts/mysql_secure_installation f 755 root root $BINS_DST/mysql_setpermission $MYSQL_SRC/scripts/mysql_setpermission f 755 root root $BINS_DST/mysql_tzinfo_to_sql $MYSQL_SRC/sql/mysql_tzinfo_to_sql f 755 root root $BINS_DST/mysql_upgrade $MYSQL_SRC/client/.libs/mysql_upgrade f 755 root root $BINS_DST/mysql_zap $MYSQL_SRC/scripts/mysql_zap f 755 root root $BINS_DST/mysqlbinlog $MYSQL_SRC/client/.libs/mysqlbinlog f 755 root root $BINS_DST/mysqld_multi $MYSQL_SRC/scripts/mysqld_multi f 755 root root $BINS_DST/mysqld_safe $GALERA_SRC/scripts/mysql/debian/mysqld_safe-5.1 f 755 root root $BINS_DST/mysqlhotcopy $MYSQL_SRC/scripts/mysqlhotcopy f 755 root root $BINS_DST/mysqltest $MYSQL_SRC/client/.libs/mysqltest f 755 root root $BINS_DST/replace $MYSQL_SRC/extra/replace f 755 root root $BINS_DST/resolve_stack_dump $MYSQL_SRC/extra/resolve_stack_dump f 755 root root $BINS_DST/resolveip $MYSQL_SRC/extra/resolveip d 755 root root $LIBS_DST - d 755 root root $LIBS_DST/plugin - f 755 root root $LIBS_DST/plugin/ha_innodb_plugin.so.0.0.0 $MYSQL_SRC/storage/innodb_plugin/.libs/ha_innodb_plugin.so.0.0.0 l 000 root root $LIBS_DST/plugin/ha_innodb_plugin.so.0 ha_innodb_plugin.so.0.0.0 l 000 root root $LIBS_DST/plugin/ha_innodb_plugin.so ha_innodb_plugin.so.0.0.0 # /usr/share/doc/... 
d 755 root root $DOCS_DST - f 644 root root $DOCS_DST/COPYING $MYSQL_SRC/COPYING d 755 root root $DOCS_DST/examples - f 644 root root $DOCS_DST/examples $MYSQL_SRC/support-files/*.cnf # manpages d 755 root root $MAN_DST/man1 - f 644 root root $MAN_DST/man1/innochecksum.1.gz $MYSQL_SRC/man/innochecksum.1.gz f 644 root root $MAN_DST/man1/msql2mysql.1.gz $MYSQL_SRC/man/msql2mysql.1.gz f 644 root root $MAN_DST/man1/myisamchk.1.gz $MYSQL_SRC/man/myisamchk.1.gz f 644 root root $MAN_DST/man1/myisamlog.1.gz $MYSQL_SRC/man/myisamlog.1.gz f 644 root root $MAN_DST/man1/myisampack.1.gz $MYSQL_SRC/man/myisampack.1.gz f 644 root root $MAN_DST/man1/mysql_convert_table_format.1.gz $MYSQL_SRC/man/mysql_convert_table_format.1.gz f 644 root root $MAN_DST/man1/mysql_fix_privilege_tables.1.gz $MYSQL_SRC/man/mysql_fix_privilege_tables.1.gz f 644 root root $MAN_DST/man1/mysql_install_db.1.gz $MYSQL_SRC/man/mysql_install_db.1.gz f 644 root root $MAN_DST/man1/mysql_secure_installation.1.gz $MYSQL_SRC/man/mysql_secure_installation.1.gz f 644 root root $MAN_DST/man1/mysql_setpermission.1.gz $MYSQL_SRC/man/mysql_setpermission.1.gz f 644 root root $MAN_DST/man1/mysql_tzinfo_to_sql.1.gz $MYSQL_SRC/man/mysql_tzinfo_to_sql.1.gz f 644 root root $MAN_DST/man1/mysql_upgrade.1.gz $MYSQL_SRC/man/mysql_upgrade.1.gz f 644 root root $MAN_DST/man1/mysql_zap.1.gz $MYSQL_SRC/man/mysql_zap.1.gz f 644 root root $MAN_DST/man1/mysqlbinlog.1.gz $MYSQL_SRC/man/mysqlbinlog.1.gz f 644 root root $MAN_DST/man1/mysqld_multi.1.gz $MYSQL_SRC/man/mysqld_multi.1.gz f 644 root root $MAN_DST/man1/mysqld_safe.1.gz $MYSQL_SRC/man/mysqld_safe.1.gz f 644 root root $MAN_DST/man1/mysqlhotcopy.1.gz $MYSQL_SRC/man/mysqlhotcopy.1.gz f 644 root root $MAN_DST/man1/mysqltest.1.gz $MYSQL_SRC/man/mysqltest.1.gz l 000 root root $MAN_DST/man1/mysqltest_embedded.1.gz mysqltest.1.gz f 644 root root $MAN_DST/man1/replace.1.gz $MYSQL_SRC/man/replace.1.gz f 644 root root $MAN_DST/man1/resolve_stack_dump.1.gz $MYSQL_SRC/man/resolve_stack_dump.1.gz f 644 root root $MAN_DST/man1/resolveip.1.gz $MYSQL_SRC/man/resolveip.1.gz d 755 root root $MAN_DST/man8 - f 644 root root $MAN_DST/man8/mysqld.8.gz $MYSQL_SRC/man/mysqld.8.gz d 755 root root $SBIN_DST - f 755 root root $SBIN_DST/mysqld $MYSQL_SRC/sql/mysqld d 755 root root $SHAR_DST/mysql-test - l 000 root root $SHAR_DST/mysql-test/mtr mysql-test-run.pl l 000 root root $SHAR_DST/mysql-test/mysql-test-run mysql-test-run.pl f 644 root root $SHAR_DST $MYSQL_SRC/support-files/config.*.ini f 644 root root $SHAR_DST/errmsg.txt $MYSQL_SRC/sql/share/errmsg.txt f 644 root root $SHAR_DST $MYSQL_SRC/scripts/*.sql f 644 root root $SHAR_DST/mysqld_multi.server $MYSQL_SRC/support-files/mysqld_multi.server f 644 root root $SHAR_DST/ndb-config-2-node.ini $MYSQL_SRC/support-files/ndb-config-2-node.ini d 755 root root $SHAR_DST/charsets - f 644 root root $SHAR_DST/charsets $MYSQL_SRC/sql/share/charsets/* $lang00=czech d 755 root root $SHAR_DST/${lang00} - f 644 root root $SHAR_DST/${lang00} $MYSQL_SRC/sql/share/${lang00}/* $lang01=danish d 755 root root $SHAR_DST/${lang01} - f 644 root root $SHAR_DST/${lang01} $MYSQL_SRC/sql/share/${lang01}/* $lang02=dutch d 755 root root $SHAR_DST/${lang02} - f 644 root root $SHAR_DST/${lang02} $MYSQL_SRC/sql/share/${lang02}/* $lang03=english d 755 root root $SHAR_DST/${lang03} - f 644 root root $SHAR_DST/${lang03} $MYSQL_SRC/sql/share/${lang03}/* $lang04=estonian d 755 root root $SHAR_DST/${lang04} - f 644 root root $SHAR_DST/${lang04} $MYSQL_SRC/sql/share/${lang04}/* $lang05=french d 755 root root 
$SHAR_DST/${lang05} - f 644 root root $SHAR_DST/${lang05} $MYSQL_SRC/sql/share/${lang05}/* $lang06=german d 755 root root $SHAR_DST/${lang06} - f 644 root root $SHAR_DST/${lang06} $MYSQL_SRC/sql/share/${lang06}/* $lang07=greek d 755 root root $SHAR_DST/${lang07} - f 644 root root $SHAR_DST/${lang07} $MYSQL_SRC/sql/share/${lang07}/* $lang08=hungarian d 755 root root $SHAR_DST/${lang08} - f 644 root root $SHAR_DST/${lang08} $MYSQL_SRC/sql/share/${lang08}/* $lang09=italian d 755 root root $SHAR_DST/${lang09} - f 644 root root $SHAR_DST/${lang09} $MYSQL_SRC/sql/share/${lang09}/* $lang10=japanese d 755 root root $SHAR_DST/${lang10} - f 644 root root $SHAR_DST/${lang10} $MYSQL_SRC/sql/share/${lang10}/* $lang11=korean d 755 root root $SHAR_DST/${lang11} - f 644 root root $SHAR_DST/${lang11} $MYSQL_SRC/sql/share/${lang11}/* $lang12=norwegian d 755 root root $SHAR_DST/${lang12} - f 644 root root $SHAR_DST/${lang12} $MYSQL_SRC/sql/share/${lang12}/* $lang13=norwegian-ny d 755 root root $SHAR_DST/${lang13} - f 644 root root $SHAR_DST/${lang13} $MYSQL_SRC/sql/share/${lang13}/* $lang14=polish d 755 root root $SHAR_DST/${lang14} - f 644 root root $SHAR_DST/${lang14} $MYSQL_SRC/sql/share/${lang14}/* $lang15=portuguese d 755 root root $SHAR_DST/${lang15} - f 644 root root $SHAR_DST/${lang15} $MYSQL_SRC/sql/share/${lang15}/* $lang16=romanian d 755 root root $SHAR_DST/${lang16} - f 644 root root $SHAR_DST/${lang16} $MYSQL_SRC/sql/share/${lang16}/* $lang17=russian d 755 root root $SHAR_DST/${lang17} - f 644 root root $SHAR_DST/${lang17} $MYSQL_SRC/sql/share/${lang17}/* $lang18=serbian d 755 root root $SHAR_DST/${lang18} - f 644 root root $SHAR_DST/${lang18} $MYSQL_SRC/sql/share/${lang18}/* $lang19=slovak d 755 root root $SHAR_DST/${lang19} - f 644 root root $SHAR_DST/${lang19} $MYSQL_SRC/sql/share/${lang19}/* $lang20=spanish d 755 root root $SHAR_DST/${lang20} - f 644 root root $SHAR_DST/${lang20} $MYSQL_SRC/sql/share/${lang20}/* $lang21=swedish d 755 root root $SHAR_DST/${lang21} - f 644 root root $SHAR_DST/${lang21} $MYSQL_SRC/sql/share/${lang21}/* $lang22=ukrainian d 755 root root $SHAR_DST/${lang22} - f 644 root root $SHAR_DST/${lang22} $MYSQL_SRC/sql/share/${lang22}/* #d 755 mysql root /var/run/mysqld - #d 755 mysql adm /var/log/mysql - d 755 root root /var/lib/mysql-upgrade - # galera-3-25.3.20/scripts/mysql/debian/mysqld_safe-5.10000755000015300001660000004312613042054732021760 0ustar jenkinsjenkins#!/bin/sh # Copyright Abandoned 1996 TCX DataKonsult AB & Monty Program KB & Detron HB # This file is public domain and comes with NO WARRANTY of any kind # # Script to start the MySQL daemon and restart it if it dies unexpectedly # # This should be executed in the MySQL base directory if you are using a # binary installation that is not installed in its compile-time default # location # # mysql.server works by first doing a cd to the base directory and from there # executing mysqld_safe KILL_MYSQLD=1; MYSQLD= niceness=0 # Initial logging status: error log is not open, and not using syslog logging=init want_syslog=0 syslog_tag= user='mysql' pid_file= err_log= syslog_tag_mysqld=mysqld syslog_tag_mysqld_safe=mysqld_safe umask 007 defaults= case "$1" in --no-defaults|--defaults-file=*|--defaults-extra-file=*) defaults="$1"; shift ;; esac usage () { cat <> "$err_log" ;; syslog) logger -t "$syslog_tag_mysqld_safe" -p "$priority" "$*" ;; *) echo "Internal program error (non-fatal):" \ " unknown logging method '$logging'" >&2 ;; esac } log_error () { log_generic daemon.error "$@" >&2 } log_notice () { log_generic 
daemon.notice "$@" } eval_log_error () { cmd="$1" case $logging in file) cmd="$cmd >> "`shell_quote_string "$err_log"`" 2>&1" ;; syslog) # mysqld often prefixes its messages with a timestamp, which is # redundant when logging to syslog (which adds its own timestamp) # However, we don't strip the timestamp with sed here, because # sed buffers output (only GNU sed supports a -u (unbuffered) option) # which means that messages may not get sent to syslog until the # mysqld process quits. cmd="$cmd 2>&1 | logger -t '$syslog_tag_mysqld' -p daemon.error & wait" ;; *) echo "Internal program error (non-fatal):" \ " unknown logging method '$logging'" >&2 ;; esac #echo "Running mysqld: [$cmd]" eval "$cmd" } shell_quote_string() { # This sed command makes sure that any special chars are quoted, # so the arg gets passed exactly to the server. echo "$1" | sed -e 's,\([^a-zA-Z0-9/_.=-]\),\\\1,g' } wsrep_pick_url() { [ $# -eq 0 ] return 0 if ! which nc >/dev/null; then log_error "ERROR: nc tool not found in PATH! Make sure you have it installed." return 1 fi local url # Assuming URL in the form scheme://host:port # If host and port are not NULL, the liveness of URL is assumed to be tested # If port part is absent, the url is returned literally and unconditionally # If every URL has port but none is reachable, nothing is returned for url in `echo $@ | sed s/,/\ /g` 0; do local host=`echo $url | cut -d \: -f 2 | sed s/^\\\/\\\///` local port=`echo $url | cut -d \: -f 3` [ -z "$port" ] && break nc -z "$host" $port >/dev/null && break done if [ "$url" == "0" ]; then log_error "ERROR: none of the URLs in '$@' is reachable." return 1 fi echo $url } parse_arguments() { # We only need to pass arguments through to the server if we don't # handle them here. So, we collect unrecognized options (passed on # the command line) into the args variable. pick_args= if test "$1" = PICK-ARGS-FROM-ARGV then pick_args=1 shift fi for arg do val=`echo "$arg" | sed -e "s;--[^=]*=;;"` case "$arg" in # these get passed explicitly to mysqld --basedir=*) MY_BASEDIR_VERSION="$val" ;; --datadir=*) DATADIR="$val" ;; --pid-file=*) pid_file="$val" ;; --user=*) user="$val"; SET_USER=1 ;; # these might have been set in a [mysqld_safe] section of my.cnf # they are added to mysqld command line to override settings from my.cnf --log-error=*) err_log="$val" ;; --port=*) mysql_tcp_port="$val" ;; --socket=*) mysql_unix_port="$val" ;; # mysqld_safe-specific options - must be set in my.cnf ([mysqld_safe])! 
--core-file-size=*) core_file_size="$val" ;; --ledir=*) ledir="$val" ;; --mysqld=*) MYSQLD="$val" ;; --mysqld-version=*) if test -n "$val" then MYSQLD="mysqld-$val" else MYSQLD="mysqld" fi ;; --nice=*) niceness="$val" ;; --open-files-limit=*) open_files="$val" ;; --open_files_limit=*) open_files="$val" ;; --skip-kill-mysqld*) KILL_MYSQLD=0 ;; --syslog) want_syslog=1 ;; --skip-syslog) want_syslog=0 ;; --syslog-tag=*) syslog_tag="$val" ;; --timezone=*) TZ="$val"; export TZ; ;; --wsrep[-_]urls=*) wsrep_urls="$val"; ;; --help) usage ;; *) if test -n "$pick_args" then append_arg_to_args "$arg" fi ;; esac done } # # First, try to find BASEDIR and ledir (where mysqld is) # if echo '/usr/share/mysql' | grep '^/usr' > /dev/null then relpkgdata=`echo '/usr/share/mysql' | sed -e 's,^/usr,,' -e 's,^/,,' -e 's,^,./,'` else # pkgdatadir is not relative to prefix relpkgdata='/usr/share/mysql' fi MY_PWD=`pwd` # Check for the directories we would expect from a binary release install if test -n "$MY_BASEDIR_VERSION" -a -d "$MY_BASEDIR_VERSION" then # BASEDIR is already overridden on command line. Do not re-set. # Use BASEDIR to discover le. if test -x "$MY_BASEDIR_VERSION/libexec/mysqld" then ledir="$MY_BASEDIR_VERSION/libexec" else ledir="$MY_BASEDIR_VERSION/bin" fi elif test -f "$relpkgdata"/english/errmsg.sys -a -x "$MY_PWD/bin/mysqld" then MY_BASEDIR_VERSION="$MY_PWD" # Where bin, share and data are ledir="$MY_PWD/bin" # Where mysqld is # Check for the directories we would expect from a source install elif test -f "$relpkgdata"/english/errmsg.sys -a -x "$MY_PWD/libexec/mysqld" then MY_BASEDIR_VERSION="$MY_PWD" # Where libexec, share and var are ledir="$MY_PWD/libexec" # Where mysqld is # Since we didn't find anything, used the compiled-in defaults else MY_BASEDIR_VERSION='/usr' ledir='/usr/sbin' fi # # Second, try to find the data directory # # Try where the binary installs put it if test -d $MY_BASEDIR_VERSION/data/mysql then DATADIR=$MY_BASEDIR_VERSION/data if test -z "$defaults" -a -r "$DATADIR/my.cnf" then defaults="--defaults-extra-file=$DATADIR/my.cnf" fi # Next try where the source installs put it elif test -d $MY_BASEDIR_VERSION/var/mysql then DATADIR=$MY_BASEDIR_VERSION/var # Or just give up and use our compiled-in default else DATADIR=/var/lib/mysql fi if test -z "$MYSQL_HOME" then if test -r "$MY_BASEDIR_VERSION/my.cnf" && test -r "$DATADIR/my.cnf" then log_error "WARNING: Found two instances of my.cnf - $MY_BASEDIR_VERSION/my.cnf and $DATADIR/my.cnf IGNORING $DATADIR/my.cnf" MYSQL_HOME=$MY_BASEDIR_VERSION elif test -r "$DATADIR/my.cnf" then log_error "WARNING: Found $DATADIR/my.cnf The data directory is a deprecated location for my.cnf, please move it to $MY_BASEDIR_VERSION/my.cnf" MYSQL_HOME=$DATADIR else MYSQL_HOME=$MY_BASEDIR_VERSION fi fi export MYSQL_HOME # Get first arguments from the my.cnf file, groups [mysqld] and [mysqld_safe] # and then merge with the command line arguments if test -x "$MY_BASEDIR_VERSION/bin/my_print_defaults" then print_defaults="$MY_BASEDIR_VERSION/bin/my_print_defaults" elif test -x ./bin/my_print_defaults then print_defaults="./bin/my_print_defaults" elif test -x /usr/bin/my_print_defaults then print_defaults="/usr/bin/my_print_defaults" elif test -x /usr/bin/mysql_print_defaults then print_defaults="/usr/bin/mysql_print_defaults" else print_defaults="my_print_defaults" fi append_arg_to_args () { args="$args "`shell_quote_string "$1"` } args= SET_USER=2 parse_arguments `$print_defaults $defaults --loose-verbose mysqld server` if test $SET_USER -eq 2 then 
SET_USER=0 fi parse_arguments `$print_defaults $defaults --loose-verbose mysqld_safe safe_mysqld` parse_arguments PICK-ARGS-FROM-ARGV "$@" # Determine what logging facility to use # Ensure that 'logger' exists, if it's requested if [ $want_syslog -eq 1 ] then my_which logger > /dev/null 2>&1 if [ $? -ne 0 ] then log_error "--syslog requested, but no 'logger' program found. Please ensure that 'logger' is in your PATH, or do not specify the --syslog option to mysqld_safe." exit 1 fi fi if [ -n "$err_log" -o $want_syslog -eq 0 ] then if [ -n "$err_log" ] then # mysqld adds ".err" if there is no extension on the --log-error # argument; must match that here, or mysqld_safe will write to a # different log file than mysqld # mysqld does not add ".err" to "--log-error=foo."; it considers a # trailing "." as an extension if expr "$err_log" : '.*\.[^/]*$' > /dev/null then : else err_log="$err_log".err fi case "$err_log" in /* ) ;; * ) err_log="$DATADIR/$err_log" ;; esac else err_log=$DATADIR/`/bin/hostname`.err fi append_arg_to_args "--log-error=$err_log" if [ $want_syslog -eq 1 ] then # User explicitly asked for syslog, so warn that it isn't used log_error "Can't log to error log and syslog at the same time. Remove all --log-error configuration options for --syslog to take effect." fi # Log to err_log file log_notice "Logging to '$err_log'." logging=file else if [ -n "$syslog_tag" ] then # Sanitize the syslog tag syslog_tag=`echo "$syslog_tag" | sed -e 's/[^a-zA-Z0-9_-]/_/g'` syslog_tag_mysqld_safe="${syslog_tag_mysqld_safe}-$syslog_tag" syslog_tag_mysqld="${syslog_tag_mysqld}-$syslog_tag" fi log_notice "Logging to syslog." logging=syslog fi USER_OPTION="" if test -w / -o "$USER" = "root" then if test "$user" != "root" -o $SET_USER = 1 then USER_OPTION="--user=$user" fi # Change the err log to the right user, if it is in use if [ $want_syslog -eq 0 ]; then touch "$err_log" chown $user "$err_log" fi if test -n "$open_files" then ulimit -n $open_files fi fi if test -n "$open_files" then append_arg_to_args "--open-files-limit=$open_files" fi safe_mysql_unix_port=${mysql_unix_port:-${MYSQL_UNIX_PORT:-/var/run/mysqld/mysqld.sock}} # Make sure that directory for $safe_mysql_unix_port exists mysql_unix_port_dir=`dirname $safe_mysql_unix_port` if [ ! -d $mysql_unix_port_dir ] then mkdir $mysql_unix_port_dir chown $user $mysql_unix_port_dir chmod 755 $mysql_unix_port_dir fi # If the user doesn't specify a binary, we assume name "mysqld" if test -z "$MYSQLD" then MYSQLD=mysqld fi if test ! -x "$ledir/$MYSQLD" then log_error "The file $ledir/$MYSQLD does not exist or is not executable. Please cd to the mysql installation directory and restart this script from there as follows: ./bin/mysqld_safe& See http://dev.mysql.com/doc/mysql/en/mysqld-safe.html for more information" exit 1 fi if test -z "$pid_file" then pid_file="$DATADIR/`/bin/hostname`.pid" else case "$pid_file" in /* ) ;; * ) pid_file="$DATADIR/$pid_file" ;; esac fi append_arg_to_args "--pid-file=$pid_file" if test -n "$mysql_unix_port" then append_arg_to_args "--socket=$mysql_unix_port" fi if test -n "$mysql_tcp_port" then append_arg_to_args "--port=$mysql_tcp_port" fi if test $niceness -eq 0 then NOHUP_NICENESS="nohup" else NOHUP_NICENESS="nohup nice -$niceness" fi # Using nice with no args to get the niceness level is GNU-specific. # This check could be extended for other operating systems (e.g., # BSD could use "nohup sh -c 'ps -o nice -p $$' | tail -1"). 
# But, it also seems that GNU nohup is the only one which messes # with the priority, so this is okay. if nohup nice > /dev/null 2>&1 then normal_niceness=`nice` nohup_niceness=`nohup nice 2>/dev/null` numeric_nice_values=1 for val in $normal_niceness $nohup_niceness do case "$val" in -[0-9] | -[0-9][0-9] | -[0-9][0-9][0-9] | \ [0-9] | [0-9][0-9] | [0-9][0-9][0-9] ) ;; * ) numeric_nice_values=0 ;; esac done if test $numeric_nice_values -eq 1 then nice_value_diff=`expr $nohup_niceness - $normal_niceness` if test $? -eq 0 && test $nice_value_diff -gt 0 && \ nice --$nice_value_diff echo testing > /dev/null 2>&1 then # nohup increases the priority (bad), and we are permitted # to lower the priority with respect to the value the user # might have been given niceness=`expr $niceness - $nice_value_diff` NOHUP_NICENESS="nice -$niceness nohup" fi fi else if nohup echo testing > /dev/null 2>&1 then : else # nohup doesn't work on this system NOHUP_NICENESS="" fi fi # Try to set the core file size (even if we aren't root) because many systems # don't specify a hard limit on core file size. if test -n "$core_file_size" then ulimit -c $core_file_size fi # # If there exists an old pid file, check if the daemon is already running # Note: The switches to 'ps' may depend on your operating system if test -f "$pid_file" then PID=`cat "$pid_file"` if /bin/kill -0 $PID > /dev/null 2> /dev/null then if /bin/ps wwwp $PID | grep -v " grep" | grep -v mysqld_safe | grep -- "$MYSQLD" > /dev/null then # The pid contains a mysqld process log_error "A mysqld process already exists" exit 1 fi fi rm -f "$pid_file" if test -f "$pid_file" then log_error "Fatal error: Can't remove the pid file: $pid_file Please remove it manually and start $0 again; mysqld daemon not started" exit 1 fi fi # # From now on, we catch signals to do a proper shutdown of mysqld # when signalled to do so. # trap '/usr/bin/mysqladmin --defaults-extra-file=/etc/mysql/debian.cnf refresh & wait' 1 # HUP trap '/usr/bin/mysqladmin --defaults-extra-file=/etc/mysql/debian.cnf shutdown' 2 3 15 # INT QUIT and TERM # # Uncomment the following lines if you want all tables to be automatically # checked and repaired during startup. You should add sensible key_buffer # and sort_buffer values to my.cnf to improve check performance or require # less disk space. # Alternatively, you can start mysqld with the "myisam-recover" option. See # the manual for details. # # echo "Checking tables in $DATADIR" # $MY_BASEDIR_VERSION/bin/myisamchk --silent --force --fast --medium-check $DATADIR/*/*.MYI # $MY_BASEDIR_VERSION/bin/isamchk --silent --force $DATADIR/*/*.ISM # Does this work on all systems? #if type ulimit | grep "shell builtin" > /dev/null #then # ulimit -n 256 > /dev/null 2>&1 # Fix for BSD and FreeBSD systems #fi cmd="$NOHUP_NICENESS" for i in "$ledir/$MYSQLD" "$defaults" "--basedir=$MY_BASEDIR_VERSION" \ "--datadir=$DATADIR" "$USER_OPTION" do cmd="$cmd "`shell_quote_string "$i"` done cmd="$cmd $args" # Avoid 'nohup: ignoring input' warning test -n "$NOHUP_NICENESS" && cmd="$cmd < /dev/null" log_notice "Starting $MYSQLD daemon with databases from $DATADIR" while true do rm -f $safe_mysql_unix_port "$pid_file" # Some extra safety [ -n "$wsrep_urls" ] && url=`wsrep_pick_url $wsrep_urls` # check connect address if [ -z "$url" ] then eval_log_error "$cmd" else eval_log_error "$cmd --wsrep_cluster_address=$url" fi if test ! -f "$pid_file" # This is removed if normal shutdown then break fi if true && test $KILL_MYSQLD -eq 1 then # Test if one process was hanging. 
# This is only a fix for Linux (running as base 3 mysqld processes) # but should work for the rest of the servers. # The only thing is ps x => redhat 5 gives warnings when using ps -x. # kill -9 is used or the process won't react on the kill. numofproces=`ps xaww | grep -v "grep" | grep "$ledir/$MYSQLD\>" | grep -c "pid-file=$pid_file"` log_notice "Number of processes running now: $numofproces" I=1 while test "$I" -le "$numofproces" do PROC=`ps xaww | grep "$ledir/$MYSQLD\>" | grep -v "grep" | grep "pid-file=$pid_file" | sed -n '$p'` for T in $PROC do break done # echo "TEST $I - $T **" if kill -9 $T then log_error "$MYSQLD process hanging, pid $T - killed" else break fi I=`expr $I + 1` done fi log_notice "mysqld restarted" done log_notice "mysqld from pid file $pid_file ended" galera-3-25.3.20/scripts/mysql/debian/my.cnf0000644000015300001660000001012713042054732020332 0ustar jenkinsjenkins# # The MySQL database server configuration file. # # You can copy this to one of: # - "/etc/mysql/my.cnf" to set global options, # - "~/.my.cnf" to set user-specific options. # # One can use all long options that the program supports. # Run program with --help to get a list of available options and with # --print-defaults to see which it would actually understand and use. # # For explanations see # http://dev.mysql.com/doc/mysql/en/server-system-variables.html # This will be passed to all mysql clients # It has been reported that passwords should be enclosed with ticks/quotes # escpecially if they contain "#" chars... # Remember to edit /etc/mysql/debian.cnf when changing the socket location. [client] port = 3306 socket = /var/run/mysqld/mysqld.sock # Here is entries for some specific programs # The following values assume you have at least 32M ram # This was formally known as [safe_mysqld]. Both versions are currently parsed. [mysqld_safe] socket = /var/run/mysqld/mysqld.sock nice = 0 [mysqld] # # * Basic Settings # # # * IMPORTANT # If you make changes to these settings and your system uses apparmor, you may # also need to also adjust /etc/apparmor.d/usr.sbin.mysqld. # user = mysql pid-file = /var/run/mysqld/mysqld.pid socket = /var/run/mysqld/mysqld.sock port = 3306 basedir = /usr datadir = /var/lib/mysql tmpdir = /tmp skip-external-locking # # Instead of skip-networking the default is now to listen only on # localhost which is more compatible and is not less secure. # WSREP NOTE: for state transfer to work, bind-address should be an address # of external interface! If you define it explicitely, see SST # options in /etc/mysql/conf.d/wsrep.cnf #bind-address = 127.0.0.1 # # * Fine Tuning # key_buffer = 16M max_allowed_packet = 16M thread_stack = 192K thread_cache_size = 8 # This replaces the startup script and checks MyISAM tables if needed # the first time they are touched myisam-recover = BACKUP #max_connections = 100 #table_cache = 64 #thread_concurrency = 10 # # * Query Cache Configuration # query_cache_limit = 1M query_cache_size = 16M # # * Logging and Replication # # Both location gets rotated by the cronjob. # Be aware that this log type is a performance killer. # As of 5.1 you can enable the log at runtime! #general_log_file = /var/log/mysql/mysql.log #general_log = 1 # # Error logging goes to syslog due to /etc/mysql/conf.d/mysqld_safe_syslog.cnf. # # Here you can see queries with especially long duration #log_slow_queries = /var/log/mysql/mysql-slow.log #long_query_time = 2 #log-queries-not-using-indexes # # The following can be used as easy to replay backup logs or for replication. 
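# For instance, binary logs written by this server could be replayed against a
# restored backup roughly like this (file name and cut-off time are purely
# illustrative):
#   mysqlbinlog /var/log/mysql/mysql-bin.000042 \
#     --stop-datetime="2016-12-31 23:59:59" | mysql -u root -p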
# note: if you are setting up a replication slave, see README.Debian about # other settings you may need to change. # WSREP NOTE: traditional MySQL replication and binlogging is not supported # and untested with this patch. Some binlog options may cause mysqld # crash. #server-id = 1 #log_bin = /var/log/mysql/mysql-bin.log #expire_logs_days = 10 #max_binlog_size = 100M #binlog_do_db = include_database_name #binlog_ignore_db = include_database_name # # * InnoDB # # InnoDB is enabled by default with a 10MB datafile in /var/lib/mysql/. # Read the manual for more InnoDB related options. There are many! # WSREP NOTE: check /etc/mysql/conf.d/wsrep.cnf for some mandatory InnoDB # options. Don't try to override them! # # * Security Features # # Read the manual, too, if you want chroot! # chroot = /var/lib/mysql/ # # For generating SSL certificates I recommend the OpenSSL GUI "tinyca". # # ssl-ca=/etc/mysql/cacert.pem # ssl-cert=/etc/mysql/server-cert.pem # ssl-key=/etc/mysql/server-key.pem [mysqldump] quick quote-names max_allowed_packet = 16M [mysql] #no-auto-rehash # faster start of mysql but no tab completition [isamchk] key_buffer = 16M # # * IMPORTANT: Additional settings that can override those from this file! # The files must end with '.cnf', otherwise they'll be ignored. # WSREP NOTE: additional wsrep configuration is in wsrep.cnf # !includedir /etc/mysql/conf.d/ galera-3-25.3.20/scripts/mysql/debian/5.5.list0000644000015300001660000002523213042054732020424 0ustar jenkinsjenkins# This is a MySQL-wsrep package description for ESP package manager %include common.list %requires libaio1 %requires mysql-client %replaces mysql-server-core-5.5 0.0.0 ${mysql_version} %replaces mysql-server-5.5 0.0.0 ${mysql_version} %provides mysql-server-core-5.5 %provides mysql-server-5.5 ############################################## # Below files sorted into paragraphs # # in the path alphabetical order # ############################################## d 755 root root $BINS_DST - #in client f 755 root root $BINS_DST/innochecksum $MYSQL_SRC/extra/innochecksum f 755 root root $BINS_DST/msql2mysql $MYSQL_SRC/scripts/msql2mysql #in client f 755 root root $BINS_DST/my_print_defaults $MYSQL_SRC/extra/my_print_defaults f 755 root root $BINS_DST/myisamchk $MYSQL_SRC/storage/myisam/myisamchk f 755 root root $BINS_DST/myisamlog $MYSQL_SRC/storage/myisam/myisamlog f 755 root root $BINS_DST/myisampack $MYSQL_SRC/storage/myisam/myisampack f 755 root root $BINS_DST/mysql_convert_table_format $MYSQL_SRC/scripts/mysql_convert_table_format #f 755 root root $BINS_DST/mysql_fix_privilege_tables $MYSQL_SRC/scripts/mysql_fix_privilege_tables f 755 root root $BINS_DST/mysql_install_db $MYSQL_SRC/scripts/mysql_install_db f 755 root root $BINS_DST/mysql_secure_installation $MYSQL_SRC/scripts/mysql_secure_installation f 755 root root $BINS_DST/mysql_setpermission $MYSQL_SRC/scripts/mysql_setpermission f 755 root root $BINS_DST/mysql_tzinfo_to_sql $MYSQL_SRC/sql/mysql_tzinfo_to_sql f 755 root root $BINS_DST/mysql_upgrade $MYSQL_SRC/client/mysql_upgrade f 755 root root $BINS_DST/mysql_zap $MYSQL_SRC/scripts/mysql_zap f 755 root root $BINS_DST/mysqlbinlog $MYSQL_SRC/client/mysqlbinlog f 755 root root $BINS_DST/mysqld_multi $MYSQL_SRC/scripts/mysqld_multi f 755 root root $BINS_DST/mysqld_safe $GALERA_SRC/scripts/mysql/debian/mysqld_safe-5.5 #f 755 root root $BINS_DST/mysqld_safe $MYSQL_SRC/scripts/mysqld_safe f 755 root root $BINS_DST/mysqlhotcopy $MYSQL_SRC/scripts/mysqlhotcopy f 755 root root $BINS_DST/mysqltest 
$MYSQL_SRC/client/mysqltest #in client f 755 root root $BINS_DST/perror $MYSQL_SRC/extra/perror f 755 root root $BINS_DST/replace $MYSQL_SRC/extra/replace f 755 root root $BINS_DST/resolve_stack_dump $MYSQL_SRC/extra/resolve_stack_dump f 755 root root $BINS_DST/resolveip $MYSQL_SRC/extra/resolveip f 755 root root $BINS_DST/wsrep_sst_xtrabackup $MYSQL_SRC/scripts/wsrep_sst_xtrabackup d 755 root root $LIBS_DST - d 755 root root $PLUGIN_DST - f 755 root root $PLUGIN_DST/adt_null.so $MYSQL_SRC/plugin/audit_null/adt_null.so f 755 root root $PLUGIN_DST/auth.so $MYSQL_SRC/plugin/auth/auth.so f 755 root root $PLUGIN_DST/auth_socket.so $MYSQL_SRC/plugin/auth/auth_socket.so f 755 root root $PLUGIN_DST/auth_test_plugin.so $MYSQL_SRC/plugin/auth/auth_test_plugin.so f 755 root root $PLUGIN_DST/libdaemon_example.so $MYSQL_SRC/plugin/daemon_example/libdaemon_example.so f 755 root root $PLUGIN_DST/mypluglib.so $MYSQL_SRC/plugin/fulltext/mypluglib.so f 755 root root $PLUGIN_DST/qa_auth_client.so $MYSQL_SRC/plugin/auth/qa_auth_client.so f 755 root root $PLUGIN_DST/qa_auth_interface.so $MYSQL_SRC/plugin/auth/qa_auth_interface.so f 755 root root $PLUGIN_DST/qa_auth_server.so $MYSQL_SRC/plugin/auth/qa_auth_server.so f 755 root root $PLUGIN_DST/semisync_master.so $MYSQL_SRC/plugin/semisync/semisync_master.so f 755 root root $PLUGIN_DST/semisync_slave.so $MYSQL_SRC/plugin/semisync/semisync_slave.so d 755 root root $SBIN_DST - f 755 root root $SBIN_DST/$MYSQLD_BINARY $MYSQL_SRC/sql/$MYSQLD_BINARY # /usr/share/doc/... d 755 root root $DOCS_DST - f 644 root root $DOCS_DST/COPYING $MYSQL_SRC/COPYING f 644 root root $DOCS_DST/ChangeLog $MYSQL_SRC/Docs/ChangeLog f 644 root root $DOCS_DST/INFO_BIN $MYSQL_SRC/Docs/INFO_BIN f 644 root root $DOCS_DST/INFO_SRC $MYSQL_SRC/Docs/INFO_SRC f 644 root root $DOCS_DST/INSTALL-BINARY $MYSQL_SRC/Docs/INSTALL-BINARY f 644 root root $DOCS_DST/README $MYSQL_SRC/README f 644 root root $DOCS_DST/myisam.txt $MYSQL_SRC/Docs/myisam.txt # f 644 root root $DOCS_DST/mysql.info $MYSQL_SRC/Docs/mysql.info f 644 root root $DOCS_DST/sp-imp-spec.txt $MYSQL_SRC/Docs/sp-imp-spec.txt # manpages d 755 root root $MAN_DST/man1 - f 644 root root $MAN_DST/man1/comp_err.1.gz $MYSQL_SRC/man/comp_err.1.gz #in client f 644 root root $MAN_DST/man1/innochecksum.1.gz $MYSQL_SRC/man/innochecksum.1.gz f 644 root root $MAN_DST/man1/msql2mysql.1.gz $MYSQL_SRC/man/msql2mysql.1.gz #in client f 644 root root $MAN_DST/man1/my_print_defaults.1.gz $MYSQL_SRC/man/my_print_defaults.1.gz f 644 root root $MAN_DST/man1/myisamchk.1.gz $MYSQL_SRC/man/myisamchk.1.gz f 644 root root $MAN_DST/man1/myisamlog.1.gz $MYSQL_SRC/man/myisamlog.1.gz f 644 root root $MAN_DST/man1/myisampack.1.gz $MYSQL_SRC/man/myisampack.1.gz f 644 root root $MAN_DST/man1/mysql.server.1.gz $MYSQL_SRC/man/mysql.server.1.gz f 644 root root $MAN_DST/man1/mysql_convert_table_format.1.gz $MYSQL_SRC/man/mysql_convert_table_format.1.gz #f 644 root root $MAN_DST/man1/mysql_fix_privilege_tables.1.gz $MYSQL_SRC/man/mysql_fix_privilege_tables.1.gz f 644 root root $MAN_DST/man1/mysql_install_db.1.gz $MYSQL_SRC/man/mysql_install_db.1.gz f 644 root root $MAN_DST/man1/mysql_secure_installation.1.gz $MYSQL_SRC/man/mysql_secure_installation.1.gz f 644 root root $MAN_DST/man1/mysql_setpermission.1.gz $MYSQL_SRC/man/mysql_setpermission.1.gz f 644 root root $MAN_DST/man1/mysql_tzinfo_to_sql.1.gz $MYSQL_SRC/man/mysql_tzinfo_to_sql.1.gz f 644 root root $MAN_DST/man1/mysql_upgrade.1.gz $MYSQL_SRC/man/mysql_upgrade.1.gz f 644 root root $MAN_DST/man1/mysql_zap.1.gz 
$MYSQL_SRC/man/mysql_zap.1.gz f 644 root root $MAN_DST/man1/mysqlbinlog.1.gz $MYSQL_SRC/man/mysqlbinlog.1.gz f 644 root root $MAN_DST/man1/mysqld_multi.1.gz $MYSQL_SRC/man/mysqld_multi.1.gz f 644 root root $MAN_DST/man1/mysqld_safe.1.gz $MYSQL_SRC/man/mysqld_safe.1.gz f 644 root root $MAN_DST/man1/mysqlhotcopy.1.gz $MYSQL_SRC/man/mysqlhotcopy.1.gz f 644 root root $MAN_DST/man1/mysqltest.1.gz $MYSQL_SRC/man/mysqltest.1.gz #in client f 644 root root $MAN_DST/man1/perror.1.gz $MYSQL_SRC/man/perror.1.gz f 644 root root $MAN_DST/man1/replace.1.gz $MYSQL_SRC/man/replace.1.gz f 644 root root $MAN_DST/man1/resolve_stack_dump.1.gz $MYSQL_SRC/man/resolve_stack_dump.1.gz f 644 root root $MAN_DST/man1/resolveip.1.gz $MYSQL_SRC/man/resolveip.1.gz d 755 root root $MAN_DST/man8 - f 644 root root $MAN_DST/man8/mysqld.8.gz $MYSQL_SRC/man/mysqld.8.gz f 644 root root $SHAR_DST $MYSQL_SRC/support-files/config.*.ini f 644 root root $SHAR_DST $MYSQL_SRC/scripts/*.sql f 644 root root $SHAR_DST/binary-configure $MYSQL_SRC/support-files/binary-configure f 644 root root $SHAR_DST/errmsg-utf8.txt $MYSQL_SRC/sql/share/errmsg-utf8.txt f 644 root root $SHAR_DST $MYSQL_SRC/support-files/my-*.cnf f 644 root root $SHAR_DST/mysql-log-rotate $MYSQL_SRC/support-files/mysql-log-rotate f 755 root root $SHAR_DST/mysql.server $MYSQL_SRC/support-files/mysql.server f 644 root root $SHAR_DST/mysqld_multi.server $MYSQL_SRC/support-files/mysqld_multi.server # f 644 root root $SHAR_DST/ndb-config-2-node.ini $MYSQL_SRC/support-files/ndb-config-2-node.ini d 755 root root $SHAR_DST/charsets - f 644 root root $SHAR_DST/charsets $MYSQL_SRC/sql/share/charsets/* $lang00=czech d 755 root root $SHAR_DST/${lang00} - f 644 root root $SHAR_DST/${lang00} $MYSQL_SRC/sql/share/${lang00}/* $lang01=danish d 755 root root $SHAR_DST/${lang01} - f 644 root root $SHAR_DST/${lang01} $MYSQL_SRC/sql/share/${lang01}/* $lang02=dutch d 755 root root $SHAR_DST/${lang02} - f 644 root root $SHAR_DST/${lang02} $MYSQL_SRC/sql/share/${lang02}/* $lang03=english d 755 root root $SHAR_DST/${lang03} - f 644 root root $SHAR_DST/${lang03} $MYSQL_SRC/sql/share/${lang03}/* $lang04=estonian d 755 root root $SHAR_DST/${lang04} - f 644 root root $SHAR_DST/${lang04} $MYSQL_SRC/sql/share/${lang04}/* $lang05=french d 755 root root $SHAR_DST/${lang05} - f 644 root root $SHAR_DST/${lang05} $MYSQL_SRC/sql/share/${lang05}/* $lang06=german d 755 root root $SHAR_DST/${lang06} - f 644 root root $SHAR_DST/${lang06} $MYSQL_SRC/sql/share/${lang06}/* $lang07=greek d 755 root root $SHAR_DST/${lang07} - f 644 root root $SHAR_DST/${lang07} $MYSQL_SRC/sql/share/${lang07}/* $lang08=hungarian d 755 root root $SHAR_DST/${lang08} - f 644 root root $SHAR_DST/${lang08} $MYSQL_SRC/sql/share/${lang08}/* $lang09=italian d 755 root root $SHAR_DST/${lang09} - f 644 root root $SHAR_DST/${lang09} $MYSQL_SRC/sql/share/${lang09}/* $lang10=japanese d 755 root root $SHAR_DST/${lang10} - f 644 root root $SHAR_DST/${lang10} $MYSQL_SRC/sql/share/${lang10}/* $lang11=korean d 755 root root $SHAR_DST/${lang11} - f 644 root root $SHAR_DST/${lang11} $MYSQL_SRC/sql/share/${lang11}/* $lang12=norwegian d 755 root root $SHAR_DST/${lang12} - f 644 root root $SHAR_DST/${lang12} $MYSQL_SRC/sql/share/${lang12}/* $lang13=norwegian-ny d 755 root root $SHAR_DST/${lang13} - f 644 root root $SHAR_DST/${lang13} $MYSQL_SRC/sql/share/${lang13}/* $lang14=polish d 755 root root $SHAR_DST/${lang14} - f 644 root root $SHAR_DST/${lang14} $MYSQL_SRC/sql/share/${lang14}/* $lang15=portuguese d 755 root root $SHAR_DST/${lang15} - f 644 root 
root $SHAR_DST/${lang15} $MYSQL_SRC/sql/share/${lang15}/* $lang16=romanian d 755 root root $SHAR_DST/${lang16} - f 644 root root $SHAR_DST/${lang16} $MYSQL_SRC/sql/share/${lang16}/* $lang17=russian d 755 root root $SHAR_DST/${lang17} - f 644 root root $SHAR_DST/${lang17} $MYSQL_SRC/sql/share/${lang17}/* $lang18=serbian d 755 root root $SHAR_DST/${lang18} - f 644 root root $SHAR_DST/${lang18} $MYSQL_SRC/sql/share/${lang18}/* $lang19=slovak d 755 root root $SHAR_DST/${lang19} - f 644 root root $SHAR_DST/${lang19} $MYSQL_SRC/sql/share/${lang19}/* $lang20=spanish d 755 root root $SHAR_DST/${lang20} - f 644 root root $SHAR_DST/${lang20} $MYSQL_SRC/sql/share/${lang20}/* $lang21=swedish d 755 root root $SHAR_DST/${lang21} - f 644 root root $SHAR_DST/${lang21} $MYSQL_SRC/sql/share/${lang21}/* $lang22=ukrainian d 755 root root $SHAR_DST/${lang22} - f 644 root root $SHAR_DST/${lang22} $MYSQL_SRC/sql/share/${lang22}/* d 755 mysql root /var/run/mysqld - # galera-3-25.3.20/scripts/mysql/debian/mysql0000755000015300001660000001374313042054732020317 0ustar jenkinsjenkins#!/bin/bash # ### BEGIN INIT INFO # Provides: mysql # Required-Start: $remote_fs $syslog # Required-Stop: $remote_fs $syslog # Should-Start: $network $named $time # Should-Stop: $network $named $time # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: Start and stop the mysql database server daemon # Description: Controls the main MySQL database server daemon "mysqld" # and its wrapper script "mysqld_safe". ### END INIT INFO # set -e set -u ${DEBIAN_SCRIPT_DEBUG:+ set -v -x} test -x /usr/sbin/mysqld || exit 0 . /lib/lsb/init-functions SELF=$(cd $(dirname $0); pwd -P)/$(basename $0) CONF=/etc/mysql/my.cnf # MYADMIN="/usr/bin/mysqladmin --defaults-file=/etc/mysql/debian.cnf" # priority can be overriden and "-s" adds output to stderr ERR_LOGGER="logger -p daemon.err -t /etc/init.d/mysql -i" # Safeguard (relative paths, core dumps..) cd / umask 077 # mysqladmin likes to read /root/.my.cnf. This is usually not what I want # as many admins e.g. only store a password without a username there and # so break my scripts. export HOME=/etc/mysql/ ## Fetch a particular option from mysql's invocation. # # Usage: void mysqld_get_param option mysqld_get_param() { /usr/sbin/mysqld --print-defaults \ | tr " " "\n" \ | grep -- "--$1" \ | tail -n 1 \ | cut -d= -f2 } # Determine parameters once per script invocation datadir=`mysqld_get_param datadir` [ -z "$datadir" ] && datadir="/var/lib/mysql" pidfile=`mysqld_get_param pid-file` [ -z "$pidfile" ] && pidfile="$datadir/$(hostname).pid" ## Do some sanity checks before even trying to start mysqld. sanity_checks() { # check for config file if [ ! -r /etc/mysql/my.cnf ]; then log_warning_msg "$0: WARNING: /etc/mysql/my.cnf cannot be read. See README.Debian.gz" echo "WARNING: /etc/mysql/my.cnf cannot be read. See README.Debian.gz" | $ERR_LOGGER fi # check for diskspace shortage if LC_ALL=C BLOCKSIZE= df --portability $datadir/. | tail -n 1 | awk '{ exit ($4>4096) }'; then log_failure_msg "$0: ERROR: The partition with $datadir is too full!" echo "ERROR: The partition with $datadir is too full!" | $ERR_LOGGER exit 1 fi } ## Checks if there is a server running and if so if it is accessible. # # check_alive insists on a pingable server # check_dead also fails if there is a lost mysqld in the process list # # Usage: boolean mysqld_status [check_alive|check_dead] [warn|nowarn] mysqld_status() { # ping_output=`$MYADMIN ping 2>&1`; ping_alive=$(( ! $? 
)) ps_alive=0 if [ -f "$pidfile" ] && ps `cat $pidfile` >/dev/null 2>&1; then ps_alive=1; fi if [ "$1" = "check_alive" -a $ps_alive = 1 ] || [ "$1" = "check_dead" -a $ps_alive = 0 ]; then return 0 # EXIT_SUCCESS else if [ "$2" = "warn" ]; then # echo -e "$ps_alive processes alive and '$MYADMIN ping' resulted in\n$ping_output\n" | $ERR_LOGGER -p daemon.debug echo -e "$ps_alive processes alive\n" | $ERR_LOGGER -p daemon.debug fi return 1 # EXIT_FAILURE fi } # # main() # cmd=${1:-''} [ $# -ge 1 ] && shift other_args="$*" case "$cmd" in 'start') sanity_checks; # Start daemon log_daemon_msg "Starting MySQL database server" "mysqld" if mysqld_status check_alive nowarn; then log_progress_msg "already running" log_end_msg 0 else # Could be removed during boot test -e /var/run/mysqld || install -m 755 -o mysql -g root -d /var/run/mysqld # Check for additional wsrep options WSREP_OPTS=${WSREP_OPTS:-""} WSREP_PROVIDER=${WSREP_PROVIDER:-""} WSREP_CLUSTER_ADDRESS=${WSREP_CLUSTER_ADDRESS:-""} test -n "$WSREP_PROVIDER" && \ WSREP_OPTS="$WSREP_OPTS --wsrep_provider=$WSREP_PROVIDER" test -n "$WSREP_CLUSTER_ADDRESS" && \ WSREP_OPTS="$WSREP_OPTS --wsrep_cluster_address=$WSREP_CLUSTER_ADDRESS" # Start MySQL!. /usr/bin/mysqld_safe $WSREP_OPTS $other_args > /dev/null 2>&1 & # 6s was reported in #352070 to be too few when using ndbcluster for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14; do sleep 1 if mysqld_status check_alive nowarn ; then break; fi log_progress_msg "." done if mysqld_status check_alive warn; then log_end_msg 0 # Now start mysqlcheck or whatever the admin wants. # output=$(/etc/mysql/debian-start) # [ -n "$output" ] && log_action_msg "$output" else log_end_msg 1 log_failure_msg "Please take a look at the syslog" fi fi ;; 'stop') # * As a passwordless mysqladmin (e.g. via ~/.my.cnf) must be possible # at least for cron, we can rely on it here, too. (although we have # to specify it explicit as e.g. sudo environments points to the normal # users home and not /root) log_daemon_msg "Stopping MySQL database server" "mysqld" if ! mysqld_status check_dead nowarn; then log_daemon_msg "Killing MySQL database server by signal" "mysqld" pid=$(cat $pidfile || echo 0) if [ $pid -eq 0 ]; then log_failure_msg "Failed to get MySQL server pid" exit 1 fi kill -15 $pid server_down= for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15; do log_progress_msg "." sleep 1 if mysqld_status check_dead nowarn; then server_down=1; break; fi done fi if ! mysqld_status check_dead warn; then log_end_msg 1 log_failure_msg "Please stop MySQL manually and read /usr/share/doc/mysql-server-5.1/README.Debian.gz!" exit -1 else log_end_msg 0 fi ;; 'restart') set +e; $SELF stop; set -e $SELF start $other_args ;; 'reload'|'force-reload') log_daemon_msg "Reloading MySQL database server" "mysqld" $MYADMIN reload log_end_msg 0 ;; 'status') if mysqld_status check_alive nowarn; then # log_action_msg "$($MYADMIN version)" log_action_msg "MySQL is running (PID: $(cat $pidfile))" else log_action_msg "MySQL is stopped." 
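# Exit code 3 below follows the LSB convention for "service is not running",
# so a monitoring hook could, for example, poll this script non-interactively
# (illustrative only):
#   if /etc/init.d/mysql status >/dev/null 2>&1; then echo up; else echo down; fi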
exit 3 fi ;; *) echo "Usage: $SELF start|stop|restart|reload|force-reload|status" exit 1 ;; esac galera-3-25.3.20/scripts/mysql/debian/etc/0000755000015300001660000000000013042054732017767 5ustar jenkinsjenkinsgalera-3-25.3.20/scripts/mysql/debian/etc/logcheck/0000755000015300001660000000000013042054732021546 5ustar jenkinsjenkinsgalera-3-25.3.20/scripts/mysql/debian/etc/logcheck/ignore.d.workstation/0000755000015300001660000000000013042054732025636 5ustar jenkinsjenkinsgalera-3-25.3.20/scripts/mysql/debian/etc/logcheck/ignore.d.workstation/mysql-server-5_10000644000015300001660000000433613042054732030622 0ustar jenkinsjenkins/etc/init.d/mysql\[[0-9]+\]: [0-9]+ processes alive and '/usr/bin/mysqladmin --defaults-(extra-)?file=/etc/mysql/debian.cnf ping' resulted in$ /etc/init.d/mysql\[[0-9]+\]: Check that mysqld is running and that the socket: '/var/run/mysqld/mysqld.sock' exists\!$ /etc/init.d/mysql\[[0-9]+\]: '/usr/bin/mysqladmin --defaults-(extra-)?file=/etc/mysql/debian.cnf ping' resulted in$ /etc/mysql/debian-start\[[0-9]+\]: Checking for crashed MySQL tables\.$ mysqld\[[0-9]+\]: ?$ mysqld\[[0-9]+\]: .*InnoDB: Shutdown completed mysqld\[[0-9]+\]: .*InnoDB: Started; mysqld\[[0-9]+\]: .*InnoDB: Starting shutdown\.\.\.$ mysqld\[[0-9]+\]: .*\[Note\] /usr/sbin/mysqld: Normal shutdown$ mysqld\[[0-9]+\]: .*\[Note\] /usr/sbin/mysqld: ready for connections\.$ mysqld\[[0-9]+\]: .*\[Note\] /usr/sbin/mysqld: Shutdown complete$ mysqld\[[0-9]+\]: /usr/sbin/mysqld: ready for connections\.$ mysqld\[[0-9]+\]: .*/usr/sbin/mysqld: Shutdown Complete$ mysqld\[[0-9]+\]: Version: .* socket mysqld\[[0-9]+\]: Warning: Ignoring user change to 'mysql' because the user was set to 'mysql' earlier on the command line$ mysqld_safe\[[0-9]+\]: ?$ mysqld_safe\[[0-9]+\]: able to use the new GRANT command!$ mysqld_safe\[[0-9]+\]: ended$ mysqld_safe\[[0-9]+\]: http://www.mysql.com$ mysqld_safe\[[0-9]+\]: NOTE: If you are upgrading from a MySQL <= 3.22.10 you should run$ mysqld_safe\[[0-9]+\]: PLEASE REMEMBER TO SET A PASSWORD FOR THE MySQL root USER !$ mysqld_safe\[[0-9]+\]: Please report any problems with the /usr/bin/mysqlbug script!$ mysqld_safe\[[0-9]+\]: See the manual for more instructions.$ mysqld_safe\[[0-9]+\]: started$ mysqld_safe\[[0-9]+\]: Support MySQL by buying support/licenses at https://order.mysql.com$ mysqld_safe\[[0-9]+\]: The latest information about MySQL is available on the web at$ mysqld_safe\[[0-9]+\]: the /usr/bin/mysql_fix_privilege_tables. 
Otherwise you will not be$ mysqld_safe\[[0-9]+\]: To do so, start the server, then issue the following commands:$ mysqld_safe\[[0-9]+\]: /usr/bin/mysqladmin -u root -h app109 password 'new-password'$ mysqld_safe\[[0-9]+\]: /usr/bin/mysqladmin -u root password 'new-password'$ usermod\[[0-9]+\]: change user `mysql' GID from `([0-9]+)' to `\1'$ usermod\[[0-9]+\]: change user `mysql' shell from `/bin/false' to `/bin/false'$ galera-3-25.3.20/scripts/mysql/debian/etc/logcheck/ignore.d.server/0000755000015300001660000000000013042054732024560 5ustar jenkinsjenkinsgalera-3-25.3.20/scripts/mysql/debian/etc/logcheck/ignore.d.server/mysql-server-5_10000644000015300001660000000433613042054732027544 0ustar jenkinsjenkins/etc/init.d/mysql\[[0-9]+\]: [0-9]+ processes alive and '/usr/bin/mysqladmin --defaults-(extra-)?file=/etc/mysql/debian.cnf ping' resulted in$ /etc/init.d/mysql\[[0-9]+\]: Check that mysqld is running and that the socket: '/var/run/mysqld/mysqld.sock' exists\!$ /etc/init.d/mysql\[[0-9]+\]: '/usr/bin/mysqladmin --defaults-(extra-)?file=/etc/mysql/debian.cnf ping' resulted in$ /etc/mysql/debian-start\[[0-9]+\]: Checking for crashed MySQL tables\.$ mysqld\[[0-9]+\]: ?$ mysqld\[[0-9]+\]: .*InnoDB: Shutdown completed mysqld\[[0-9]+\]: .*InnoDB: Started; mysqld\[[0-9]+\]: .*InnoDB: Starting shutdown\.\.\.$ mysqld\[[0-9]+\]: .*\[Note\] /usr/sbin/mysqld: Normal shutdown$ mysqld\[[0-9]+\]: .*\[Note\] /usr/sbin/mysqld: ready for connections\.$ mysqld\[[0-9]+\]: .*\[Note\] /usr/sbin/mysqld: Shutdown complete$ mysqld\[[0-9]+\]: /usr/sbin/mysqld: ready for connections\.$ mysqld\[[0-9]+\]: .*/usr/sbin/mysqld: Shutdown Complete$ mysqld\[[0-9]+\]: Version: .* socket mysqld\[[0-9]+\]: Warning: Ignoring user change to 'mysql' because the user was set to 'mysql' earlier on the command line$ mysqld_safe\[[0-9]+\]: ?$ mysqld_safe\[[0-9]+\]: able to use the new GRANT command!$ mysqld_safe\[[0-9]+\]: ended$ mysqld_safe\[[0-9]+\]: http://www.mysql.com$ mysqld_safe\[[0-9]+\]: NOTE: If you are upgrading from a MySQL <= 3.22.10 you should run$ mysqld_safe\[[0-9]+\]: PLEASE REMEMBER TO SET A PASSWORD FOR THE MySQL root USER !$ mysqld_safe\[[0-9]+\]: Please report any problems with the /usr/bin/mysqlbug script!$ mysqld_safe\[[0-9]+\]: See the manual for more instructions.$ mysqld_safe\[[0-9]+\]: started$ mysqld_safe\[[0-9]+\]: Support MySQL by buying support/licenses at https://order.mysql.com$ mysqld_safe\[[0-9]+\]: The latest information about MySQL is available on the web at$ mysqld_safe\[[0-9]+\]: the /usr/bin/mysql_fix_privilege_tables. 
Otherwise you will not be$ mysqld_safe\[[0-9]+\]: To do so, start the server, then issue the following commands:$ mysqld_safe\[[0-9]+\]: /usr/bin/mysqladmin -u root -h app109 password 'new-password'$ mysqld_safe\[[0-9]+\]: /usr/bin/mysqladmin -u root password 'new-password'$ usermod\[[0-9]+\]: change user `mysql' GID from `([0-9]+)' to `\1'$ usermod\[[0-9]+\]: change user `mysql' shell from `/bin/false' to `/bin/false'$ galera-3-25.3.20/scripts/mysql/debian/etc/logcheck/ignore.d.paranoid/0000755000015300001660000000000013042054732025047 5ustar jenkinsjenkinsgalera-3-25.3.20/scripts/mysql/debian/etc/logcheck/ignore.d.paranoid/mysql-server-5_10000644000015300001660000000131013042054732030020 0ustar jenkinsjenkins/etc/init.d/mysql\[[0-9]+\]: Check that mysqld is running and that the socket: '/var/run/mysqld/mysqld.sock' exists\!$ /etc/init.d/mysql\[[0-9]+\]: '/usr/bin/mysqladmin --defaults-(extra-)?file=/etc/mysql/debian.cnf ping' resulted in$ /etc/mysql/debian-start\[[0-9]+\]: Checking for crashed MySQL tables\.$ mysqld\[[0-9]+\]: $ mysqld\[[0-9]+\]: Version: .* socket: '/var/run/mysqld/mysqld.sock' port: 3306$ mysqld\[[0-9]+\]: Warning: Ignoring user change to 'mysql' because the user was set to 'mysql' earlier on the command line$ mysqld_safe\[[0-9]+\]: started$ usermod\[[0-9]+\]: change user `mysql' GID from `([0-9]+)' to `\1'$ usermod\[[0-9]+\]: change user `mysql' shell from `/bin/false' to `/bin/false'$ galera-3-25.3.20/scripts/mysql/debian/etc/logrotate.d/0000755000015300001660000000000013042054732022211 5ustar jenkinsjenkinsgalera-3-25.3.20/scripts/mysql/debian/etc/logrotate.d/mysql-server0000644000015300001660000000150513042054732024606 0ustar jenkinsjenkins# - I put everything in one block and added sharedscripts, so that mysql gets # flush-logs'd only once. # Else the binary logs would automatically increase by n times every day. # - The error log is obsolete, messages go to syslog now. /var/log/mysql.log /var/log/mysql/mysql.log /var/log/mysql/mysql-slow.log { daily rotate 7 missingok create 640 mysql adm compress sharedscripts postrotate test -x /usr/bin/mysqladmin || exit 0 # If this fails, check debian.conf! MYADMIN="/usr/bin/mysqladmin --defaults-file=/etc/mysql/debian.cnf" if [ -z "`$MYADMIN ping 2>/dev/null`" ]; then # Really no mysqld or rather a missing debian-sys-maint user? # If this occurs and is not a error please report a bug. if ps cax | grep -q mysqld; then exit 1 fi else $MYADMIN flush-logs fi endscript } galera-3-25.3.20/scripts/mysql/debian/etc/init.d/0000755000015300001660000000000013042054732021154 5ustar jenkinsjenkinsgalera-3-25.3.20/scripts/mysql/debian/etc/init.d/mysql0000755000015300001660000001250713042054732022254 0ustar jenkinsjenkins#!/bin/bash # ### BEGIN INIT INFO # Provides: mysql # Required-Start: $remote_fs $syslog # Required-Stop: $remote_fs $syslog # Should-Start: $network $named $time # Should-Stop: $network $named $time # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: Start and stop the mysql database server daemon # Description: Controls the main MySQL database server daemon "mysqld" # and its wrapper script "mysqld_safe". ### END INIT INFO # set -e set -u ${DEBIAN_SCRIPT_DEBUG:+ set -v -x} test -x /usr/sbin/mysqld || exit 0 . 
/lib/lsb/init-functions SELF=$(cd $(dirname $0); pwd -P)/$(basename $0) CONF=/etc/mysql/my.cnf MYADMIN="/usr/bin/mysqladmin --defaults-file=/etc/mysql/debian.cnf" # priority can be overriden and "-s" adds output to stderr ERR_LOGGER="logger -p daemon.err -t /etc/init.d/mysql -i" # Safeguard (relative paths, core dumps..) cd / umask 077 # mysqladmin likes to read /root/.my.cnf. This is usually not what I want # as many admins e.g. only store a password without a username there and # so break my scripts. export HOME=/etc/mysql/ ## Fetch a particular option from mysql's invocation. # # Usage: void mysqld_get_param option mysqld_get_param() { /usr/sbin/mysqld --print-defaults \ | tr " " "\n" \ | grep -- "--$1" \ | tail -n 1 \ | cut -d= -f2 } ## Do some sanity checks before even trying to start mysqld. sanity_checks() { # check for config file if [ ! -r /etc/mysql/my.cnf ]; then log_warning_msg "$0: WARNING: /etc/mysql/my.cnf cannot be read. See README.Debian.gz" echo "WARNING: /etc/mysql/my.cnf cannot be read. See README.Debian.gz" | $ERR_LOGGER fi # check for diskspace shortage datadir=`mysqld_get_param datadir` if LC_ALL=C BLOCKSIZE= df --portability $datadir/. | tail -n 1 | awk '{ exit ($4>4096) }'; then log_failure_msg "$0: ERROR: The partition with $datadir is too full!" echo "ERROR: The partition with $datadir is too full!" | $ERR_LOGGER exit 1 fi } ## Checks if there is a server running and if so if it is accessible. # # check_alive insists on a pingable server # check_dead also fails if there is a lost mysqld in the process list # # Usage: boolean mysqld_status [check_alive|check_dead] [warn|nowarn] mysqld_status () { ping_output=`$MYADMIN ping 2>&1`; ping_alive=$(( ! $? )) ps_alive=0 pidfile=`mysqld_get_param pid-file` if [ -f "$pidfile" ] && ps `cat $pidfile` >/dev/null 2>&1; then ps_alive=1; fi if [ "$1" = "check_alive" -a $ping_alive = 1 ] || [ "$1" = "check_dead" -a $ping_alive = 0 -a $ps_alive = 0 ]; then return 0 # EXIT_SUCCESS else if [ "$2" = "warn" ]; then echo -e "$ps_alive processes alive and '$MYADMIN ping' resulted in\n$ping_output\n" | $ERR_LOGGER -p daemon.debug fi return 1 # EXIT_FAILURE fi } # # main() # case "${1:-''}" in 'start') sanity_checks; # Start daemon log_daemon_msg "Starting MySQL database server" "mysqld" if mysqld_status check_alive nowarn; then log_progress_msg "already running" log_end_msg 0 else # Could be removed during boot test -e /var/run/mysqld || install -m 755 -o mysql -g root -d /var/run/mysqld # Start MySQL! /usr/bin/mysqld_safe > /dev/null 2>&1 & # 6s was reported in #352070 to be too few when using ndbcluster for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14; do sleep 1 if mysqld_status check_alive nowarn ; then break; fi log_progress_msg "." done if mysqld_status check_alive warn; then log_end_msg 0 # Now start mysqlcheck or whatever the admin wants. output=$(/etc/mysql/debian-start) [ -n "$output" ] && log_action_msg "$output" else log_end_msg 1 log_failure_msg "Please take a look at the syslog" fi fi ;; 'stop') # * As a passwordless mysqladmin (e.g. via ~/.my.cnf) must be possible # at least for cron, we can rely on it here, too. (although we have # to specify it explicit as e.g. sudo environments points to the normal # users home and not /root) log_daemon_msg "Stopping MySQL database server" "mysqld" if ! mysqld_status check_dead nowarn; then set +e shutdown_out=`$MYADMIN shutdown 2>&1`; r=$? 
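# $MYADMIN above authenticates through /etc/mysql/debian.cnf; that file is
# generated by the package scripts and is expected to look roughly like this
# (values shown only for illustration):
#   [client]
#   host     = localhost
#   user     = debian-sys-maint
#   password = <random, written at install time>
#   socket   = /var/run/mysqld/mysqld.sock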
set -e if [ "$r" -ne 0 ]; then log_end_msg 1 [ "$VERBOSE" != "no" ] && log_failure_msg "Error: $shutdown_out" log_daemon_msg "Killing MySQL database server by signal" "mysqld" killall -15 mysqld server_down= for i in 1 2 3 4 5 6 7 8 9 10; do sleep 1 if mysqld_status check_dead nowarn; then server_down=1; break; fi done if test -z "$server_down"; then killall -9 mysqld; fi fi fi if ! mysqld_status check_dead warn; then log_end_msg 1 log_failure_msg "Please stop MySQL manually and read /usr/share/doc/mysql-server-5.1/README.Debian.gz!" exit -1 else log_end_msg 0 fi ;; 'restart') set +e; $SELF stop; set -e $SELF start ;; 'reload'|'force-reload') log_daemon_msg "Reloading MySQL database server" "mysqld" $MYADMIN reload log_end_msg 0 ;; 'status') if mysqld_status check_alive nowarn; then log_action_msg "$($MYADMIN version)" else log_action_msg "MySQL is stopped." exit 3 fi ;; *) echo "Usage: $SELF start|stop|restart|reload|force-reload|status" exit 1 ;; esac galera-3-25.3.20/scripts/mysql/debian/etc/mysql/0000755000015300001660000000000013042054732021134 5ustar jenkinsjenkinsgalera-3-25.3.20/scripts/mysql/debian/etc/mysql/debian-start0000755000015300001660000000225613042054732023444 0ustar jenkinsjenkins#!/bin/bash # # This script is executed by "/etc/init.d/mysql" on every (re)start. # # Changes to this file will be preserved when updating the Debian package. # source /usr/share/mysql/debian-start.inc.sh MYSQL="/usr/bin/mysql --defaults-file=/etc/mysql/debian.cnf" MYADMIN="/usr/bin/mysqladmin --defaults-file=/etc/mysql/debian.cnf" MYUPGRADE="/usr/bin/mysql_upgrade --defaults-extra-file=/etc/mysql/debian.cnf" MYCHECK="/usr/bin/mysqlcheck --defaults-file=/etc/mysql/debian.cnf" MYCHECK_SUBJECT="WARNING: mysqlcheck has found corrupt tables" MYCHECK_PARAMS="--all-databases --fast --silent" MYCHECK_RCPT="root" # The following commands should be run when the server is up but in background # where they do not block the server start and in one shell instance so that # they run sequentially. They are supposed not to echo anything to stdout. # If you want to disable the check for crashed tables comment # "check_for_crashed_tables" out. # (There may be no output to stdout inside the background process!) echo "Checking for corrupt, not cleanly closed and upgrade needing tables." 
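# With the defaults above, the crashed-table check effectively expands to
# something like (helper functions live in debian-start.inc.sh; expansion shown
# only for illustration):
#   /usr/bin/mysqlcheck --defaults-file=/etc/mysql/debian.cnf \
#       --all-databases --fast --silent
# with any findings presumably reported to "$MYCHECK_RCPT" under
# "$MYCHECK_SUBJECT".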
( upgrade_system_tables_if_necessary; check_root_accounts; check_for_crashed_tables; ) >&2 & exit 0 galera-3-25.3.20/scripts/mysql/debian/etc/mysql/conf.d/0000755000015300001660000000000013042054732022303 5ustar jenkinsjenkinsgalera-3-25.3.20/scripts/mysql/debian/etc/mysql/conf.d/mysqld_safe_syslog.cnf0000644000015300001660000000002513042054732026677 0ustar jenkinsjenkins[mysqld_safe] syslog galera-3-25.3.20/scripts/mysql/debian/5.6.list0000644000015300001660000002557213042054732020434 0ustar jenkinsjenkins# This is a MySQL-wsrep package description for ESP package manager %include common.list %requires libaio1 %requires mysql-client %replaces mysql-server-5.6 0.0.0 ${mysql_version} %replaces mysql-server-5.5 0.0.0 ${mysql_version} %provides mysql-server-5.6 ############################################## # Below files sorted into paragraphs # # in the path alphabetical order # ############################################## d 755 root root $BINS_DST - #in client f 755 root root $BINS_DST/innochecksum $MYSQL_SRC/extra/innochecksum f 755 root root $BINS_DST/msql2mysql $MYSQL_SRC/scripts/msql2mysql #in client f 755 root root $BINS_DST/my_print_defaults $MYSQL_SRC/extra/my_print_defaults f 755 root root $BINS_DST/myisamchk $MYSQL_SRC/storage/myisam/myisamchk f 755 root root $BINS_DST/myisamlog $MYSQL_SRC/storage/myisam/myisamlog f 755 root root $BINS_DST/myisampack $MYSQL_SRC/storage/myisam/myisampack f 755 root root $BINS_DST/mysql_convert_table_format $MYSQL_SRC/scripts/mysql_convert_table_format #f 755 root root $BINS_DST/mysql_fix_privilege_tables $MYSQL_SRC/scripts/mysql_fix_privilege_tables f 755 root root $BINS_DST/mysql_install_db $MYSQL_SRC/scripts/mysql_install_db f 755 root root $BINS_DST/mysql_secure_installation $MYSQL_SRC/scripts/mysql_secure_installation f 755 root root $BINS_DST/mysql_setpermission $MYSQL_SRC/scripts/mysql_setpermission f 755 root root $BINS_DST/mysql_tzinfo_to_sql $MYSQL_SRC/sql/mysql_tzinfo_to_sql f 755 root root $BINS_DST/mysql_upgrade $MYSQL_SRC/client/mysql_upgrade f 755 root root $BINS_DST/mysql_zap $MYSQL_SRC/scripts/mysql_zap f 755 root root $BINS_DST/mysqlbinlog $MYSQL_SRC/client/mysqlbinlog f 755 root root $BINS_DST/mysqld_multi $MYSQL_SRC/scripts/mysqld_multi f 755 root root $BINS_DST/mysqld_safe $GALERA_SRC/scripts/mysql/debian/mysqld_safe-5.5 #f 755 root root $BINS_DST/mysqld_safe $MYSQL_SRC/scripts/mysqld_safe f 755 root root $BINS_DST/mysqlhotcopy $MYSQL_SRC/scripts/mysqlhotcopy f 755 root root $BINS_DST/mysqltest $MYSQL_SRC/client/mysqltest #in client f 755 root root $BINS_DST/perror $MYSQL_SRC/extra/perror f 755 root root $BINS_DST/replace $MYSQL_SRC/extra/replace f 755 root root $BINS_DST/resolve_stack_dump $MYSQL_SRC/extra/resolve_stack_dump f 755 root root $BINS_DST/resolveip $MYSQL_SRC/extra/resolveip f 755 root root $BINS_DST/wsrep_sst_xtrabackup $MYSQL_SRC/scripts/wsrep_sst_xtrabackup d 755 root root $LIBS_DST - d 755 root root $PLUGIN_DST - f 755 root root $PLUGIN_DST/adt_null.so $MYSQL_SRC/plugin/audit_null/adt_null.so f 755 root root $PLUGIN_DST/auth.so $MYSQL_SRC/plugin/auth/auth.so f 755 root root $PLUGIN_DST/auth_socket.so $MYSQL_SRC/plugin/auth/auth_socket.so f 755 root root $PLUGIN_DST/auth_test_plugin.so $MYSQL_SRC/plugin/auth/auth_test_plugin.so f 755 root root $PLUGIN_DST/innodb_engine.so $MYSQL_SRC/plugin/innodb_memcached/innodb_memcache/innodb_engine.so f 755 root root $PLUGIN_DST/libdaemon_example.so $MYSQL_SRC/plugin/daemon_example/libdaemon_example.so f 755 root root $PLUGIN_DST/libmemcached.so 
$MYSQL_SRC/plugin/innodb_memcached/daemon_memcached/libmemcached.so f 755 root root $PLUGIN_DST/mypluglib.so $MYSQL_SRC/plugin/fulltext/mypluglib.so f 755 root root $PLUGIN_DST/qa_auth_client.so $MYSQL_SRC/plugin/auth/qa_auth_client.so f 755 root root $PLUGIN_DST/qa_auth_interface.so $MYSQL_SRC/plugin/auth/qa_auth_interface.so f 755 root root $PLUGIN_DST/qa_auth_server.so $MYSQL_SRC/plugin/auth/qa_auth_server.so f 755 root root $PLUGIN_DST/semisync_master.so $MYSQL_SRC/plugin/semisync/semisync_master.so f 755 root root $PLUGIN_DST/semisync_slave.so $MYSQL_SRC/plugin/semisync/semisync_slave.so f 755 root root $PLUGIN_DST/validate_password.so $MYSQL_SRC/plugin/password_validation/validate_password.so d 755 root root $SBIN_DST - f 755 root root $SBIN_DST/$MYSQLD_BINARY $MYSQL_SRC/sql/$MYSQLD_BINARY # /usr/share/doc/... d 755 root root $DOCS_DST - f 644 root root $DOCS_DST/COPYING $MYSQL_SRC/COPYING f 644 root root $DOCS_DST/ChangeLog $MYSQL_SRC/Docs/ChangeLog f 644 root root $DOCS_DST/INFO_BIN $MYSQL_SRC/Docs/INFO_BIN f 644 root root $DOCS_DST/INFO_SRC $MYSQL_SRC/Docs/INFO_SRC f 644 root root $DOCS_DST/INSTALL-BINARY $MYSQL_SRC/Docs/INSTALL-BINARY f 644 root root $DOCS_DST/README $MYSQL_SRC/README f 644 root root $DOCS_DST/myisam.txt $MYSQL_SRC/Docs/myisam.txt f 644 root root $DOCS_DST/sp-imp-spec.txt $MYSQL_SRC/Docs/sp-imp-spec.txt # manpages d 755 root root $MAN_DST/man1 - f 644 root root $MAN_DST/man1/comp_err.1.gz $MYSQL_SRC/man/comp_err.1.gz #in client f 644 root root $MAN_DST/man1/innochecksum.1.gz $MYSQL_SRC/man/innochecksum.1.gz f 644 root root $MAN_DST/man1/msql2mysql.1.gz $MYSQL_SRC/man/msql2mysql.1.gz #in client f 644 root root $MAN_DST/man1/my_print_defaults.1.gz $MYSQL_SRC/man/my_print_defaults.1.gz f 644 root root $MAN_DST/man1/myisamchk.1.gz $MYSQL_SRC/man/myisamchk.1.gz f 644 root root $MAN_DST/man1/myisamlog.1.gz $MYSQL_SRC/man/myisamlog.1.gz f 644 root root $MAN_DST/man1/myisampack.1.gz $MYSQL_SRC/man/myisampack.1.gz f 644 root root $MAN_DST/man1/mysql.server.1.gz $MYSQL_SRC/man/mysql.server.1.gz f 644 root root $MAN_DST/man1/mysql_convert_table_format.1.gz $MYSQL_SRC/man/mysql_convert_table_format.1.gz #f 644 root root $MAN_DST/man1/mysql_fix_privilege_tables.1.gz $MYSQL_SRC/man/mysql_fix_privilege_tables.1.gz f 644 root root $MAN_DST/man1/mysql_install_db.1.gz $MYSQL_SRC/man/mysql_install_db.1.gz f 644 root root $MAN_DST/man1/mysql_secure_installation.1.gz $MYSQL_SRC/man/mysql_secure_installation.1.gz f 644 root root $MAN_DST/man1/mysql_setpermission.1.gz $MYSQL_SRC/man/mysql_setpermission.1.gz f 644 root root $MAN_DST/man1/mysql_tzinfo_to_sql.1.gz $MYSQL_SRC/man/mysql_tzinfo_to_sql.1.gz f 644 root root $MAN_DST/man1/mysql_upgrade.1.gz $MYSQL_SRC/man/mysql_upgrade.1.gz f 644 root root $MAN_DST/man1/mysql_zap.1.gz $MYSQL_SRC/man/mysql_zap.1.gz f 644 root root $MAN_DST/man1/mysqlbinlog.1.gz $MYSQL_SRC/man/mysqlbinlog.1.gz f 644 root root $MAN_DST/man1/mysqld_multi.1.gz $MYSQL_SRC/man/mysqld_multi.1.gz f 644 root root $MAN_DST/man1/mysqld_safe.1.gz $MYSQL_SRC/man/mysqld_safe.1.gz f 644 root root $MAN_DST/man1/mysqlhotcopy.1.gz $MYSQL_SRC/man/mysqlhotcopy.1.gz f 644 root root $MAN_DST/man1/mysqltest.1.gz $MYSQL_SRC/man/mysqltest.1.gz #in client f 644 root root $MAN_DST/man1/perror.1.gz $MYSQL_SRC/man/perror.1.gz f 644 root root $MAN_DST/man1/replace.1.gz $MYSQL_SRC/man/replace.1.gz f 644 root root $MAN_DST/man1/resolve_stack_dump.1.gz $MYSQL_SRC/man/resolve_stack_dump.1.gz f 644 root root $MAN_DST/man1/resolveip.1.gz $MYSQL_SRC/man/resolveip.1.gz d 755 root root $MAN_DST/man8 
- f 644 root root $MAN_DST/man8/mysqld.8.gz $MYSQL_SRC/man/mysqld.8.gz f 644 root root $SHAR_DST $MYSQL_SRC/support-files/config.*.ini f 644 root root $SHAR_DST $MYSQL_SRC/scripts/*.sql f 644 root root $SHAR_DST/binary-configure $MYSQL_SRC/support-files/binary-configure f 644 root root $SHAR_DST/errmsg-utf8.txt $MYSQL_SRC/sql/share/errmsg-utf8.txt f 644 root root $SHAR_DST $MYSQL_SRC/support-files/my-*.cnf f 644 root root $SHAR_DST/mysql-log-rotate $MYSQL_SRC/support-files/mysql-log-rotate f 755 root root $SHAR_DST/mysql.server $MYSQL_SRC/support-files/mysql.server f 644 root root $SHAR_DST/mysqld_multi.server $MYSQL_SRC/support-files/mysqld_multi.server # f 644 root root $SHAR_DST/ndb-config-2-node.ini $MYSQL_SRC/support-files/ndb-config-2-node.ini d 755 root root $SHAR_DST/charsets - f 644 root root $SHAR_DST/charsets $MYSQL_SRC/sql/share/charsets/* $lang00=czech d 755 root root $SHAR_DST/${lang00} - f 644 root root $SHAR_DST/${lang00} $MYSQL_SRC/sql/share/${lang00}/* $lang01=danish d 755 root root $SHAR_DST/${lang01} - f 644 root root $SHAR_DST/${lang01} $MYSQL_SRC/sql/share/${lang01}/* $lang02=dutch d 755 root root $SHAR_DST/${lang02} - f 644 root root $SHAR_DST/${lang02} $MYSQL_SRC/sql/share/${lang02}/* $lang03=english d 755 root root $SHAR_DST/${lang03} - f 644 root root $SHAR_DST/${lang03} $MYSQL_SRC/sql/share/${lang03}/* $lang04=estonian d 755 root root $SHAR_DST/${lang04} - f 644 root root $SHAR_DST/${lang04} $MYSQL_SRC/sql/share/${lang04}/* $lang05=french d 755 root root $SHAR_DST/${lang05} - f 644 root root $SHAR_DST/${lang05} $MYSQL_SRC/sql/share/${lang05}/* $lang06=german d 755 root root $SHAR_DST/${lang06} - f 644 root root $SHAR_DST/${lang06} $MYSQL_SRC/sql/share/${lang06}/* $lang07=greek d 755 root root $SHAR_DST/${lang07} - f 644 root root $SHAR_DST/${lang07} $MYSQL_SRC/sql/share/${lang07}/* $lang08=hungarian d 755 root root $SHAR_DST/${lang08} - f 644 root root $SHAR_DST/${lang08} $MYSQL_SRC/sql/share/${lang08}/* $lang09=italian d 755 root root $SHAR_DST/${lang09} - f 644 root root $SHAR_DST/${lang09} $MYSQL_SRC/sql/share/${lang09}/* $lang10=japanese d 755 root root $SHAR_DST/${lang10} - f 644 root root $SHAR_DST/${lang10} $MYSQL_SRC/sql/share/${lang10}/* $lang11=korean d 755 root root $SHAR_DST/${lang11} - f 644 root root $SHAR_DST/${lang11} $MYSQL_SRC/sql/share/${lang11}/* $lang12=norwegian d 755 root root $SHAR_DST/${lang12} - f 644 root root $SHAR_DST/${lang12} $MYSQL_SRC/sql/share/${lang12}/* $lang13=norwegian-ny d 755 root root $SHAR_DST/${lang13} - f 644 root root $SHAR_DST/${lang13} $MYSQL_SRC/sql/share/${lang13}/* $lang14=polish d 755 root root $SHAR_DST/${lang14} - f 644 root root $SHAR_DST/${lang14} $MYSQL_SRC/sql/share/${lang14}/* $lang15=portuguese d 755 root root $SHAR_DST/${lang15} - f 644 root root $SHAR_DST/${lang15} $MYSQL_SRC/sql/share/${lang15}/* $lang16=romanian d 755 root root $SHAR_DST/${lang16} - f 644 root root $SHAR_DST/${lang16} $MYSQL_SRC/sql/share/${lang16}/* $lang17=russian d 755 root root $SHAR_DST/${lang17} - f 644 root root $SHAR_DST/${lang17} $MYSQL_SRC/sql/share/${lang17}/* $lang18=serbian d 755 root root $SHAR_DST/${lang18} - f 644 root root $SHAR_DST/${lang18} $MYSQL_SRC/sql/share/${lang18}/* $lang19=slovak d 755 root root $SHAR_DST/${lang19} - f 644 root root $SHAR_DST/${lang19} $MYSQL_SRC/sql/share/${lang19}/* $lang20=spanish d 755 root root $SHAR_DST/${lang20} - f 644 root root $SHAR_DST/${lang20} $MYSQL_SRC/sql/share/${lang20}/* $lang21=swedish d 755 root root $SHAR_DST/${lang21} - f 644 root root $SHAR_DST/${lang21} 
$MYSQL_SRC/sql/share/${lang21}/* $lang22=ukrainian d 755 root root $SHAR_DST/${lang22} - f 644 root root $SHAR_DST/${lang22} $MYSQL_SRC/sql/share/${lang22}/* d 755 mysql root /var/run/mysqld - # galera-3-25.3.20/scripts/mysql/debian/common.list0000644000015300001660000001300213042054732021375 0ustar jenkinsjenkins# This is a MySQL-wsrep package description for ESP package manager # Debian specific part %product wsrep-enabled MySQL server %copyright MySQL AB, Codership Oy, All Rights Reserved %vendor Codership Oy %license COPYING %readme README %description MySQL server + wsrep patch (https://launchpad.net/codership-mysql) $mysql_version=${MYSQL_VER} $wsrep_version=${WSREP_VER} %version ${mysql_version}-${wsrep_version} %requires psmisc %requires debianutils 1.6 %requires libc6 2.4 %requires libdbi-perl %requires libdbd-mysql-perl 1.2202 %requires libgcc1 4.1.1 %requires libncurses5 5.6 %requires libstdc++6 4.1.1 %requires libwrap0 7.6 %requires perl %requires zlib1g 1.1.4 %requires lsof %replaces mysql-server-core 0.0.0 ${mysql_version} %replaces mysql-server-core-5.0 0.0.0 ${mysql_version} %replaces mysql-server-core-5.1 0.0.0 ${mysql_version} %replaces mysql-server 0.0.0 ${mysql_version} %replaces mysql-server-5.0 0.0.0 ${mysql_version} %replaces mysql-server-5.1 0.0.0 ${mysql_version} %provides mysql-server-core %provides mysql-server #%incompat mysql-server-core #%incompat mysql-server-core-4.1 #%incompat mysql-server-core-5.0 #%incompat mysql-server-core-5.1 #%incompat mysql-server #%incompat mysql-server-4.1 #%incompat mysql-server-5.0 #%incompat mysql-server-5.1 $prefix=/usr $CONF_DST=/etc/mysql $LIBS_DST=${prefix}/lib/mysql $PLUGIN_DST=${LIBS_DST}/plugin $SHAR_DST=${prefix}/share/mysql $SBIN_DST=${prefix}/sbin $BINS_DST=${prefix}/bin $DOCS_DST=${prefix}/share/doc/mysql-server-${MYSQL_MAJOR_VER}.${MYSQL_MINOR_VER} $MAN_DST=${prefix}/share/man # Distribution dependent files $SRC=$GALERA_SRC/scripts/mysql/debian $ETC=$SRC/etc d 755 root root /etc/init.d - f 755 root root /etc/init.d/mysql $SRC/mysql d 755 root root /etc/logrotate.d - f 755 root root /etc/logrotate.d/mysql-server $ETC/logrotate.d/mysql-server d 755 root root /etc/logcheck - d 755 root root /etc/logcheck/ignore.d.paranoid - f 644 root root /etc/logcheck/ignore.d.paranoid/mysql-server-${MYSQL_MAJOR_VER}_${MYSQL_MINOR_VER} $ETC/logcheck/ignore.d.paranoid/mysql-server-5_1 d 755 root root /etc/logcheck/ignore.d.server - f 644 root root /etc/logcheck/ignore.d.server/mysql-server-${MYSQL_MAJOR_VER}_${MYSQL_MINOR_VER} $ETC/logcheck/ignore.d.server/mysql-server-5_1 d 755 root root /etc/logcheck/ignore.d.workstation - f 644 root root /etc/logcheck/ignore.d.workstation/mysql-server-${MYSQL_MAJOR_VER}_${MYSQL_MINOR_VER} $ETC/logcheck/ignore.d.workstation/mysql-server-5_1 d 755 root root $CONF_DST f 755 root root $CONF_DST/debian-start $ETC/mysql/debian-start d 755 root root $CONF_DST/conf.d f 644 root root $CONF_DST/conf.d/mysqld_safe_syslog.cnf $ETC/mysql/conf.d/mysqld_safe_syslog.cnf d 755 root root $SHAR_DST - f 755 root root $SHAR_DST/debian-start.inc.sh $SRC/debian-start.inc.sh f 755 root root $SHAR_DST/echo_stderr $SRC/echo_stderr f 755 root root $BINS_DST/my_print_defaults_wsrep $MYSQL_SRC/extra/my_print_defaults ########################## # wsrep-specific files # ########################## c 640 mysql mysql $CONF_DST/conf.d/wsrep.cnf $MYSQL_SRC/support-files/wsrep.cnf f 755 root root $BINS_DST/wsrep_sst_common $MYSQL_SRC/scripts/wsrep_sst_common f 755 root root $BINS_DST/wsrep_sst_mysqldump 
$MYSQL_SRC/scripts/wsrep_sst_mysqldump f 755 root root $BINS_DST/wsrep_sst_rsync $MYSQL_SRC/scripts/wsrep_sst_rsync l 755 root root $BINS_DST/wsrep_sst_rsync_wan wsrep_sst_rsync #f 755 root root $BINS_DST/wsrep_sst_xtrabackup $MYSQL_SRC/scripts/wsrep_sst_xtrabackup f 644 root root $DOCS_DST/README-wsrep $MYSQL_SRC/Docs/README-wsrep f 644 root root $SHAR_DST/wsrep.cnf $MYSQL_SRC/support-files/wsrep.cnf f 644 root root $SHAR_DST/wsrep_notify $MYSQL_SRC/support-files/wsrep_notify ################################## # Distribution dependent scripts # ################################## $mysql_data=/var/lib/mysql # Add mysql group and user if there are none %preinstall </dev/null || addgroup --system mysql >/dev/null getent passwd mysql >/dev/null || \ adduser --system --disabled-login --ingroup mysql --home ${mysql_data} \ --gecos "MySQL Server" --shell /bin/false mysql >/dev/null [ -e "$BINS_DST/my_print_defaults" ] || \ ( cd $BINS_DST && ln -sf my_print_defaults_wsrep my_print_defaults ) EOF_PREINSTALL %postinstall </dev/null || exit $ ldconfig -n $LIBS_DST #test -d ${mysql_data} || (mysql_install_db --user=mysql --datadir=${mysql_data}) # it seems that we can run mysql_install_db regardless of existing tables. mysql_install_db --wsrep-on=0 --user=mysql --datadir=${mysql_data} --basedir=/usr # This is a fix/workaround for AppArmor profile provided with mysql-server deb [ ! -d /etc/apparmor.d/disable ] || \ ( cd /etc/apparmor.d/disable && ln -sf ../usr.sbin.mysqld ./ ) [ ! -x /etc/init.d/apparmor ] || /etc/init.d/apparmor restart EOF_POSTINSTALL %preremove </dev/null || exit $ [ ! -L $BINS_DST/my_print_defaults ] || rm -rf $BINS_DST/my_print_defaults [ ! -L /etc/apparmor.d/disable/usr.sbin.mysqld ] || rm -rf /etc/apparmor.d/disable/usr.sbin.mysqld [ ! -x /etc/init.d/apparmor ] || /etc/init.d/apparmor restart EOF_PREREMOVE # galera-3-25.3.20/scripts/mysql/build.sh0000755000015300001660000006466713042054732017453 0ustar jenkinsjenkins#!/bin/bash -eux if test -z "$MYSQL_SRC" then echo "MYSQL_SRC variable pointing at MySQL/wsrep sources is not set. Can't continue." exit -1 fi use_mysql_5.1_sources() { MYSQL_MAJOR="5.1" export MYSQL_MAJOR # for DEB build export MYSQL_MAJOR_VER="5" export MYSQL_MINOR_VER="1" MYSQL_VER=`grep AC_INIT $MYSQL_SRC/configure.in | awk -F '[' '{ print $3 }' | awk -F ']' '{ print $1 }'` } use_mariadb_5.1_sources() { use_mysql_5.1_sources } use_mysql_5.5_sources() { export MYSQL_MAJOR_VER=`grep MYSQL_VERSION_MAJOR $MYSQL_SRC/VERSION | cut -d = -f 2` export MYSQL_MINOR_VER=`grep MYSQL_VERSION_MINOR $MYSQL_SRC/VERSION | cut -d = -f 2` export MYSQL_PATCH_VER=`grep MYSQL_VERSION_PATCH $MYSQL_SRC/VERSION | cut -d = -f 2` MYSQL_MAJOR=$MYSQL_MAJOR_VER.$MYSQL_MINOR_VER export MYSQL_MAJOR # for DEB build MYSQL_VER=$MYSQL_MAJOR.$MYSQL_PATCH_VER } if test -f "$MYSQL_SRC/configure.in" then use_mysql_5.1_sources elif test -f "$MYSQL_SRC/VERSION" then use_mysql_5.5_sources else echo "Unknown MySQL version in MYSQL_SRC path. Versions 5.1 and 5.5 are supported. Can't continue." 
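# (The test above keys off the source tree layout: a 5.1 tree still ships
# autotools' configure.in, while 5.5+ trees carry the cmake VERSION file.
# A typical invocation of this script therefore looks something like the
# following, with paths and options purely illustrative:
#   MYSQL_SRC=$HOME/src/mysql-wsrep-5.5 ./build.sh -p -j 4 )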
exit -1 fi # Initializing variables to defaults uname -p | grep -q 'i[36]86' && CPU=pentium || CPU=amd64 # this works for x86 Solaris too BOOTSTRAP=no DEBUG=no DEBUG_LEVEL=0 GALERA_DEBUG=no NO_STRIP=no RELEASE="" TAR=no BIN_DIST=no PACKAGE=no INSTALL=no CONFIGURE=no SKIP_BUILD=no SKIP_CONFIGURE=no SKIP_CLIENTS=no SCRATCH=no SCONS="yes" JOBS=1 GCOMM_IMPL=${GCOMM_IMPL:-"galeracomm"} TARGET="" MYSQLD_BINARY="mysqld" SYNC_BEFORE_PACK=${SYNC_BEFORE_PACK:-""} OS=$(uname) case "$OS" in "Linux") JOBS=$(grep -c ^processor /proc/cpuinfo) ;; "SunOS") JOBS=$(psrinfo | wc -l | tr -d ' ') ;; "Darwin" | "FreeBSD") JOBS="$(sysctl -n hw.ncpu)" ;; *) echo "CPU information not available: unsupported OS: '$OS'";; esac if [ "$OS" == "FreeBSD" ]; then CC=${CC:-"gcc48"} CXX=${CXX:-"g++48"} LD_LIBRARY_PATH=${LD_LIBRARY_PATH:-"/usr/local/lib/$(basename $CC)"} else CC=${CC:-"gcc"} CXX=${CXX:-"g++"} fi if ! which "$CC" ; then echo "Can't execute $CC" ; exit 1; fi if ! which "$CXX"; then echo "Can't execute $CXX"; exit 1; fi export CC CXX LD_LIBRARY_PATH EXTRA_SYSROOT=${EXTRA_SYSROOT:-""} if [ "$OS" == "Darwin" ]; then if which -s port && test -x /opt/local/bin/port; then EXTRA_SYSROOT=/opt/local elif which -s brew && test -x /usr/local/bin/brew; then EXTRA_SYSROOT=/usr/local elif which -s fink && test -x /sw/bin/fink; then EXTRA_SYSROOT=/sw fi elif [ "$OS" == "FreeBSD" ]; then EXTRA_SYSROOT=/usr/local fi usage() { cat < --last-stage -s|--scratch build everything from scratch -c|--configure reconfigure the build system (implies -s) -b|--bootstap rebuild the build system (implies -c) -o|--opt configure build with debug disabled (implies -c) -m32/-m64 build 32/64-bit binaries on x86 -d|--debug configure build with debug enabled (implies -c) -dl|--debug-level set debug level (1, implies -c) --gd|--galera-debug only galera debug build (optimized mysqld) --with-spread configure build with Spread (implies -c) --no-strip prevent stripping of release binaries -j|--jobs number of parallel compilation jobs (${JOBS}) -p|--package create DEB/RPM packages (depending on the distribution) --bin create binary tar package -t|--tar create a demo test distribution --sb|--skip-build skip the actual build, use the existing binaries --sc|--skip-configure skip configure --skip-clients don't include client binaries in test package --scons use scons to build galera libraries (yes) -r|--release , otherwise revisions will be used -s and -b options affect only Galera build. 
EOF } # Parse command line while test $# -gt 0 do case $1 in -b|--bootstrap) BOOTSTRAP="yes" # Bootstrap the build system CONFIGURE="yes" ;; --bin) BIN_DIST="yes" ;; -c|--configure) CONFIGURE="yes" # Reconfigure the build system ;; -s|--scratch) SCRATCH="yes" # Build from scratch (run make clean) ;; -o|--opt) CONFIGURE="yes" # Reconfigure without debug ;; -d|--debug) DEBUG="yes" # Reconfigure with debug CONFIGURE="yes" NO_STRIP="yes" # Don't strip the binaries ;; --dl|--debug-level) shift; DEBUG_LEVEL=$1 ;; --gd|--galera-debug) GALERA_DEBUG="yes" ;; -r|--release) RELEASE="$2" # Compile without debug CONFIGURE="yes" shift ;; -t|--tar) TAR="yes" # Create a TGZ package ;; -i|--install) INSTALL="yes" ;; -p|--package) PACKAGE="yes" # Create a DEB package CONFIGURE="yes" # don't forget to reconfigure with --prefix=/usr ;; -j|--jobs) shift; JOBS=$1 ;; --no-strip) NO_STRIP="yes" # Don't strip the binaries ;; --with*-spread) WITH_SPREAD="$1" ;; -m32) CFLAGS="$CFLAGS -m32" CXXFLAGS="$CXXFLAGS -m32" CONFIGURE="yes" CPU="pentium" TARGET="i686" ;; -m64) CFLAGS="$CFLAGS -m64" CXXFLAGS="$CXXFLAGS -m64" CONFIGURE="yes" CPU="amd64" TARGET="x86_64" ;; --sb|--skip-build) SKIP_BUILD="yes" ;; --sc|--skip-configure) SKIP_CONFIGURE="yes" ;; --skip-clients) SKIP_CLIENTS="yes" ;; --scons) SCONS="yes" ;; --help) usage exit 0 ;; *) echo "Unrecognized option: $1" usage exit 1 ;; esac shift done if [ "$PACKAGE" == "yes" -a "$OS" == "Linux" ] then # check whether sudo accepts -E to preserve environment echo "testing sudo" if sudo -E $(which epm) --version >/dev/null 2>&1 then echo "sudo accepts -E" SUDO_ENV="sudo -E" SUDO="sudo" else echo "sudo does not accept param -E" if [ $(id -ur) != 0 ] then echo "error, must build as root" exit 1 else SUDO_ENV="" SUDO="" echo "I'm root, can continue" fi fi # If packaging with epm, make sure that mysql user exists in build system to # get file ownerships right. echo "Checking for mysql user and group for epm:" getent passwd mysql >/dev/null if [ $? != 0 ] then echo "Error: user 'mysql' does not exist" exit 1 else echo "User 'mysql' ok" fi getent group mysql >/dev/null if [ $? 
!= 0 ] then echo "Error: group 'mysql' doest not exist" exit 1 else echo "Group 'mysql' ok" fi fi if [ "$INSTALL" == "yes" ]; then TAR="yes"; fi if [ "$SKIP_BUILD" == "yes" ]; then CONFIGURE="no"; fi which dpkg >/dev/null 2>&1 && DEBIAN=1 || DEBIAN=0 # export command options for Galera build export BOOTSTRAP CONFIGURE SCRATCH DEBUG WITH_SPREAD CFLAGS CXXFLAGS \ PACKAGE CPU TARGET SKIP_BUILD RELEASE DEBIAN SCONS JOBS DEBUG_LEVEL set -eu # Absolute path of this script folder BUILD_ROOT=$(cd $(dirname $0); pwd -P) GALERA_SRC=${GALERA_SRC:-$BUILD_ROOT/../../} # Source paths are either absolute or relative to script, get absolute MYSQL_SRC=$(cd $MYSQL_SRC; pwd -P; cd $BUILD_ROOT) GALERA_SRC=$(cd $GALERA_SRC; pwd -P; cd $BUILD_ROOT) if [ "$MYSQL_MAJOR" = "5.1" ] then MYSQL_BUILD_DIR="$MYSQL_SRC" else [ "$DEBUG" == "yes" ] \ && MYSQL_BUILD_DIR="$MYSQL_SRC/build_debug" \ || MYSQL_BUILD_DIR="$MYSQL_SRC/build_release" fi ###################################### ## ## ## Build Galera ## ## ## ###################################### # Also obtain SVN revision information if [ "$TAR" == "yes" -o "$BIN_DIST" == "yes" ] then cd $GALERA_SRC debug_opt="" if [ $GALERA_DEBUG == "yes" ] then debug_opt="-d" fi scripts/build.sh $debug_opt # options are passed via environment variables # sadly we can't easily pass GALERA_REV from Galera build script GALERA_REV=${GALERA_REV:-"XXXX"} fi ###################################### ## ## ## Build MySQL ## ## ## ###################################### # Obtain MySQL version and revision number cd $MYSQL_SRC WSREP_REV=$(git log --pretty=oneline | wc -l) || \ WSREP_REV=$(bzr revno --tree -q) || \ WSREP_REV="XXXX" WSREP_REV=${WSREP_REV//[[:space:]]/} # this does not work on an unconfigured source MYSQL_VER=$(grep '#define VERSION' $MYSQL_SRC/include/config.h | sed s/\"//g | cut -d ' ' -f 3 | cut -d '-' -f 1-2) if [ "$PACKAGE" == "yes" ] || [ "$BIN_DIST" == "yes" ] then # fetch and patch pristine sources cd ${TMPDIR:-/tmp} mysql_tag=mysql-$MYSQL_VER if [ "$SKIP_BUILD" == "no" ] || [ ! -d $mysql_tag ] then mysql_orig_tar_gz=$mysql_tag.tar.gz url2=http://ftp.sunet.se/pub/unix/databases/relational/mysql/Downloads/MySQL-$MYSQL_MAJOR url1=ftp://sunsite.informatik.rwth-aachen.de/pub/mirror/www.mysql.com/Downloads/MySQL-$MYSQL_MAJOR if [ ! -r $mysql_orig_tar_gz ] then echo "Downloading $mysql_orig_tar_gz..." wget -N $url1/$mysql_orig_tar_gz || wget -N $url2/$mysql_orig_tar_gz fi echo "Getting wsrep patch..." patch_file=$(${BUILD_ROOT}/get_patch.sh $mysql_tag $MYSQL_SRC) echo "Patching source..." rm -rf $mysql_tag # need clean sources for a patch tar -xzf $mysql_orig_tar_gz cd $mysql_tag/ patch -p1 -f < $patch_file >/dev/null || : # chmod a+x ./BUILD/*wsrep CONFIGURE="yes" else cd $mysql_tag/ fi MYSQL_SRC=$(pwd -P) MYSQL_BUILD_DIR=$MYSQL_SRC if [ "$CONFIGURE" == "yes" ] then echo "Regenerating config files" time ./BUILD/autorun.sh fi fi echo "Building mysqld" export WSREP_REV export MAKE="make -j$JOBS" if [ "$SKIP_BUILD" == "no" ] then if [ "$CONFIGURE" == "yes" ] && [ "$SKIP_CONFIGURE" == "no" ] then rm -f config.status BUILD_OPT="" if [ "$OS" == "FreeBSD" ]; then # don't use INSTALL_LAYOUT=STANDALONE(default), it assumes prefix=. 
CMAKE_LAYOUT_OPTIONS=( -DCMAKE_INSTALL_PREFIX="/usr/local" \ -DINSTALL_LAYOUT=RPM \ -DMYSQL_UNIX_ADDR="/tmp/mysql.sock" \ -DINSTALL_BINDIR="bin" \ -DINSTALL_DOCDIR="share/doc/mysql" \ -DINSTALL_DOCREADMEDIR="share/doc/mysql" \ -DINSTALL_INCLUDEDIR="include/mysql" \ -DINSTALL_INFODIR="info" \ -DINSTALL_LIBDIR="lib/mysql" \ -DINSTALL_MANDIR="man" \ -DINSTALL_MYSQLDATADIR="/var/db/mysql" \ -DINSTALL_MYSQLSHAREDIR="share/mysql" \ -DINSTALL_MYSQLTESTDIR="share/mysql/tests" \ -DINSTALL_PLUGINDIR="lib/mysql/plugin" \ -DINSTALL_SBINDIR="libexec" \ -DINSTALL_SCRIPTDIR="bin" \ -DINSTALL_SHAREDIR="share" \ -DINSTALL_SQLBENCHDIR="share/mysql" \ -DINSTALL_SUPPORTFILESDIR="share/mysql" \ -DWITH_UNIT_TESTS=0 \ -DWITH_LIBEDIT=0 \ -DWITH_LIBWRAP=1 \ ) else [ $DEBIAN -ne 0 ] && \ MYSQL_SOCKET_PATH="/var/run/mysqld/mysqld.sock" || \ MYSQL_SOCKET_PATH="/var/lib/mysql/mysql.sock" CMAKE_LAYOUT_OPTIONS=( \ -DINSTALL_LAYOUT=RPM \ -DCMAKE_INSTALL_PREFIX="/usr" \ -DINSTALL_SBINDIR="/usr/sbin" \ -DMYSQL_DATADIR="/var/lib/mysql" \ -DMYSQL_UNIX_ADDR=$MYSQL_SOCKET_PATH \ -DCMAKE_OSX_ARCHITECTURES=$(uname -m) \ ) fi [ -n "$EXTRA_SYSROOT" ] && \ CMAKE_LAYOUT_OPTIONS+=( \ -DCMAKE_PREFIX_PATH="$EXTRA_SYSROOT" \ ) if [ $MYSQL_MAJOR = "5.1" ] then # This will be put to --prefix by SETUP.sh. export MYSQL_BUILD_PREFIX="/usr" export wsrep_configs="--libexecdir=/usr/sbin \ --localstatedir=/var/lib/mysql/ \ --with-unix-socket-path=$MYSQL_SOCKET_PATH \ --with-extra-charsets=all \ --with-ssl" [ "$DEBUG" = "yes" ] && BUILD_OPT="-debug" BUILD/compile-${CPU}${BUILD_OPT}-wsrep > /dev/null else # CMake build [ "$DEBUG" = "yes" ] \ && BUILD_OPT="-DCMAKE_BUILD_TYPE=Debug -DDEBUG_EXTNAME=OFF" \ || BUILD_OPT="-DCMAKE_BUILD_TYPE=RelWithDebInfo" # like in RPM spec MYSQL_MM_VER="$MYSQL_MAJOR_VER$MYSQL_MINOR_VER" [ "$MYSQL_MM_VER" -ge "56" ] \ && MEMCACHED_OPT="-DWITH_LIBEVENT=yes -DWITH_INNODB_MEMCACHED=ON" \ || MEMCACHED_OPT="" if [ "$MYSQL_MM_VER" -ge "57" ] then BOOST_OPT="-DWITH_BOOST=boost_$MYSQL_MM_VER" [ "yes" = "$BOOTSTRAP" ] && \ BOOST_OPT="$BOOST_OPT -DDOWNLOAD_BOOST=1" else BOOST_OPT="" fi if [ "$MYSQL_BUILD_DIR" != "$MYSQL_SRC" ] then [ "$BOOTSTRAP" = "yes" ] && rm -rf $MYSQL_BUILD_DIR [ -d "$MYSQL_BUILD_DIR" ] || mkdir -p $MYSQL_BUILD_DIR fi pushd $MYSQL_BUILD_DIR # cmake wants either absolute path or a link from build directory # Incidentally link trick also allows us to use ccache # (at least it distinguishes between gcc/clang) ln -sf $(which ccache || which $CC) $(basename $CC) ln -sf $(which ccache || which $CXX) $(basename $CXX) cmake \ -DCMAKE_C_COMPILER=$(basename $CC) \ -DCMAKE_CXX_COMPILER=$(basename $CXX) \ -DBUILD_CONFIG=mysql_release \ "${CMAKE_LAYOUT_OPTIONS[@]}" \ $BUILD_OPT \ -DWITH_WSREP=1 \ -DWITH_EXTRA_CHARSETS=all \ -DWITH_SSL=yes \ -DWITH_ZLIB=system \ -DMYSQL_MAINTAINER_MODE=0 \ $MEMCACHED_OPT \ $BOOST_OPT \ $MYSQL_SRC \ && make -j $JOBS -S && popd || exit 1 fi else # just recompile and relink with old configuration [ $MYSQL_MAJOR != "5.1" ] && pushd $MYSQL_BUILD_DIR make -j $JOBS -S > /dev/null [ $MYSQL_MAJOR != "5.1" ] && popd fi fi # SKIP_BUILD # gzip manpages # this should be rather fast, so we can repeat it every time if [ "$PACKAGE" == "yes" ] then cd $MYSQL_SRC/man && for i in *.1 *.8; do gzip -c $i > $i.gz; done || : fi ###################################### ## ## ## Making of demo tarball ## ## ## ###################################### install_mysql_5.1_demo() { MYSQL_LIBS=$MYSQL_DIST_DIR/lib/mysql MYSQL_PLUGINS=$MYSQL_DIST_DIR/lib/mysql/plugin MYSQL_CHARSETS=$MYSQL_DIST_DIR/share/mysql/charsets # 
BSD-based OSes do not have the -D option on 'install' install -m 755 -d $MYSQL_DIST_DIR/share/mysql/english install -m 644 $MYSQL_SRC/sql/share/english/errmsg.sys $MYSQL_DIST_DIR/share/mysql/english/errmsg.sys install -m 755 -d $MYSQL_DIST_DIR/sbin install -m 755 $MYSQL_SRC/sql/mysqld $MYSQL_DIST_DIR/sbin/mysqld if [ "$SKIP_CLIENTS" == "no" ] then # Hack alert: # install libmysqlclient.so as libmysqlclient.so.16 as client binaries # seem to be linked against explicit version. Figure out better way to # deal with this. install -m 755 -d $MYSQL_LIBS install -m 755 $MYSQL_SRC/libmysql/.libs/libmysqlclient.so $MYSQL_LIBS/libmysqlclient.so.16 fi if test -f $MYSQL_SRC/storage/innodb_plugin/.libs/ha_innodb_plugin.so then install -m 755 -d $MYSQL_PLUGINS install -m 755 $MYSQL_SRC/storage/innodb_plugin/.libs/ha_innodb_plugin.so \ $MYSQL_PLUGINS/ha_innodb_plugin.so fi install -m 755 -d $MYSQL_BINS if [ "$SKIP_CLIENTS" == "no" ] then if [ -x $MYSQL_SRC/client/.libs/mysql ] # MySQL then MYSQL_CLIENTS=$MYSQL_SRC/client/.libs elif [ -x $MYSQL_SRC/client/mysql ] # MariaDB then MYSQL_CLIENTS=$MYSQL_SRC/client else echo "Can't find MySQL clients. Aborting." exit 1 fi install -m 755 -s -t $MYSQL_BINS $MYSQL_CLIENTS/mysql install -m 755 -s -t $MYSQL_BINS $MYSQL_CLIENTS/mysqldump install -m 755 -s -t $MYSQL_BINS $MYSQL_CLIENTS/mysqladmin fi install -m 755 -t $MYSQL_BINS $MYSQL_SRC/scripts/wsrep_sst_common install -m 755 -t $MYSQL_BINS $MYSQL_SRC/scripts/wsrep_sst_mysqldump install -m 755 -t $MYSQL_BINS $MYSQL_SRC/scripts/wsrep_sst_rsync install -m 755 -d $MYSQL_CHARSETS install -m 644 -t $MYSQL_CHARSETS $MYSQL_SRC/sql/share/charsets/*.xml install -m 644 -t $MYSQL_CHARSETS $MYSQL_SRC/sql/share/charsets/README } install_mysql_5.5_dist() { export DESTDIR=$BUILD_ROOT/dist/mysql mkdir -p $DESTDIR pushd $MYSQL_BUILD_DIR make install popd unset DESTDIR } install_mysql_5.5_demo() { export DESTDIR=$BUILD_ROOT/dist/mysql mkdir -p $DESTDIR pushd $MYSQL_BUILD_DIR cmake -DCMAKE_INSTALL_COMPONENT=Server -P cmake_install.cmake cmake -DCMAKE_INSTALL_COMPONENT=Client -P cmake_install.cmake cmake -DCMAKE_INSTALL_COMPONENT=SharedLibraries -P cmake_install.cmake cmake -DCMAKE_INSTALL_COMPONENT=ManPages -P cmake_install.cmake [ "$DEBUG" == "yes" ] && cmake -DCMAKE_INSTALL_COMPONENT=Debuginfo -P cmake_install.cmake popd unset DESTDIR pushd $MYSQL_DIST_DIR [ -d usr/local ] && ( mv usr/local/* ./ && rmdir usr/local ) # FreeBSD [ -d libexec -a ! -d sbin ] && mv libexec sbin # FreeBSD mv usr/* ./ && rmdir usr [ -d lib64 -a !
-d lib ] && mv lib64 lib popd } if [ $TAR == "yes" ]; then echo "Creating demo distribution" # Create build directory structure DIST_DIR=$BUILD_ROOT/dist MYSQL_DIST_DIR=$DIST_DIR/mysql MYSQL_DIST_CNF=$MYSQL_DIST_DIR/etc/my.cnf GALERA_DIST_DIR=$DIST_DIR/galera MYSQL_BINS=$MYSQL_DIST_DIR/bin cd $BUILD_ROOT rm -rf $DIST_DIR # Install required MySQL files in the DIST_DIR if [ $MYSQL_MAJOR == "5.1" ]; then install_mysql_5.1_demo install -m 755 -d $(dirname $MYSQL_DIST_CNF) install -m 644 my-5.1.cnf $MYSQL_DIST_CNF else install_mysql_5.5_demo > /dev/null install -m 755 -d $(dirname $MYSQL_DIST_CNF) install -m 644 my-5.5.cnf $MYSQL_DIST_CNF fi cat $MYSQL_BUILD_DIR/support-files/wsrep.cnf | \ sed 's/root:$/root:rootpass/' >> $MYSQL_DIST_CNF pushd $MYSQL_BINS; ln -s wsrep_sst_rsync wsrep_sst_rsync_wan; popd tar -xzf mysql_var_$MYSQL_MAJOR.tgz -C $MYSQL_DIST_DIR install -m 644 LICENSE.mysql $MYSQL_DIST_DIR # Copy required Galera libraries GALERA_BINS=$GALERA_DIST_DIR/bin GALERA_LIBS=$GALERA_DIST_DIR/lib install -m 755 -d $GALERA_DIST_DIR install -m 644 ../../LICENSE $GALERA_DIST_DIR/LICENSE.galera install -m 755 -d $GALERA_BINS install -m 755 -d $GALERA_LIBS if [ "$SCONS" == "yes" ] then SCONS_VD=$GALERA_SRC cp -P $SCONS_VD/garb/garbd $GALERA_BINS cp -P $SCONS_VD/libgalera_smm.so $GALERA_LIBS if [ "$OS" == "Darwin" -a "$DEBUG" == "yes" ]; then cp -P -R $SCONS_VD/garb/garbd.dSYM $GALERA_BINS cp -P -R $SCONS_VD/libgalera_smm.so.dSYM $GALERA_LIBS fi else echo "Autotools compilation not supported any more." exit 1 fi install -m 644 LICENSE $DIST_DIR install -m 755 mysql-galera $DIST_DIR install -m 644 README $DIST_DIR install -m 644 QUICK_START $DIST_DIR # Strip binaries if not instructed otherwise if test "$NO_STRIP" != "yes" then for d in $GALERA_BINS $GALERA_LIBS \ $MYSQL_DIST_DIR/bin $MYSQL_DIST_DIR/lib $MYSQL_DIST_DIR/sbin do for f in $d/* do file $f | grep 'not stripped' >/dev/null && strip $f || : done done fi fi # if [ $TAR == "yes" ] if [ "$BIN_DIST" == "yes" ]; then . bin_dist.sh fi if [ "$TAR" == "yes" ] || [ "$BIN_DIST" == "yes" ]; then if [ "$RELEASE" != "" ] then GALERA_RELEASE="galera-$RELEASE-$(uname -m)" else GALERA_RELEASE="$WSREP_REV,$GALERA_REV" fi RELEASE_NAME=$(echo mysql-$MYSQL_VER-$GALERA_RELEASE | sed s/\:/_/g) rm -rf $RELEASE_NAME mv $DIST_DIR $RELEASE_NAME # Hack to avoid 'file changed as we read it'-error if test -n "$SYNC_BEFORE_PACK" then echo "syncing disks" sync sleep 1 fi # Pack the release tar -czf $RELEASE_NAME.tgz $RELEASE_NAME && rm -rf $RELEASE_NAME fi # if [ $TAR == "yes" || "$BIN_DIST" == "yes" ] if [ "$TAR" == "yes" ] && [ "$INSTALL" == "yes" ]; then cmd="$GALERA_SRC/tests/scripts/command.sh" $cmd stop $cmd install $RELEASE_NAME.tgz fi get_arch() { if ! 
[ -z "$TARGET" ] then if [ "$TARGET" == "i686" ] then echo "i386" else echo "amd64" fi elif [ "$OS" == "Darwin" ]; then if file $MYSQL_SRC/sql/mysqld | grep "i386" >/dev/null 2>&1 then echo "i386" else echo "amd64" fi else if file $MYSQL_SRC/sql/mysqld | grep "80386" >/dev/null 2>&1 then echo "i386" else echo "amd64" fi fi } build_linux_packages() { pushd $GALERA_SRC/scripts/mysql local ARCH=$(get_arch) local WHOAMI=$(whoami) if [ $DEBIAN -eq 0 ] && [ "$ARCH" == "amd64" ]; then ARCH="x86_64" export x86_64=$ARCH # for epm fi local STRIP_OPT="" [ "$NO_STRIP" == "yes" ] && STRIP_OPT="-g" export MYSQL_VER MYSQL_SRC GALERA_SRC RELEASE_NAME MYSQLD_BINARY export WSREP_VER=${RELEASE:-"$WSREP_REV"} echo $MYSQL_SRC $MYSQL_VER $ARCH rm -rf $ARCH set +e if [ $DEBIAN -ne 0 ]; then #build DEB local deb_basename="mysql-server-wsrep" pushd debian $SUDO_ENV $(which epm) -n -m "$ARCH" -a "$ARCH" -f "deb" \ --output-dir $ARCH $STRIP_OPT $deb_basename RET=$? $SUDO /bin/chown -R $WHOAMI.users $ARCH else # build RPM echo "RPMs are now built by a separate script." return 1 fi popd return $RET } build_freebsd_packages() { echo "Creating FreeBSD packages" # Create build directory structure DIST_DIR=$BUILD_ROOT/dist/mysql MYSQL_DIST_DIR=$DIST_DIR/usr/local MYSQL_DIST_CNF=$MYSQL_DIST_DIR/share/mysql/my_wsrep.cnf MYSQL_BINS=$MYSQL_DIST_DIR/bin MYSQL_CLIENT_LICENSE_DIR=$MYSQL_DIST_DIR/share/licenses/mysql-client-${MYSQL_VER}_wsrep_${RELEASE} MYSQL_SERVER_LICENSE_DIR=$MYSQL_DIST_DIR/share/licenses/mysql-server-${MYSQL_VER}_wsrep_${RELEASE} MYSQL_SERVER_DOC_DIR=$MYSQL_DIST_DIR/share/doc/mysql${MYSQL_MAJOR_VER}${MYSQL_MINOR_VER}-server_wsrep cd $BUILD_ROOT rm -rf $BUILD_ROOT/dist install_mysql_5.5_dist > /dev/null install -m 755 -d $(dirname $MYSQL_DIST_CNF) install -m 644 my-5.5.cnf $MYSQL_DIST_CNF cat $MYSQL_BUILD_DIR/support-files/wsrep.cnf | \ sed 's/root:$/root:rootpass/' >> $MYSQL_DIST_CNF pushd $MYSQL_BINS; ln -s wsrep_sst_rsync wsrep_sst_rsync_wan; popd install -m 755 -d "$MYSQL_CLIENT_LICENSE_DIR" install -m 644 ../../LICENSE "$MYSQL_CLIENT_LICENSE_DIR/GPLv3" install -m 644 freebsd/LICENSE "$MYSQL_CLIENT_LICENSE_DIR" install -m 644 freebsd/catalog.mk "$MYSQL_CLIENT_LICENSE_DIR" install -m 755 -d "$MYSQL_SERVER_LICENSE_DIR" install -m 644 ../../LICENSE "$MYSQL_SERVER_LICENSE_DIR/GPLv3" install -m 644 freebsd/LICENSE "$MYSQL_SERVER_LICENSE_DIR" install -m 644 freebsd/catalog.mk "$MYSQL_SERVER_LICENSE_DIR" install -m 755 -d "$MYSQL_SERVER_DOC_DIR" install -m 644 README "$MYSQL_SERVER_DOC_DIR" install -m 644 QUICK_START "$MYSQL_SERVER_DOC_DIR" # Strip binaries if not instructed otherwise if test "$NO_STRIP" != "yes" then for d in $MYSQL_DIST_DIR/bin $MYSQL_DIST_DIR/lib $MYSQL_DIST_DIR/libexec do for f in $d/* do file $f | grep 'not stripped' >/dev/null && strip $f || : done done fi pwd ./freebsd.sh $MYSQL_VER $RELEASE rm -rf $BUILD_ROOT/dist } if [ "$PACKAGE" == "yes" ]; then case "$OS" in Linux) build_linux_packages ;; FreeBSD) build_freebsd_packages mv *.tbz ../.. ;; *) echo "packages for $OS are not supported." 
exit 1 ;; esac fi # galera-3-25.3.20/scripts/mysql/mysql_var_5.6.tgz0000644000015300001660000022443013042054732021132 0ustar jenkinsjenkins[binary .tgz payload omitted: not representable as text]
galera-3-25.3.20/scripts/mysql/my-5.5.cnf0000644000015300001660000000023213042054732017411 0ustar jenkinsjenkins# Default mysqld options [mysqld] core-file innodb_buffer_pool_size=420M innodb_log_file_size=100M innodb_flush_log_at_trx_commit=2 max_connections=1024 galera-3-25.3.20/scripts/mysql/README0000644000015300001660000006067113042054732016664 0ustar jenkinsjenkinsCodership Oy http://www.codership.com DISCLAIMER THIS SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL CODERSHIP OY BE HELD LIABLE TO ANY PARTY FOR ANY DAMAGES RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE. Trademark Information. MySQL is a trademark or registered trademark of Sun Microsystems, Inc. or its subsidiaries in the US and other countries. Other marks are the property of their respective owners. Licensing Information. Please see ./mysql/LICENSE.mysql and ./galera/LICENSE.galera Project source code can be found at wsrep API: https://launchpad.net/wsrep/ MySQL patch: https://launchpad.net/codership-mysql/ Galera libs: https://launchpad.net/galera/ ABOUT THIS DOCUMENT This document covers issues specific to this MySQL/Galera demo distribution. It does not cover the use or administration of MySQL server per se. The reader is assumed to know how to install, configure, administer and use MySQL server. MYSQL/GALERA v23.x demo package CONTENTS: ========= 0. WHAT IS MYSQL/GALERA CLUSTER 1. CLUSTER SETUP 1.1 INSTALLATION 1.2 CLUSTER URL 1.3 STARTING THE FIRST NODE OF A CLUSTER 1.4 STARTING REMAINING NODES 2. USING THE CLUSTER 2.1 LOADING DATA TO A CLUSTER 2.2 CONNECTING APPLICATION TO A CLUSTER 2.3 LOAD BALANCER 2.4 ADDING NEW NODE TO A CLUSTER 2.5 A "REFERENCE NODE" 2.6 "SPLIT-BRAIN" CONDITION 3. CONFIGURATION 3.1 MANDATORY MYSQL OPTIONS 3.2 OPTIONAL OPTIMIZATIONS 3.3 WSREP OPTIONS 3.4 CONFIGURING LOCAL MYSQL CLIENTS 3.5 FIREWALLS 4. Using MySQL/Galera in Amazon EC2 5. LIMITATIONS 0. WHAT IS MYSQL/GALERA CLUSTER MySQL/Galera cluster is a synchronous multi-master MySQL server cluster.
Cluster nodes are connected to each other in a N-to-N fashion through a group communication backend which provides automatic reconfiguration in the event of a node failure or a new node added to cluster: ,--------. ,--------. ,--------. | mysqld |----| mysqld |<---| client | `--------' `--------' `--------' \ / ,--------. ,--------. | mysqld |<---| client | `--------' `--------' With few exceptions each cluster node can be used like a standalone MySQL server and supports most of MySQL features including transactions, triggers and stored procedures. Node states are synchronized by replicating transaction changes at commit time. The cluster is virtually synchronous: this means that each node commits transactions in exactly the same order, although not necessarily at the same physical moment. (The latter is not that important as it may seem, since in most cases DBMS gives no guarantee on when the transaction is actually processed.) Built-in flow control keeps nodes within fraction of a second from each other, this is more than enough for most practical purposes. Main features of MySQL/Galera cluster: * Truly highly available: no committed transaction is ever lost in case of a node crash. All nodes always have consistent state. * True multi-master: all cluster nodes can modify the same table concurrently. * Highly transparent: the cluster is intended as a drop-in replacement for a single MySQL server. (See LIMITATIONS below) * Scalable even with WRITE-intensive applications. This demo distribution contains all software you'll need to setup MySQL/Galera cluster. It is essentially a self-contained MySQL server installation with its own configuration file, data directory and preconfigured empty database. You don't need administrative privileges or to uninstall/disable previously installed MySQL server to use this distribution. 1. CLUSTER SETUP To setup MySQL/Galera cluster you will need several networked computers - one for each mysqld instance you plan to use. For best performance those computers should be of approximately same configuration: Galera replication is synchronous and one overloaded machine may slow down the whole cluster. This however depends on load distribution. The node that does not handle client connections can be considerably slower. It takes 3 steps to set up the cluster: 1) Copy this distribution to all prospective nodes of the cluster and unpack it to location of your choice. 2) Start the first node to begin a new cluster. 3) Start remaining nodes pointing to the first one. (NOTE: You can easily set up the cluster on a single computer. However this makes little sense, as you won't see the the benefits of high availability and scalability. Hence it is not covered by this document.) 1.1 INSTALLATION Just copy and unpack the distribution on the prospective cluster nodes to wherever you have privileges. The distribution was designed to be able to run on most systems without reconfiguration. It is a self-contained MySQL installation and comes with its own data directory and a preconfigured empty database with users 'root' (password 'rootpass') and 'test' (password 'testpass', privileges on schema 'test'). As a result default installation will require at least 1Gb of free space for InnoDB files (will be created on first start). This requirement, as well as other MySQL and Galera options can be changed by editing configuration file which can be found at /mysql/etc/my.cnf. Please see CONFIGURATION chapter for the details on editable parameters. 
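For illustration only, copying and unpacking the package on one prospective
node might look like the following (the host name, archive name and target
directory below are placeholders, not files shipped with this distribution):

    scp mysql-galera-demo.tgz node1:
    ssh node1 'mkdir -p ~/galera-demo && tar -xzf mysql-galera-demo.tgz -C ~/galera-demo'

Repeat the same on every node. No further installation steps are needed.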
1.2 CLUSTER URL Cluster URL is a connection handle that will be used by a new node to connect to the rest of the cluster. Its form is backend-specific and backend is determined by URL schema. Default is 'dummy' which means no replication. This demo comes with a distributed group communication backend which schema is 'gcomm'. 1.3 STARTING THE FIRST NODE OF A CLUSTER /mysql-galera is a special MySQL startup script that sets proper options (including data directory path) for mysqld. If you're running it as a superuser, you have to make sure there is 'mysql' user in the system and it has sufficient privileges on the installation directory (see MySQL Manual about running mysqld as root). The first node of a cluster has nowhere to connect to, therefore it has to start with an empty cluster address (note that it still initializes gcomm backend): /mysql-galera -g gcomm:// start 1.4 STARTING REMAINING NODES To add another node to the cluster it must be given the address of one of the existing cluster nodes. Thus, if the first cluster node has IP address 192.168.1.1, then the second will be started like this: /mysql-galera -g gcomm://192.168.1.1 start The third node can use either the first or the second node address and so on. It might take few minutes to start mysqld for the first time as it will have to create required InnoDB files. For full description of mysql-galera options and commands see: /mysql-galera --help 2. USING THE CLUSTER After you have successfully started all cluster nodes, the cluster is ready to use. From the client point of view each cluster node works like a usual MySQL server - client-side application does not have to be changed in any way. Each node can be accessed independently and asynchronously. Just direct SQL load to any one or more of the cluster nodes. For most practical purposes you can treat MySQL/Galera cluster as a single MySQL server listening on multiple interfaces with the exception that you might see transaction deadlocks where you previously didn't. 2.1 LOADING DATA TO CLUSTER Initially distribution database is empty. You can populate it by loading the dump of your data to any one of the nodes. It will be automatically replicated to others. Please note that this release supports only InnoDB storage engine. 2.2 CONNECTING APPLICATION TO CLUSTER As was mentioned above, for the client application each node looks like a normal MySQL server and can be used independently. This creates considerable flexibility in the way the cluster can be utilized. The approaches can be categorized in three groups: 1) Seeking High Availability only. It is similar to traditional MySQL master-slave replication. In this case client application connects to only one node, the rest serving as hot backups / read-only slaves: ,-------------. | application | `-------------' | | | DB backups/read-only slaves ,-------. ,-------. ,-------. | node1 | | node2 | | node3 | `-------' `-------' `-------' <===== cluster nodes =====> In the case of primary node failure application can instantly switch to another node without any preparations. This is also a most transparent mode: COMMITs will never return deadlocks and table locks can be used too. 2) Seeking High Availability and improved performance through uniform load distribution. If there are several client connections to the database, they can be uniformly distributed between cluster nodes resulting in better performance. The exact degree of performance improvement depends on application's SQL profile. 
Note, that transaction rollback rate may also increase. ,-------------. | clients | `-------------' | | | | ,-------------. | application | `-------------' / | \ ,-------. ,-------. ,-------. | node1 | | node2 | | node3 | `-------' `-------' `-------' <===== cluster nodes =====> In the case of a node failure application can keep on using the remaining healthy nodes. In this setup application can also be clustered with a dedicated application instance per database node, thus achieving HA not only for the database, but for the whole application stack: ,-------------. | clients | `-------------' // || \\ ,------. ,------. ,------. | app1 | | app2 | | app3 | `------' `------' `------' | | | ,-------. ,-------. ,-------. | node1 | | node2 | | node3 | `-------' `-------' `-------' <====== cluster nodes ======> 3) Seeking High Availability and improved performance through smart load distribution. Uniform load distribution can cause undesirably high rollback rate. Directing transactions which access the same set of tables to the same node can considerably improve performance by reducing the number of rollbacks. Also, if your application can distinguish between read/write and read-only transactions, the following configuration may be quite efficient: ,---------------------. | application | `---------------------' writes / | reads \ reads ,-------. ,-------. ,-------. | node1 | | node2 | | node3 | `-------' `-------' `-------' <========= cluster nodes =========> 2.3 LOAD BALANCER If your application cannot utilize several database servers (most don't) you will need to use SQL proxy or a TCP load balancer to distribute load between the MySQL/Galera nodes. This is needed not only to increase performance, but also for a quick switch in case of a node failure. If performance of your application is DBMS-bound, you can run the balancer on the same machine as application/client. Be aware, however, that SQL load balancing might be a CPU hungry operation: usually SQL traffic consists of many small packets. For best results we recommend to carefully examine CPU consumption of the balancer and if needed dedicate a separate machine for it. Unlike traditional MySQL master-slave cluster, MySQL/Galera cluster does not require any SQL traffic filtering, it is highly transparent and plain TCP connection balancer will suffice. TCP connection balancers that were successfully used with MySQL/Galera: - Pen (http://siag.nu/pen/) - GLB (http://www.codership.com/en/downloads/glb) 2.4 ADDING NEW NODE TO A CLUSTER With 0.7 series of releases Codership removes the main obstacle towards using MySQL/Galera in production: inability to add/replace nodes in the working cluster. This distribution features automatic state snapshot transfer to newly joined node. Until node receives state snapshot it won't execute any queries. Detailed state snapshot transfer sequence diagram can be found in http://www.codership.com/files/presentations/Galera_OSC_2009.pdf The process of joining new node into the cluster consists of two phases: 1) State snapshot transfer (SST) from an established cluster member. Depending on the SST method neither joining node, nor SST donor can apply any transactions for the duration of this phase. Transactions replicated by other nodes are buffered in the queue. 2) Catch-up phase when both donor and joiner try to catch up with the cluster by applying transactions buffered in the queue. Using them as working nodes should be avoided. Duration of this phase depends on the load profile and duration of the first phase. 
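One way to observe a joining node during these two phases is to poll the wsrep
status variables from a local client. A minimal sketch, assuming the
preconfigured root account and the socket location described in section 3.4
(the path below is a placeholder for your unpack directory):

    mysql -uroot -prootpass -S /path/to/dist/mysql/var/mysqld.sock \
          -e "SHOW STATUS LIKE 'wsrep%'"

Once the node has caught up, the wsrep_ready status variable should report ON.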
NOTE: Transaction buffering is currently happening in memory, so prepare enough swap space. By default cluster chooses the most suitable node to receive state transfer from. There is also an option wsrep_sst_donor to specify desired state snapshot source in my.cnf or on the command line. See CONFIGURATION section for descriptions of all relevant configuration options. In most situations (like cluster on EC2) this distribution should work with default settings. At this point there is only one state transfer method supported and it is based on mysqldump. Although it is relatively slow, it provides complete cloning of the donor state, including system tables and thus is most compliant. 2.5 A "REFERENCE NODE" For practical purposes we recommend to reserve a "reference node" in the cluster. A "reference node" is a node that does not receive SQL load. Having such node in a cluster serves several purposes: 1) Data consistency: since this node does not process any SQL load on its own, it has the lowest probability of transaction conflicts and therefore - indeterministic conflict resolution. In the event of discovered database inconsistencies in the cluster this node will have the most relevant database. 2) Data safety: since this node does not process any SQL load on its own, it has the lowest probability of failing with catastrophic consequences. In the event of total cluster failure (e.g. blackout) this will be the best node to restore cluster from. 3) High availability: a reference node can serve as a dedicated state snapshot donor. Since it does not serve any clients, they won't experience service interruptions and load balancer won't need reconfiguration during SST. Even with the current TCP-based group communication the overhead of having one extra silent node is negligible for most loads. 2.6 "SPLIT-BRAIN" CONDITION Galera cluster is fully distributed and does not use any sort of centralized arbitrator, thus having no single point of failure. However, like any cluster of that kind it may fall to a dreaded "split-brain" condition where half or more nodes of the cluster suddenly disappear (e.g. due to network failure). In general case, having no information about the fate of disappeared nodes, remaining nodes cannot continue to process requests and modify their states. While such situation is generally considered negligibly probable in a multi-node cluster (normally nodes fail one by one), in 2-node cluster a single node failure can lead to this, thus making 3 nodes a minimum requirement for a highly-available cluster. Dedicated Galera packages (not this distribution) contain a lightweight "arbitrator" daemon which can serve as an odd node substitute in situations where cluster size is limited to 2 real nodes. 3. CONFIGURATION Each MySQL/Galera node is configured just like the usual MySQL server, we just added some configuration variables to my.cnf. In addition some options can be passed to mysql-galera startup script (see mysql-galera --help). 3.1 MANDATORY MYSQL OPTIONS binlog_format=ROW This option is required to use row-level replication as opposed to statement-level. For performance and consistency considerations don't change that. As a side effect, binlog, if turned on, can be ROW only. In future this option won't have special meaning. innodb_autoinc_lock_mode=2 This is a required parameter. Without it INSERTs into tables with AUTO_INCREMENT column may fail. autoinc lock modes 0 and 1 can cause unresolved deadlock, and make system unresponsive. 
innodb_locks_unsafe_for_binlog=1 This setting is required for reliable parallel applying operation. Mandatory options are hardcoded both in distribution's my.cnf file and in mysql-galera script. 3.2 OPTIONAL OPTIMIZATIONS While not required for correct operation of MySQL/Galera cluster, the following options may be safely set due to the guarantees of synchronous replication: innodb_flush_log_at_trx_commit=0 3.3 WSREP OPTIONS Here WSREP stands for Write-Set REPlication - a synchronous replication API that Codership is developing for transactional databases. Galera is a library that implements WSREP services. Default values are shown. All options are optional except for wsrep_provider and wsrep_cluster_address. wsrep_provider=none A full path to the library that implements WSREP interface. If none is specified, the server behaves almost like a normal mysqld, with slight overhead. mysql-galera script automatically substitutes it to point to Galera implementation shipped with the distribution. It can be overridden with WSREP environment variable. wsrep_provider_options= Provider-specific option string. See http://www.codership.com/wiki for details. wsrep_cluster_address="dummy://" Group Communication System address. Depends on the WSREP provider. This distribution recognizes "dummy://" and "gcomm://<address>[:port]". Default port is 4567. mysql> set global wsrep_cluster_address=<ADDRESS>
; will (re)establish connection to ADDRESS. This can be used to change cluster connection in runtime. wsrep_cluster_name="my_wsrep_cluster" Logical cluster name, must be the same for all nodes of the cluster. wsrep_node_name= Human readable node name. Defaults to hostname. wsrep_slave_threads=1 Number of threads dedicated to processing of writesets from other nodes. For better performance we recommend few per CPU core. wsrep_dbug_option Options for the built-in DBUG library (independent from what MySQL uses). Empty by default. Not used in 0.8. wsrep_debug=0 Enable debug-level logging. wsrep_convert_LOCK_to_trx=0 Implicitly convert locking sessions into transactions inside mysqld. By itself it does not mean support for locking sessions, but it prevents the database from going into logically inconsistent state. Disabled by default because of possible memory issues with DB dumps that contain LOCK statements. wsrep_retry_autocommit=1 Retry autocommit queries and single statement transactions should they fail certification test. This is analogous to rescheduling an autocommit query should it go into deadlock with other transactions in the database lock manager. wsrep_auto_increment_control=1 Automatically adjust auto_increment_increment and auto_increment_offset variables based on the number of nodes in the cluster. Significantly reduces certification conflic rate for INSERTS. wsrep_drupal_282555_workaround=1 MySQL seems to have an obscure bug when INSERT into table with AUTO_INCREMENT column with NULL value for that column can fail with a duplicate key error. When this option is on, it retries such INSERTs. Required for stable Drupal operation. Documented at: http://bugs.mysql.com/bug.php?id=41984 http://drupal.org/node/282555 wsrep_sst_method=mysqldump What method to use to copy database state to a newly joined node. Currently supported methods: - mysqldump: generally slow (except on small datasets), but most tested - rsync: the fastest method, especially on large datasets - rsync_wan: same as rsync, but uses deltaxfer to minimize network traffic. wsrep_sst_receive_address= Address at which this node wants to receive state snapshot. Defaults to mysqld bind address, and if that is not specified (0.0.0.0) - to the first IP of eth0 + mysqld bind port. wsrep_sst_auth= Authentication information needed for state transfer. Depends on the state transfer method. For mysqldump-based SST it is : and should be the same on all nodes - it is used to authenticate with both state snapshot receiver and state snapshot donor. In this distribution it is preconfigured to "root:rootpass". wsrep_sst_donor= A name of the node which should serve as state snapshot donor. This allows to control which node will serve state snapshot request. By default the most suitable node is chosen by GCS. 3.4 CONFIGURING LOCAL MYSQL CLIENTS This MySQL/Galera distribution runs mysqld in the "sandbox". Thus mysql clients won't find mysqld socket at default system location. 
Running mysql client without explicitly specifying socket or port (via --socket or --host/--port options) may, therefore, result in the following: $ mysql -uroot -prootpass ERROR 2002 (HY000): Can't connect to local MySQL server through socket '/var/run/mysqld/mysqld.sock' (2) Most applications that use libmysqlclient to connect to MySQL server can be instructed to look in the correct place by adding a following section to system-wide my.cnf file: [client] socket = /mysql/var/mysqld.sock 3.5 FIREWALLS If there are any firewalls used, they should be configured to allow connections between the nodes at the following ports: 3306 - for mysqldump state snapshot transfer 4567 - for replication traffic E.g. to configure iptables to allow connections from a local subnet: iptables -A INPUT -i eth0 -p tcp -m tcp \ --source 192.168.0.1/24 --dport 3306 -j ACCEPT iptables -A INPUT -i eth0 -p tcp -m tcp \ --source 192.168.0.1/24 --dport 4567 -j ACCEPT Substitute real values for IP address of your node and netmask. Better yet, use VPN. 4. Using MySQL/Galera distribution in Amazon EC2 MySQL/Galera works anywhere TCP/IP works. Therefore using MySQL/Galera distribution in Amazon EC2 environment is no different than in LAN. Just launch several instances of your favorite AMI, copy and unpack the distribution, and start the servers. Don't forget to use external addresses if your nodes are running in different accessibility zones (obviously running in different accessibility zones degrades performance somewhat). NOTE: this distribution may be binary incompatible with some older Linux distributions. Please use CentOS 5.0 or newer. 5. LIMITATIONS 1) Currently replication works only with InnoDB storage engine. Any writes to tables of other types, including system (mysql.*) tables are not replicated. However, DDL statements are replicated in statement level, and changes to mysql.* tables will get replicated that way. So, you can safely issue: CREATE USER..., but issuing: INSERT INTO mysql.user..., will not be replicated. 2) DELETE operation is not supported on tables without primary keys. Rows in tables without primary keys may appear in different order on different nodes. As a result SELECT...LIMIT... may return slightly different sets. 3) Unsupported queries: * LOAD DATA size is limited to ~1Gb * lock functions (GET_LOCK(), RELEASE_LOCK()... ) 4) Locking sessions (LOCK TABLES...UNLOCK) are not supported in multi-master mode. However if there's only one node that executes locking sessions, then it'll work. 5) Transaction isolation level should be REPEATABLE_READ (the default). Galera implements implicit snapshot isolation for cluster wide transactions. 6) Due to cluster level optimistic concurrency control, transaction issuing COMMIT may still be aborted at that stage. There can be two transactions writing to same rows and committing in separate cluster nodes, and only one of the them can successfully commit. The failing one, will be aborted. For cluster level aborts, MySQL/galera cluster gives back deadlock error code (Error: 1213 SQLSTATE: 40001 (ER_LOCK_DEADLOCK)). 7) Query log cannot be directed to table. If you enable query logging, you must forward the log to a file: log_output = FILE Use general_log and general_log_file to choose query logging and the log file name 8) XA transactions can not be supported due to possible rollback on commit. 
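As a quick sanity check that replication works within the limitations above,
something like the following can be run against two freshly started nodes
(host names are placeholders; 'test'/'testpass' is the preconfigured account
mentioned in the INSTALLATION section):

    mysql -h node1 -P 3306 -utest -ptestpass test -e \
        "CREATE TABLE t1 (i INT PRIMARY KEY) ENGINE=InnoDB; INSERT INTO t1 VALUES (1);"
    mysql -h node2 -P 3306 -utest -ptestpass test -e "SELECT * FROM t1;"

The SELECT on the second node should return the row inserted on the first one.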
galera-3-25.3.20/scripts/mysql/rpm.sh0000755000015300001660000001562213042054732017135 0ustar jenkinsjenkins#!/bin/bash -e if test -z "$MYSQL_SRC" then echo "MYSQL_SRC variable pointing at MySQL/wsrep sources is not set. Can't continue." exit -1 fi usage() { echo -e "Usage: $0 [patch file] [spec file]" } # Parse command line if test $# -lt 1 then usage exit 1 fi set -e # Absolute path of this script folder SCRIPT_ROOT=$(cd $(dirname $0); pwd -P) THIS_DIR=$(pwd -P) set -x MYSQL_DIST_TARBALL=$(cd $(dirname "$1"); pwd -P)/$(basename "$1") ###################################### ## ## ## Prepare patch ## ## ## ###################################### # Source paths are either absolute or relative to script, get absolute MYSQL_SRC=$(cd $MYSQL_SRC; pwd -P; cd $THIS_DIR) pushd $MYSQL_SRC export WSREP_REV=$(bzr revno) export WSPATCH_REVNO=$WSREP_REV if [ -r "VERSION" ] then . "VERSION" WSREP_API=$(grep WSREP_INTERFACE_VERSION wsrep/wsrep_api.h | cut -d '"' -f 2) WSREP_PATCH=$(grep SET\(WSREP_PATCH_VERSION cmake/wsrep.cmake | cut -d '"' -f 2) export MYSQL_VER=$MYSQL_VERSION_MAJOR.$MYSQL_VERSION_MINOR.$MYSQL_VERSION_PATCH else MYSQL_VERSION_MINOR=1 WSREP_API=$(grep WSREP_API= config/ac-macros/wsrep.m4 | cut -d '=' -f 2) WSREP_PATCH=$(grep WSREP_PATCH= config/ac-macros/wsrep.m4 | cut -d '=' -f 2) export MYSQL_VER=`grep AC_INIT configure.in | awk -F '[' '{ print $3 }'| awk -F ']' '{ print $1 }'` fi if test -z "$MYSQL_VER" then echo "Could not determine mysql version." exit -1 fi MYSQL_VERSION_EXTRA="_wsrep_$WSREP_API.$WSREP_PATCH" MYSQL_VERSION_FINAL=${MYSQL_VER}${MYSQL_VERSION_EXTRA} popd #MYSQL_SRC RPM_BUILD_ROOT=$(pwd)/redhat rm -rf $RPM_BUILD_ROOT mkdir -p $RPM_BUILD_ROOT pushd $RPM_BUILD_ROOT mkdir -p BUILD RPMS SOURCES SPECS SRPMS pushd RPMS mkdir -p athlon i386 i486 i586 i686 noarch x86_64 popd; popd ###################################### ## ## ## Prepare patched source ## ## ## ###################################### #FIXME: fix spec file to make rpmbuild do it MYSQL_DIST=$(tar -tzf $MYSQL_DIST_TARBALL | head -n1 | sed 's/\/$//') rm -rf $MYSQL_DIST; tar -xzf $MYSQL_DIST_TARBALL # rename according to MYSQL_VERSION_FINAL test "$MYSQL_DIST" != "mysql-$MYSQL_VERSION_FINAL" && \ rm -rf "mysql-$MYSQL_VERSION_FINAL" && \ mv "$MYSQL_DIST" "mysql-$MYSQL_VERSION_FINAL" && \ MYSQL_DIST="mysql-$MYSQL_VERSION_FINAL" pushd $MYSQL_DIST if test -r "$2" # check if patch name was supplied then # patch as a file WSREP_PATCH=$(cd $(dirname "$2"); pwd -P)/$(basename "$2") else # generate patch for this particular MySQL version from LP WSREP_PATCH=$($SCRIPT_ROOT/get_patch.sh mysql-$MYSQL_VER $MYSQL_SRC) fi # patch freaks out on .bzrignore which doesn't exist in source dist and # returns error code patch -p1 -f < $WSREP_PATCH || : # need to fix permissions on 5.1 [ $MYSQL_VERSION_MINOR -eq 1 ] && chmod a+x ./BUILD/*wsrep # update configure script for 5.1 test $MYSQL_VERSION_MINOR -le 5 && ./BUILD/autorun.sh time tar -C .. 
-czf $RPM_BUILD_ROOT/SOURCES/"$MYSQL_DIST.tar.gz" \ "$MYSQL_DIST" ###################################### ## ## ## Create spec file ## ## ## ###################################### export MAKE="make -j $(cat /proc/cpuinfo | grep -c ^processor)" LIB_TYPE="bundled" # bundled or system [ "$MYSQL_VERSION_MAJOR$MYSQL_VERSION_MINOR" -ge "56" ] \ && MEMCACHED_OPT="-DWITH_LIBEVENT=$LIB_TYPE -DWITH_INNODB_MEMCACHED=ON" \ || MEMCACHED_OPT="" if [ $MYSQL_VERSION_MINOR -eq 1 ] then ./configure --with-wsrep > /dev/null pushd support-files && rm -rf *.spec && make > /dev/null && popd else cmake \ -DCMAKE_BUILD_TYPE=RelWithDebInfo \ -DBUILD_CONFIG=mysql_release \ -DWITH_WSREP=1 \ -DWITH_EXTRA_CHARSETS=all \ -DWITH_SSL=$LIB_TYPE \ -DWITH_ZLIB=$LIB_TYPE \ $MEMCACHED_OPT $MYSQL_SRC \ && make -S fi ###################################### ## ## ## Build binary tar.gz ## ## ## ###################################### [ $MYSQL_VERSION_MINOR -eq 1 ] && make bin-dist || make package # Fix the name of the binary package to contain wsrep suffix OLD_BIN_NAME=$(ls mysql-$MYSQL_VER-linux-*.tar.gz | sed s/\.tar\.gz//) NEW_BIN_NAME=$(echo $OLD_BIN_NAME | sed s/-linux/$MYSQL_VERSION_EXTRA-linux/) echo "Repacking $OLD_BIN_NAME -> $NEW_BIN_NAME" tar -xzf $OLD_BIN_NAME.tar.gz && rm $OLD_BIN_NAME.tar.gz mv $OLD_BIN_NAME $NEW_BIN_NAME tar -czf $NEW_BIN_NAME.tar.gz $NEW_BIN_NAME && rm -rf $NEW_BIN_NAME popd # MYSQL_DIST WSREP_SPEC=${WSREP_SPEC:-"$MYSQL_DIST/support-files/mysql.spec"} mv $WSREP_SPEC $RPM_BUILD_ROOT/SPECS/$MYSQL_DIST.spec WSREP_SPEC=$RPM_BUILD_ROOT/SPECS/$MYSQL_DIST.spec mv $WSREP_PATCH ./$MYSQL_DIST.patch mv $MYSQL_DIST/$MYSQL_DIST-linux-*.tar.gz ./ #cleaning intermedieate sources: rm -rf $MYSQL_DIST if [ -n "$TAR_ONLY" ] || ! which rpmbuild >/dev/null 2>&1 then echo "Not building RPMs" exit 0 fi ###################################### ## ## ## Build RPM ## ## ## ###################################### if [ $MYSQL_VERSION_MINOR == 1 ] then # cflags vars might be obsolete with 5.5 wsrep_cflags="-DWSREP_PROC_INFO -DMYSQL_MAX_VARIABLE_VALUE_LEN=2048" fast_cflags="-O3 -fno-omit-frame-pointer" uname -m | grep -q i686 && \ cpu_cflags="-mtune=i686" || cpu_cflags="-mtune=core2" RPM_OPT_FLAGS="$fast_cflags $cpu_cflags $wsrep_cflags" fi RPMBUILD() { if [ $MYSQL_VERSION_MINOR -lt 5 ] then WSREP_RPM_OPTIONS=(--with wsrep --with yassl \ --define "optflags $RPM_OPT_FLAGS") else export MEMCACHED_OPT="-DWITH_LIBEVENT=system -DWITH_INNODB_MEMCACHED=ON" \ WSREP_RPM_OPTIONS=(--define='with_wsrep 1' \ --define='distro_specific 1' \ --define='runselftest 0' \ --define='with_ssl system' \ --define='mysql_packager Codership Oy ') fi $(which rpmbuild) --rmsource --define "_topdir $RPM_BUILD_ROOT" \ "${WSREP_RPM_OPTIONS[@]}" -ba $WSREP_SPEC } pushd "$RPM_BUILD_ROOT" if [ "$(whoami)" == "root" ] then chown -R mysql $RPM_BUILD_ROOT su mysql -c RPMBUILD else RPMBUILD fi popd ###################################### ## ## ## Copy required files here ## ## ## ###################################### mv $WSREP_SPEC ./ uname -m | grep -q i686 && ARCH=i386 || ARCH=x86_64 mv $RPM_BUILD_ROOT/RPMS/$ARCH/MySQL-server-*.rpm ./ # remove the patch file if is was automatically generated if test ! 
-r "$2"; then rm -rf $WSREP_PATCH; fi rm -rf $RPM_BUILD_ROOT exit 0 galera-3-25.3.20/scripts/openrep/0000755000015300001660000000000013042054732016275 5ustar jenkinsjenkinsgalera-3-25.3.20/scripts/openrep/configure0000777000015300001660000000000013042054732022105 2redundantustar jenkinsjenkinsgalera-3-25.3.20/scripts/openrep/flush0000777000015300001660000000000013042054732022166 2not_supportedustar jenkinsjenkinsgalera-3-25.3.20/scripts/openrep/stop0000777000015300001660000000000013042054732020565 2serviceustar jenkinsjenkinsgalera-3-25.3.20/scripts/openrep/online0000777000015300001660000000000013042054732021064 2serviceustar jenkinsjenkinsgalera-3-25.3.20/scripts/openrep/demote0000777000015300001660000000000013042054732021401 2redundantustar jenkinsjenkinsgalera-3-25.3.20/scripts/openrep/not_supported0000755000015300001660000000021113042054732021122 0ustar jenkinsjenkins#!/bin/sh # Copuright (C) 2009 Codership Oy echo "Operation not supported: $(basename $0 | sed s/\.sh//)" exit 0 galera-3-25.3.20/scripts/openrep/service0000755000015300001660000001446713042054732017677 0ustar jenkinsjenkins#!/bin/bash -x # Copyright (C) 2009 Codership Oy PLUGIN_BASE_DIR=$(cd $(dirname $0); pwd -P) SELF=$PLUGIN_BASE_DIR/$(basename $0) WSREP_CLUSTER_NAME=${1:-"tor_galera_cluster"} WSREP_NODE_NAME=${2:-"$(hostname)"} PLUGIN_CONF=${3:-"$PLUGIN_BASE_DIR/plugin.cnf"} . $PLUGIN_CONF #=============== Fall back to reasonable defaults ======================= # MySQL configuration file MYSQL_CNF=${MYSQL_CNF:-"$MYSQL_BASE_DIR/etc/my.cnf"} if test -s "$MYSQL_CNF" then DEFAULTS_OPTION=" --defaults-file=$MYSQL_CNF " my_cnf_datadir=$(grep ^datadir $MYSQL_CNF | sed s/[^/]*//) else DEFAULTS_OPTION=" --no-defaults " fi # If it was not given explicitely, use the one from my.cnf MYSQL_DATA_DIR=${MYSQL_DATA_DIR:-"$my_cnf_datadir"} # If it was not found in my.cnf, use distribution default MYSQL_DATA_DIR=${MYSQL_DATA_DIR:-"$MYSQL_BASE_DIR/var"} # use mysqld server directly, better not have automatic restarting MYSQLD=${MYSQLD:-"$MYSQL_BASE_DIR/libexec/mysqld"} MYSQLADMIN=${MYSQLADMIN:-"$MYSQL_BASE_DIR/bin/mysqladmin"} # Port, socket and pid files MYSQL_PORT=${MYSQL_PORT:-3306} MYSQL_SOCKET=${MYSQL_SOCKET:-"$MYSQL_DATA_DIR/mysqld.sock"} MYSQL_PID=${MYSQL_PID:-"$MYSQL_DATA_DIR/mysqld.pid"} # Shutdown wait timeout. MYSQL_SHUTDOWN_WAIT=60 #============= Nothing servicable below ================================ # User to run as if started under superuser MYSQLD_USER=$(whoami) if test "$MYSQLD_USER" = "root" then MYSQLD_USER=mysql fi #ROOT_USER=${ROOT_USER:-"-uroot"} #ROOT_PSWD=${ROOT_PSWD:-"-prootpass"} #mysql_log="$MYSQL_DATA_DIR/$(hostname).log" usage() { cat - << EOF usage: service Commands: check : check cosistency either locally or through network start : start servers stop : stop servers restart : stop and start servers status : show running servers EOF } # Checks if a process with a given PID is still running find_pid() { ps axc | grep mysqld | grep -w ^\ *$1 > /dev/null } galera_start() { local failed if ! test -x $MYSQLD then echo "$MYSQLD executable not found" exit -1 fi if test -f $MYSQL_PID then echo "Found existing '$MYSQL_PID'. 
Please run '$0 stop'" exit -1; fi if test -f $WSREP_PROVIDER || test $WSREP_PROVIDER == "none" then WSREP_OPTS="$WSREP_OPTS --wsrep_provider=$WSREP_PROVIDER" else echo "WSREP provider '$WSREP_PROVIDER' not found" exit -1 fi WSREP_OPTS="$WSREP_OPTS \ --wsrep_cluster_name=$WSREP_CLUSTER_NAME \ --wsrep_cluster_address=$WSREP_CLUSTER_ADDRESS \ --wsrep_sst_method=$WSREP_SST_METHOD \ --wsrep_local_cache_size=$WSREP_LOCAL_CACHE_SIZE \ --wsrep_start_position=$WSREP_START_POSITION \ --wsrep_debug=$WSREP_DEBUG \ --wsrep_auto_increment_control=$WSREP_AUTO_INCREMENT_CONTROL \ --wsrep_retry_autocommit=$WSREP_RETRY_AUTOCOMMIT \ --wsrep_convert_LOCK_to_trx=$WSREP_CONVERT_LOCK_TO_TRX \ --wsrep_drupal_282555_workaround=$WSREP_DRUPAL_282555_WORKAROUND \ --wsrep_ws_persistency=$WSREP_WS_PERSISTENCY \ --wsrep_slave_threads=$WSREP_SLAVE_THREADS " MYSQLD_OPTS=" --user=$MYSQLD_USER \ --basedir=$MYSQL_BASE_DIR \ --datadir=$MYSQL_DATA_DIR \ --pid-file=$MYSQL_PID \ --port=$MYSQL_PORT \ --socket=$MYSQL_SOCKET \ --skip-locking \ --binlog_format=ROW \ --default-storage-engine=InnoDB " INNODB_OPTS=" --innodb_autoinc_lock_mode=2 \ --innodb_flush_log_at_trx_commit=0 \ --innodb_doublewrite=0" err_log="$MYSQL_DATA_DIR/$(hostname).err" echo -n "Starting mysqld instance with data dir $MYSQL_DATA_DIR and listening at port $MYSQL_PORT and socket $MYSQL_SOCKET..." LD_LIBRARY_PATH=$(cd $(dirname $WSREP_PROVIDER) && pwd -P) export LD_LIBRARY_PATH export PATH=$MYSQL_BASE_DIR/bin:$PATH nohup $MYSQLD $DEFAULTS_OPTION $MYSQLD_OPTS $INNODB_OPTS $WSREP_OPTS \ 1>/dev/null 2>>$err_log & my_pid=$! # echo "Waiting for pid file" while ! test -r $MYSQL_PID do sleep 1 if find_pid $my_pid then # process is alive, wait for pid file echo -n "." else failed="yes" break fi done if test "$failed" != "yes" then echo " Done (PID:$(cat $MYSQL_PID))" else echo " Failed (PID:$my_pid)" fi } galera_stop() { # check pid file if test -r $MYSQL_PID then # check if corresponding mysqld is running # if ps axc | grep mysqld | grep $(cat $MYSQL_PID) >/dev/null 2>&1 local my_pid=$(cat $MYSQL_PID) if find_pid $my_pid then echo -n "Killing PID $my_pid" kill $my_pid # wait for pid file to disappear for second in $(seq 1 $MYSQL_SHUTDOWN_WAIT) do echo -n "." sleep 1 if test ! -r $MYSQL_PID then break fi done echo "" if test "$second" = "$MYSQL_SHUTDOWN_WAIT" then echo -n "Failed to stop mysqld safely. Killing with -9... " kill -9 $my_pid fi else echo -n "Removing stale PID file $MYSQL_PID... " fi rm -rf $MYSQL_PID echo "Done" else echo "PID file not found: $MYSQL_PID" fi } galera_restart() { galera_stop galera_start } dump() { #local ROUTINES="--routines" # don't dump routines yet, will cause false err. 
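    # The dump produced here is also what checksum() below pipes through
    # md5sum, so the 'check' command can compare node contents for
    # consistency.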
local DUMP_OPTIONS=" --skip-opt --compact --flush-logs --lock-all-tables \ --quick --create-options --set-charset --skip-comments $ROUTINES " DB=${DB:-"--all-databases"} #set -x mysqldump $DUMP_OPTIONS $ROOT_USER $ROOT_PSWD -h127.0.0.1 -P$MYSQL_PORT \ $IGNORE_TABLES $DB #set +x } checksum() { CS=`dump | md5sum` echo $CS } # write set level, SQL, RBR or ROW WS_LEVEL="RBR" #DB="test" # use 'test' database if none given IGNORE_TABLES="" case $(basename $0) in 'dump') COMMAND="dump" ;; 'check') COMMAND="checksum" ;; 'online') COMMAND=galera_start ;; 'stop'|'halt'|'kill'|'offline') COMMAND=galera_stop ;; 'restart') COMMAND=galera_restart ;; 'status') COMMAND=status ;; *) echo $0 # must be command usage exit 1 ;; esac $COMMAND # galera-3-25.3.20/scripts/openrep/common0000644000015300001660000000024213042054732017506 0ustar jenkinsjenkinsset -x MYSQL_BIN=${MYSQL_BIN:-"$MYSQL_ROOT/libexec/mysqld"} MYSQL_DATA_DIR=${MYSQL_DATA_DIR:-"$MYSQL_ROOT/var"} MYSQL_CNF=${MYSQL_CNF:-"$MYSQL_ROOT/etc/my.cnf"} galera-3-25.3.20/scripts/openrep/promote0000777000015300001660000000000013042054732021611 2redundantustar jenkinsjenkinsgalera-3-25.3.20/scripts/openrep/provision0000777000015300001660000000000013042054732023075 2not_supportedustar jenkinsjenkinsgalera-3-25.3.20/scripts/openrep/kill0000777000015300001660000000000013042054732020533 2serviceustar jenkinsjenkinsgalera-3-25.3.20/scripts/openrep/plugin.cnf0000644000015300001660000000122113042054732020257 0ustar jenkinsjenkinsMYSQL_BASE_DIR= # MYSQLD= # MYSQL_DATA_DIR= # MYSQL_CNF= MYSQL_ROOT_USER=root MYSQL_ROOT_PSWD=rootpass WSREP_PROVIDER= WSREP_CLUSTER_ADDRESS="dummy://" # WSREP_CLUSTER_NAME= # WSREP_NODE_NAME= # WSREP_NODE_INCOMING_ADDRESS= WSREP_SST_METHOD=mysqldump WSREP_SST_AUTH=$MYSQL_ROOT_USER:$MYSQL_ROOT_PSWD # WSREP_SST_ADDRESS= # WSREP_SST_DONOR="" # # Parameters below rarely require changing # WSREP_SLAVE_THREADS=1 WSREP_LOCAL_CACHE_SIZE=20971520 WSREP_START_POSITION= WSREP_DEBUG=0 WSREP_AUTO_INCREMENT_CONTROL=1 WSREP_RETRY_AUTOCOMMIT=1 WSREP_CONVERT_LOCK_TO_TRX=1 WSREP_DRUPAL_282555_WORKAROUND=1 WSREP_WS_PERSISTENCY=0 # WSREP_DBUG= # WSREP_DATA_HOME_DIR= galera-3-25.3.20/scripts/openrep/redundant0000755000015300001660000000021013042054732020200 0ustar jenkinsjenkins#!/bin/sh # Copuright (C) 2009 Codership Oy echo "Operation is redundant: $(basename $0 | sed s/\.sh//)" exit 0 galera-3-25.3.20/scripts/openrep/prepare0000777000015300001660000000000013042054732021562 2redundantustar jenkinsjenkinsgalera-3-25.3.20/scripts/openrep/waitevent0000777000015300001660000000000013042054732023053 2not_supportedustar jenkinsjenkinsgalera-3-25.3.20/scripts/openrep/offline0000777000015300001660000000000013042054732021222 2serviceustar jenkinsjenkinsgalera-3-25.3.20/scripts/openrep/release0000777000015300001660000000000013042054732022465 2not_supportedustar jenkinsjenkinsgalera-3-25.3.20/scripts/openrep/status0000777000015300001660000000000013042054732022370 2not_supportedustar jenkinsjenkinsgalera-3-25.3.20/scripts/openrep/halt0000777000015300001660000000000013042054732020530 2serviceustar jenkinsjenkinsgalera-3-25.3.20/GALERA_REVISION0000644000015300001660000000001013042122666015323 0ustar jenkinsjenkins7e383f7 galera-3-25.3.20/.travis.yml0000644000015300001660000000172013042054732015247 0ustar jenkinsjenkinslanguage: cpp sudo: false cache: - apt - ccache compiler: - gcc - clang addons: apt: sources: - ubuntu-toolchain-r-test # https://github.com/travis-ci/travis-ci/issues/3668 # for alternatives in container builds packages: - libboost-program-options-dev - 
libssl-dev - check - scons - gcc-5 - g++-5 script: - if [ ${CC} != "clang" ]; then MEM=$(head -n 1 /proc/meminfo); RAM=(${MEM// / }); MAX_JOBS=$(( ${RAM[1]} / 393216 )); echo Max jobs\:\ $MAX_JOBS; CORES=$(grep -c ^processor /proc/cpuinfo); [ $CORES -lt $MAX_JOBS ] && MAX_JOBS=$CORES; echo Real jobs\:\ $MAX_JOBS; ${CC} --version; ${CXX} --version; ./scripts/build.sh -j $MAX_JOBS; export CC=`which gcc-5`; export CXX=`which g++-5`; ${CC} --version; ${CXX} --version; ./scripts/build.sh -j $MAX_JOBS; else ${CC} --version ; ${CXX} --version; ./scripts/build.sh; fi galera-3-25.3.20/AUTHORS0000644000015300001660000000004213042054732014202 0ustar jenkinsjenkinsCodership Oy galera-3-25.3.20/COPYING0000644000015300001660000000314413042054732014173 0ustar jenkinsjenkinsGalera replication - implementation of write set replication (wsrep) interface. Copyright (C) 2007-2014 Codership Oy This program is free software; you can redistribute it and/or modify it under the terms of version 2 of the GNU General Public License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You will find the GNU General Public License version 2 in the file LICENSE or at http://www.gnu.org/licenses/gpl-2.0.html In addition, as a special exception, the copyright holders give permission to link the code of portions of this program with the OpenSSL project's "OpenSSL" library (or with modified versions of it that use the same license as the "OpenSSL" library), and distribute the linked executables. You must obey the GNU General Public License in all respects for all of the code used other than "OpenSSL". If you modify this file, you may extend this exception to your version of the file, but you are not obligated to do so. If you do not wish to do so, delete this exception statement from your version. docs/ Documentation is dual licensed with the Creative Commons Attribution ShareAlike and GNU Free Documentation Licenses. See docs/COPYING for details. 
Following modules have their own authors and licenses: asio/ see asio/COPYING for details chromium/ see chromium/AUTHORS and chromium/LICENSE for details www.evanjones.ca/ see www.evanjones.ca/AUTHORS and www.evanjones.ca/LICENSE for details galera-3-25.3.20/SConstruct0000644000015300001660000004076013042054732015177 0ustar jenkinsjenkins################################################################### # # Copyright (C) 2010-2016 Codership Oy # # SCons build script to build galera libraries # # How to control the build with environment variables: # Set CC to specify C compiler # Set CXX to specify C++ compiler # Set CPPFLAGS to add non-standard include paths and preprocessor macros # Set CCFLAGS to *override* optimization and architecture-specific options # Set CFLAGS to supply C compiler options # Set CXXFLAGS to supply C++ compiler options # Set LDFLAGS to *override* linking flags # Set LIBPATH to add non-standard linker paths # Set RPATH to add rpaths # # Script structure: # - Help message # - Default parameters # - Read commandline options # - Set up and configure default build environment # - Set up and configure check unit test build environment # - Run root SConscript with variant_dir # #################################################################### import os import platform import string sysname = os.uname()[0].lower() machine = platform.machine() bits = ARGUMENTS.get('bits', platform.architecture()[0]) print 'Host: ' + sysname + ' ' + machine + ' ' + bits x86 = any(arch in machine for arch in [ 'x86', 'amd64', 'i686', 'i386', 'i86pc' ]) if bits == '32bit': bits = 32 elif bits == '64bit': bits = 64 # # Print Help # Help(''' Build targets: build tests check install all Default target: all Commandline Options: debug=n debug build with optimization level n build_dir=dir build directory, default: '.' 
boost=[0|1] disable or enable boost libraries system_asio=[0|1] use system asio library, if available boost_pool=[0|1] use or not use boost pool allocator revno=XXXX source code revision number bpostatic=path a path to static libboost_program_options.a extra_sysroot=path a path to extra development environment (Fink, Homebrew, MacPorts, MinGW) bits=[32bit|64bit] ''') # bpostatic option added on Percona request # # Default params # build_target = 'all' # Optimization level opt_flags = ' -g -O3 -DNDEBUG' # Architecture (defaults to build host type) compile_arch = '' link_arch = '' # Build directory build_dir = '' # # Read commandline options # build_dir = ARGUMENTS.get('build_dir', '') # Debug/dbug flags debug = ARGUMENTS.get('debug', -1) dbug = ARGUMENTS.get('dbug', False) debug_lvl = int(debug) if debug_lvl >= 0 and debug_lvl < 3: opt_flags = ' -g -O%d -fno-inline' % debug_lvl dbug = True elif debug_lvl == 3: opt_flags = ' -g -O3' if dbug: opt_flags = opt_flags + ' -DGU_DBUG_ON' if sysname == 'sunos': compile_arch = ' -mtune=native' elif x86: if bits == 32: if machine == 'x86_64': compile_arch = ' -mx32' else: compile_arch = ' -m32 -march=i686' if sysname == 'linux': link_arch = ' -Wl,-melf_i386' else: compile_arch = ' -m64' if sysname == 'linux': link_arch = ' -Wl,-melf_x86_64' link_arch = compile_arch + link_arch elif machine == 's390x': compile_arch = ' -mzarch' if bits == 32: compile_arch += ' -m32' boost = int(ARGUMENTS.get('boost', 1)) boost_pool = int(ARGUMENTS.get('boost_pool', 0)) system_asio= int(ARGUMENTS.get('system_asio', 1)) ssl = int(ARGUMENTS.get('ssl', 1)) tests = int(ARGUMENTS.get('tests', 1)) deterministic_tests = int(ARGUMENTS.get('deterministic_tests', 0)) strict_build_flags = int(ARGUMENTS.get('strict_build_flags', 1)) GALERA_VER = ARGUMENTS.get('version', '3.20') GALERA_REV = ARGUMENTS.get('revno', 'XXXX') # Attempt to read from file if not given if GALERA_REV == "XXXX" and os.path.isfile("GALERA_REVISION"): with open("GALERA_REVISION", "r") as f: GALERA_REV = f.readline().rstrip("\n") # export to any module that might have use of those Export('GALERA_VER', 'GALERA_REV') print 'Signature: version: ' + GALERA_VER + ', revision: ' + GALERA_REV LIBBOOST_PROGRAM_OPTIONS_A = ARGUMENTS.get('bpostatic', '') LIBBOOST_SYSTEM_A = string.replace(LIBBOOST_PROGRAM_OPTIONS_A, 'boost_program_options', 'boost_system') # # Set up and export default build environment # env = Environment(ENV = {'PATH' : os.environ['PATH'], 'HOME' : os.environ['HOME']}) # Set up environment for ccache and distcc #env['ENV']['HOME'] = os.environ['HOME'] #env['ENV']['DISTCC_HOSTS'] = os.environ['DISTCC_HOSTS'] #env['ENV']['CCACHE_PREFIX'] = os.environ['CCACHE_PREFIX'] if 'CCACHE_DIR' in os.environ: env['ENV']['CCACHE_DIR'] = os.environ['CCACHE_DIR'] if 'CCACHE_CPP2' in os.environ: env['ENV']['CCACHE_CPP2'] = os.environ['CCACHE_CPP2'] # Set CC and CXX compilers cc = os.getenv('CC', 'default') if cc != 'default': env.Replace(CC = cc) cxx = os.getenv('CXX', 'default') if cxx != 'default': env.Replace(CXX = cxx) link = os.getenv('LINK', 'default') if link != 'default': env.Replace(LINK = link) # Initialize CPPFLAGS and LIBPATH from environment to get user preferences env.Replace(CPPFLAGS = os.getenv('CPPFLAGS', '')) env.Replace(CCFLAGS = os.getenv('CCFLAGS', opt_flags + compile_arch)) env.Replace(CFLAGS = os.getenv('CFLAGS', '')) env.Replace(CXXFLAGS = os.getenv('CXXFLAGS', '')) env.Replace(LINKFLAGS = os.getenv('LDFLAGS', link_arch)) env.Replace(LIBPATH = [os.getenv('LIBPATH', '')]) env.Replace(RPATH = 
[os.getenv('RPATH', '')]) # Set -pthread flag explicitly to make sure that pthreads are # enabled on all platforms. env.Append(CCFLAGS = ' -pthread') # Freebsd ports are installed under /usr/local if sysname == 'freebsd' or sysname == 'sunos': env.Append(LIBPATH = ['/usr/local/lib']) env.Append(CPPFLAGS = ' -I/usr/local/include ') if sysname == 'sunos': env.Replace(SHLINKFLAGS = '-shared ') # Add paths is extra_sysroot argument was specified extra_sysroot = ARGUMENTS.get('extra_sysroot', '') if sysname == 'darwin' and extra_sysroot == '': # common developer environments and paths if os.system('which -s port') == 0 and os.path.isfile('/opt/local/bin/port'): extra_sysroot = '/opt/local' elif os.system('which -s brew') == 0 and os.path.isfile('/usr/local/bin/brew'): extra_sysroot = '/usr/local' elif os.system('which -s fink') == 0 and os.path.isfile('/sw/bin/fink'): extra_sysroot = '/sw' if extra_sysroot != '': env.Append(LIBPATH = [extra_sysroot + '/lib']) env.Append(CPPFLAGS = ' -I' + extra_sysroot + '/include') # print env.Dump() # Preprocessor flags if sysname != 'sunos' and sysname != 'darwin' and sysname != 'freebsd': env.Append(CPPFLAGS = ' -D_XOPEN_SOURCE=600') if sysname == 'sunos': env.Append(CPPFLAGS = ' -D__EXTENSIONS__') env.Append(CPPFLAGS = ' -DHAVE_COMMON_H') # Common C/CXX flags # These should be kept minimal as they are appended after C/CXX specific flags env.Append(CCFLAGS = ' -fPIC -Wall -Wextra -Wno-unused-parameter') # C-specific flags env.Prepend(CFLAGS = '-std=c99 -fno-strict-aliasing -pipe ') # CXX-specific flags # Note: not all 3rd-party libs like '-Wold-style-cast -Weffc++' # adding those after checks env.Prepend(CXXFLAGS = '-Wno-long-long -Wno-deprecated -ansi ') if sysname != 'sunos': env.Prepend(CXXFLAGS = '-pipe ') # Linker flags # TODO: enable ' -Wl,--warn-common -Wl,--fatal-warnings' after warnings from # static linking have beed addressed # #env.Prepend(LINKFLAGS = '-Wl,--warn-common -Wl,--fatal-warnings ') # # Check required headers and libraries (autoconf functionality) # # # Custom tests: # def CheckSystemASIOVersion(context): system_asio_test_source_file = """ #include #if ASIO_VERSION < 101001 #error "Included asio version is too old" #endif int main() { return 0; } """ context.Message('Checking ASIO version (> 1.10.1) ... 
') result = context.TryLink(system_asio_test_source_file, '.cpp') context.Result(result) return result # # Construct confuration context # conf = Configure(env, custom_tests = {'CheckSystemASIOVersion': CheckSystemASIOVersion}) # System headers and libraries if not conf.CheckLib('pthread'): print 'Error: pthread library not found' Exit(1) # libatomic may be needed on some 32bit platforms (and 32bit userland PPC64) # for 8 byte atomics but not generally required if not x86: conf.CheckLib('atomic') if sysname != 'darwin': if not conf.CheckLib('rt'): print 'Error: rt library not found' Exit(1) if sysname == 'freebsd': if not conf.CheckLib('execinfo'): print 'Error: execinfo library not found' Exit(1) if sysname == 'sunos': if not conf.CheckLib('socket'): print 'Error: socket library not found' Exit(1) if not conf.CheckLib('crypto'): print 'Error: crypto library not found' Exit(1) if not conf.CheckLib('nsl'): print 'Error: nsl library not found' Exit(1) if conf.CheckHeader('sys/epoll.h'): conf.env.Append(CPPFLAGS = ' -DGALERA_USE_GU_NETWORK') if conf.CheckHeader('byteswap.h'): conf.env.Append(CPPFLAGS = ' -DHAVE_BYTESWAP_H') if conf.CheckHeader('endian.h'): conf.env.Append(CPPFLAGS = ' -DHAVE_ENDIAN_H') elif conf.CheckHeader('sys/endian.h'): conf.env.Append(CPPFLAGS = ' -DHAVE_SYS_ENDIAN_H') elif conf.CheckHeader('sys/byteorder.h'): conf.env.Append(CPPFLAGS = ' -DHAVE_SYS_BYTEORDER_H') elif sysname != 'darwin': print 'can\'t find byte order information' Exit(1) if conf.CheckHeader('execinfo.h'): conf.env.Append(CPPFLAGS = ' -DHAVE_EXECINFO_H') # Additional C headers and libraries # boost headers if not conf.CheckCXXHeader('boost/shared_ptr.hpp'): print 'boost/shared_ptr.hpp not found or not usable' Exit(1) conf.env.Append(CPPFLAGS = ' -DHAVE_BOOST_SHARED_PTR_HPP') if conf.CheckCXXHeader('unordered_map'): conf.env.Append(CPPFLAGS = ' -DHAVE_UNORDERED_MAP') elif conf.CheckCXXHeader('tr1/unordered_map'): conf.env.Append(CPPFLAGS = ' -DHAVE_TR1_UNORDERED_MAP') else: if conf.CheckCXXHeader('boost/unordered_map.hpp'): conf.env.Append(CPPFLAGS = ' -DHAVE_BOOST_UNORDERED_MAP_HPP') else: print 'no unordered map header available' Exit(1) # pool allocator if boost == 1: # Default suffix for boost multi-threaded libraries if sysname == 'darwin': boost_library_suffix = '-mt' else: boost_library_suffix = '' if sysname == 'darwin' and extra_sysroot != '': boost_library_path = extra_sysroot + '/lib' else: boost_library_path = '' # Use nanosecond time precision conf.env.Append(CPPFLAGS = ' -DBOOST_DATE_TIME_POSIX_TIME_STD_CONFIG=1') # Common procedure to find boost static library if bits == 64: boost_libpaths = [ boost_library_path, '/usr/lib64', '/usr/local/lib64' ] else: boost_libpaths = [ boost_library_path, '/usr/local/lib', '/usr/lib' ] def check_boost_library(libBaseName, header, configuredLibPath, autoadd = 1): libName = libBaseName + boost_library_suffix if configuredLibPath != '' and not os.path.isfile(configuredLibPath): print "Error: file '%s' does not exist" % configuredLibPath Exit(1) if configuredLibPath == '': for libpath in boost_libpaths: libname = libpath + '/lib%s.a' % libName if os.path.isfile(libname): configuredLibPath = libname break if configuredLibPath != '': if not conf.CheckCXXHeader(header): print "Error: header '%s' does not exist" % header Exit (1) if autoadd: conf.env.Append(LIBS=File(configuredLibPath)) else: return File(configuredLibPath) else: if not conf.CheckLibWithHeader(libs=[libName], header=header, language='CXX', autoadd=autoadd): print 'Error: library %s does not 
exist' % libName Exit (1) return [libName] # Required boost headers/libraries # if boost_pool == 1: if conf.CheckCXXHeader('boost/pool/pool_alloc.hpp'): print 'Using boost pool alloc' conf.env.Append(CPPFLAGS = ' -DGALERA_USE_BOOST_POOL_ALLOC=1') # due to a bug in boost >= 1.50 we need to link with boost_system # - should be a noop with no boost_pool. if sysname == 'darwin': if conf.CheckLib('boost_system' + boost_library_suffix): conf.env.Append(LIBS=['boost_system' + boost_library_suffix]) check_boost_library('boost_system', 'boost/system/error_code.hpp', LIBBOOST_SYSTEM_A) else: print 'Error: boost/pool/pool_alloc.hpp not found or not usable' Exit(1) libboost_program_options = check_boost_library('boost_program_options', 'boost/program_options.hpp', LIBBOOST_PROGRAM_OPTIONS_A, autoadd = 0) else: print 'Not using boost' # asio if system_asio == 1 and conf.CheckCXXHeader('asio.hpp') and conf.CheckSystemASIOVersion(): conf.env.Append(CPPFLAGS = ' -DHAVE_SYSTEM_ASIO -DHAVE_ASIO_HPP') else: system_asio = False print "Falling back to bundled asio" if not system_asio: # Fall back to embedded asio conf.env.Append(CPPPATH = [ '#/asio' ]) if conf.CheckCXXHeader('asio.hpp'): conf.env.Append(CPPFLAGS = ' -DHAVE_ASIO_HPP') else: print 'asio headers not found or not usable' Exit(1) # asio/ssl if ssl == 1: if conf.CheckCXXHeader('asio/ssl.hpp'): conf.env.Append(CPPFLAGS = ' -DHAVE_ASIO_SSL_HPP') else: print 'ssl support required but asio/ssl.hpp not found or not usable' print 'compile with ssl=0 or check that openssl devel headers are usable' Exit(1) if conf.CheckLib('ssl'): conf.CheckLib('crypto') else: print 'ssl support required but openssl library not found' print 'compile with ssl=0 or check that openssl library is usable' Exit(1) # these will be used only with our softaware if strict_build_flags == 1: conf.env.Append(CCFLAGS = ' -Werror -pedantic') if 'clang' not in conf.env['CXX']: conf.env.Prepend(CXXFLAGS = '-Weffc++ -Wold-style-cast ') else: conf.env.Append(CCFLAGS = ' -Wno-self-assign') conf.env.Append(CCFLAGS = ' -Wno-gnu-zero-variadic-macro-arguments') conf.env.Append(CXXFLAGS = ' -Wno-variadic-macros') if 'ccache' in conf.env['CXX']: conf.env.Append(CCFLAGS = ' -Qunused-arguments') env = conf.Finish() Export('x86', 'bits', 'env', 'sysname', 'libboost_program_options') # # Actions to build .dSYM directories, containing debugging information for Darwin # if sysname == 'darwin' and int(debug) >= 0 and int(debug) < 3: env['LINKCOM'] = [env['LINKCOM'], 'dsymutil $TARGET'] env['SHLINKCOM'] = [env['SHLINKCOM'], 'dsymutil $TARGET'] # # Set up and export environment for check unit tests # # Clone base from default environment check_env = env.Clone() conf = Configure(check_env) # Check header and library if not conf.CheckHeader('check.h'): print 'Error: check header file not found or not usable' Exit(1) if not conf.CheckLib('check'): print 'Error: check library not found or not usable' Exit(1) if not conf.CheckLib('m'): print 'Error: math library not found or not usable' Exit(1) # potential check dependency, link if present conf.CheckLib('subunit') if sysname != 'darwin': if not conf.CheckLib('rt'): print 'Error: realtime library not found or not usable' Exit(1) conf.Finish() # # this follows recipes from http://www.scons.org/wiki/UnitTests # def builder_unit_test(target, source, env): app = str(source[0].abspath) if os.spawnl(os.P_WAIT, app, app)==0: open(str(target[0]),'w').write("PASSED\n") else: return 1 def builder_unit_test_dummy(target, source, env): return 0 # Create a builder for tests if 
tests == 1: bld = Builder(action = builder_unit_test) else: bld = Builder(action = builder_unit_test_dummy) check_env.Append(BUILDERS = {'Test' : bld}) Export('check_env') # # If deterministic_tests is given, export GALERA_TEST_DETERMINISTIC # so that the non-deterministic tests can be filtered out. # if deterministic_tests: os.environ['GALERA_TEST_DETERMINISTIC'] = '1' # # Run root SConscript with variant_dir # SConscript('SConscript', variant_dir=build_dir) galera-3-25.3.20/galerautils/0000755000015300001660000000000013042054732015452 5ustar jenkinsjenkinsgalera-3-25.3.20/galerautils/src/0000755000015300001660000000000013042054732016241 5ustar jenkinsjenkinsgalera-3-25.3.20/galerautils/src/gu_rand.h0000644000015300001660000000124313042054732020031 0ustar jenkinsjenkins// Copyright (C) 2013 Codership Oy /** * @file routines to generate "random" seeds for RNGs by collecting some easy * entropy. * * gu_rand_seed_long() goes for srand48() * * gu_rand_seed_int() goes for srand() and rand_r() * * $Id$ */ #ifndef _gu_rand_h_ #define _gu_rand_h_ #include "gu_arch.h" #include // for pid_t extern long int gu_rand_seed_long (long long time, const void* heap_ptr, pid_t pid); #if GU_WORDSIZE == 32 extern unsigned int gu_rand_seed_int (long long time, const void* heap_ptr, pid_t pid); #else #define gu_rand_seed_int gu_rand_seed_long #endif /* GU_WORDSIZE */ #endif /* _gu_rand_h_ */ galera-3-25.3.20/galerautils/src/gu_atomic.hpp0000644000015300001660000000331113042054732020717 0ustar jenkinsjenkins// // Copyright (C) 2010-2014 Codership Oy // // // @todo Check that the at least the following gcc versions are supported // gcc version 4.1.2 20080704 (Red Hat 4.1.2-48) // #ifndef GU_ATOMIC_HPP #define GU_ATOMIC_HPP #include "gu_atomic.h" #include namespace gu { template class Atomic { public: Atomic(I i = 0) : i_(i) { } I operator()() const { I i; gu_atomic_get(&i_, &i); return i; } Atomic& operator=(I i) { gu_atomic_set(&i_, &i); return *this; } I fetch_and_zero() { return gu_atomic_fetch_and_and(&i_, 0); } I fetch_and_add(I i) { return gu_atomic_fetch_and_add(&i_, i); } I add_and_fetch(I i) { return gu_atomic_add_and_fetch(&i_, i); } I sub_and_fetch(I i) { return gu_atomic_sub_and_fetch(&i_, i); } Atomic& operator++() { gu_atomic_fetch_and_add(&i_, 1); return *this; } Atomic& operator--() { gu_atomic_fetch_and_sub(&i_, 1); return *this; } Atomic& operator+=(I i) { gu_atomic_fetch_and_add(&i_, i); return *this; } bool operator!=(I i) { return (operator()() != i); } private: #if !defined(__ATOMIC_RELAXED) // implementation of gu_atomic_get() via __sync_fetch_and_or() // is not read-only for GCC mutable #endif I i_; }; } #endif // ::GU_ATOMIC_HPP galera-3-25.3.20/galerautils/src/gu_asio.cpp0000644000015300001660000001104113042054732020370 0ustar jenkinsjenkins// // Copyright (C) 2014-2015 Codership Oy // #include "gu_config.hpp" #include "gu_asio.hpp" #include void gu::ssl_register_params(gu::Config& conf) { // register SSL config parameters conf.add(gu::conf::use_ssl); conf.add(gu::conf::ssl_cipher); conf.add(gu::conf::ssl_compression); conf.add(gu::conf::ssl_key); conf.add(gu::conf::ssl_cert); conf.add(gu::conf::ssl_ca); conf.add(gu::conf::ssl_password_file); } /* checks if all mandatory SSL options are set */ static bool ssl_check_conf(const gu::Config& conf) { using namespace gu; bool explicit_ssl(false); if (conf.is_set(conf::use_ssl)) { if (conf.get(conf::use_ssl) == false) { return false; // SSL is explicitly disabled } else { explicit_ssl = true; } } int count(0); count += 
conf.is_set(conf::ssl_key); count += conf.is_set(conf::ssl_cert); bool const use_ssl(explicit_ssl || count > 0); if (use_ssl && count < 2) { gu_throw_error(EINVAL) << "To enable SSL at least both of '" << conf::ssl_key << "' and '" << conf::ssl_cert << "' must be set"; } return use_ssl; } void gu::ssl_init_options(gu::Config& conf) { bool use_ssl(ssl_check_conf(conf)); if (use_ssl == true) { // set defaults // cipher list const std::string cipher_list(conf.get(conf::ssl_cipher, "AES128-SHA")); conf.set(conf::ssl_cipher, cipher_list); // compression bool compression(conf.get(conf::ssl_compression, true)); if (compression == false) { log_info << "disabling SSL compression"; sk_SSL_COMP_zero(SSL_COMP_get_compression_methods()); } conf.set(conf::ssl_compression, compression); // verify that asio::ssl::context can be initialized with provided // values try { asio::io_service io_service; asio::ssl::context ctx(io_service, asio::ssl::context::sslv23); ssl_prepare_context(conf, ctx); } catch (asio::system_error& ec) { gu_throw_error(EINVAL) << "Initializing SSL context failed: " << extra_error_info(ec.code()); } } } namespace { // Callback for reading SSL key protection password from file class SSLPasswordCallback { public: SSLPasswordCallback(const gu::Config& conf) : conf_(conf) { } std::string get_password() const { std::string file(conf_.get(gu::conf::ssl_password_file)); std::ifstream ifs(file.c_str(), std::ios_base::in); if (ifs.good() == false) { gu_throw_error(errno) << "could not open password file '" << file << "'"; } std::string ret; std::getline(ifs, ret); return ret; } private: const gu::Config& conf_; }; } void gu::ssl_prepare_context(const gu::Config& conf, asio::ssl::context& ctx, bool verify_peer_cert) { ctx.set_verify_mode(asio::ssl::context::verify_peer | (verify_peer_cert == true ? 
asio::ssl::context::verify_fail_if_no_peer_cert : 0)); SSLPasswordCallback cb(conf); ctx.set_password_callback( boost::bind(&SSLPasswordCallback::get_password, &cb)); std::string param; try { param = conf::ssl_key; ctx.use_private_key_file(conf.get(param), asio::ssl::context::pem); param = conf::ssl_cert; ctx.use_certificate_file(conf.get(param), asio::ssl::context::pem); param = conf::ssl_ca; ctx.load_verify_file(conf.get(param, conf.get(conf::ssl_cert))); param = conf::ssl_cipher; SSL_CTX_set_cipher_list(ctx.impl(), conf.get(param).c_str()); ctx.set_options(asio::ssl::context::no_sslv2 | asio::ssl::context::no_sslv3 | asio::ssl::context::no_tlsv1); } catch (asio::system_error& ec) { gu_throw_error(EINVAL) << "Bad value '" << conf.get(param, "") << "' for SSL parameter '" << param << "': " << extra_error_info(ec.code()); } catch (gu::NotSet& ec) { gu_throw_error(EINVAL) << "Missing required value for SSL parameter '" << param << "'"; } } galera-3-25.3.20/galerautils/src/gu_buf.h0000644000015300001660000000051513042054732017662 0ustar jenkinsjenkins/* Copyright (C) 2013 Codership Oy */ /** * @file generic buffer declaration * * $Id$ */ #ifndef _gu_buf_h_ #define _gu_buf_h_ #include "gu_types.h" #ifdef __cplusplus extern "C" { #endif struct gu_buf { const void* ptr; ssize_t size; }; #ifdef __cplusplus } #endif #endif /* _gu_buf_h_ */ galera-3-25.3.20/galerautils/src/gu_regex.cpp0000644000015300001660000000225213042054732020553 0ustar jenkinsjenkins// Copyright (C) 2009 Codership Oy /** * @file Regular expressions parser based on POSIX regex functions in * * $Id$ */ #include "gu_utils.hpp" #include "gu_regex.hpp" namespace gu { using std::string; using std::vector; string RegEx::strerror (int rc) const { char buf[128]; regerror(rc, ®ex, buf, sizeof(buf)); return string (buf); } static inline RegEx::Match regmatch2Match (const string& str, const regmatch_t& rm) { if (rm.rm_so == -1) return RegEx::Match(); return RegEx::Match (str.substr(rm.rm_so, rm.rm_eo - rm.rm_so)); } vector RegEx::match (const string& str, size_t num) const { vector ret; int rc; VLA matches(num); if ((rc = regexec(®ex, str.c_str(), num, &matches, 0))) { gu_throw_error (EINVAL) << "regexec(" << str << "): " << strerror(rc); } for (size_t i = 0; i < num; ++i) { ret.push_back (regmatch2Match (str, matches[i])); } return ret; } } galera-3-25.3.20/galerautils/src/gu_vlq.cpp0000644000015300001660000000326313042054732020246 0ustar jenkinsjenkins// // Copyright (C) 2013 Codership Oy // //! // @file Variable-length quantity encoding for integers // // Unsigned integers: Implementation uses using unsigned LEB128, // see for example http://en.wikipedia.org/wiki/LEB128. // // Signed integers: TODO // #include "gu_vlq.hpp" namespace gu { /* checks helper for the uleb128_decode() */ void uleb128_decode_checks (const byte_t* buf, size_t buflen, size_t offset, size_t avail_bits) { // Check if trying to read past last byte in buffer without // encountering byte without 0x80 bit set. 
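    // (In unsigned LEB128 every byte except the last has the 0x80
    // continuation bit set and carries 7 value bits - e.g. 300 is encoded
    // as 0xAC 0x02 - so a buffer that ends while 0x80 is still set holds a
    // truncated value.)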
if (offset >= buflen) { gu_throw_error(EINVAL) << "read value is not uleb128 representation, missing " << "terminating byte before end of input"; } assert(avail_bits > 0); if (avail_bits < 7) { // mask to check if the remaining value can be represented // with available bits gu::byte_t mask(~((1 << avail_bits) - 1)); if ((buf[offset] & mask) != 0) { gu_throw_error(EOVERFLOW) << "read value not representable with avail bits: " << avail_bits << " mask: 0x" << std::hex << static_cast(mask) << " buf: 0x" << std::hex << static_cast(buf[offset]) << " excess: 0x" << std::hex << static_cast(mask & buf[offset]); } } } } /* namespace gu */ galera-3-25.3.20/galerautils/src/gu_vector.hpp0000644000015300001660000001051513042054732020751 0ustar jenkinsjenkins// Copyright (C) 2013 Codership Oy /*! * @file implementation of STL vector functionality "on the stack", that is * with initial buffer for allocations reserved inside the object: * * gu::Vector v; * v().resize(5); // uses internal buffer (in this case on the stack) * v().resize(20); // overflows into heap * * In many cases, when the number of elements in a vector is predictably low or * even known exactly, this will save us from going to heap just to allocate * few elements. * * Rather than manually rewriting all std::vector methods, we return * a reference to std::vector object via operator(). operator[] is also * rewritten to provide the familiar v[i] interface. * * $Id$ */ #ifndef _GU_VECTOR_HPP_ #define _GU_VECTOR_HPP_ #include "gu_reserved_container.hpp" #include namespace gu { /* gu::VectorBase is an interface to generalize gu::Vector template over * capacity so that it is possible to pass gu::Vector objects * by reference to gu::VectorBase */ template class VectorBase { public: typedef T value_type; typedef T& reference; typedef const T& const_reference; typedef typename ReservedAllocator::size_type size_type; virtual reference operator[] (size_type i) = 0; virtual const_reference operator[] (size_type i) const = 0; virtual size_type size () const = 0; virtual void reserve (size_type n) = 0; virtual void resize (size_type n, value_type val = value_type()) = 0; // Now iterators, which I have no time for ATM. Leaving unfinished. 
protected: VectorBase() {} virtual ~VectorBase() {} }; /* a base class to be used as a member of other classes */ template ::size_type capacity> class Vector { public: Vector() : rv_() {} Vector(const Vector& other) : rv_() { rv_().assign(other().begin(), other().end()); } Vector& operator= (Vector other) { using namespace std; swap(other); return *this; } typedef ReservedAllocator Allocator; typedef std::vector ContainerType; ContainerType& operator() () { return rv_.container(); } const ContainerType& operator() () const { return rv_.container(); } ContainerType* operator-> () { return rv_.operator->(); } const ContainerType* operator-> () const { return rv_.operator->(); } typedef typename VectorBase::size_type size_type; T& operator[] (size_type i) { return operator()()[i]; } const T& operator[] (size_type i) const { return operator()()[i]; } size_type size () const { return operator()().size(); } void reserve (size_type n) { operator()().reserve(n); } typedef typename VectorBase::value_type value_type; void resize (size_type n, value_type val = value_type()) { operator()().resize(n, val); } bool in_heap() const // for testing { return (rv_.reserved_buffer() != &rv_.container()[0]); } private: ReservedContainer rv_; }; /* class Vector*/ /* Vector class derived from VectorBase - to be passed as a parameter */ template ::size_type capacity> class VectorDerived : public VectorBase { public: typedef typename VectorBase::size_type size_type; typedef typename VectorBase::value_type value_type; typedef typename VectorBase::reference reference; typedef typename VectorBase::const_reference const_reference; VectorDerived() : VectorBase(), v_() {} template ::size_type C> VectorDerived(const Vector& other) : VectorBase(), v_() { v_().assign(other().begin(), other().end()); } reference operator[] (size_type i) { return v_[i]; } const_reference operator[] (size_type i) const { return v_[i]; } size_type size () const { return v_.size(); } void reserve (size_type n) { v_.reserve(); } void resize (size_type n, value_type val = value_type()) { v_.resize(); } private: Vector v_; }; /* class VectorDerived */ } /* namespace gu */ #endif /* _GU_VECTOR_HPP_ */ galera-3-25.3.20/galerautils/src/gu_mmh3.h0000644000015300001660000002461113042054732017755 0ustar jenkinsjenkins// Copyright (C) 2012 Codership Oy /** * @file MurmurHash3 header * * This code is based on the refrence C++ MurMurHash3 implementation by its * author Austin Appleby, who released it to public domain. 
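 *
 * Typical use, as a sketch only (msg/part1/part2 and the length variables
 * stand for the caller's own buffers; everything else is declared in this
 * header):
 *
 *   uint8_t digest[16];
 *   gu_mmh128 (msg, len, digest);            // one-shot, canonical byte order
 *   uint64_t h64 = gu_mmh128_64 (msg, len);  // one-shot, host-order integer
 *
 *   gu_mmh128_ctx_t ctx;                     // hashing a message by parts
 *   gu_mmh128_init (&ctx);
 *   gu_mmh128_append (&ctx, part1, len1);
 *   gu_mmh128_append (&ctx, part2, len2);
 *   uint64_t h = gu_mmh128_get64 (&ctx);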
* * $Id$ */ #ifndef _gu_mmh3_h_ #define _gu_mmh3_h_ #include "gu_byteswap.h" #include // for memset() and memcpy() #ifdef __cplusplus extern "C" { #endif //----------------------------------------------------------------------------- // Finalization mix - force all bits of a hash block to avalanche static GU_FORCE_INLINE uint32_t _mmh3_fmix32 (uint32_t h) { h ^= h >> 16; h *= 0x85ebca6b; h ^= h >> 13; h *= 0xc2b2ae35; h ^= h >> 16; return h; } static GU_FORCE_INLINE uint64_t _mmh3_fmix64 (uint64_t k) { k ^= k >> 33; k *= GU_ULONG_LONG(0xff51afd7ed558ccd); k ^= k >> 33; k *= GU_ULONG_LONG(0xc4ceb9fe1a85ec53); k ^= k >> 33; return k; } //----------------------------------------------------------------------------- static uint32_t const _mmh3_32_c1 = 0xcc9e2d51; static uint32_t const _mmh3_32_c2 = 0x1b873593; static GU_FORCE_INLINE void _mmh3_block_32 (uint32_t k1, uint32_t* h1) { k1 *= _mmh3_32_c1; k1 = GU_ROTL32(k1,15); k1 *= _mmh3_32_c2; *h1 ^= k1; *h1 = GU_ROTL32(*h1,13); *h1 *= 5; *h1 += 0xe6546b64; } static GU_FORCE_INLINE void _mmh3_blocks_32 (const uint32_t* const blocks,size_t const nblocks,uint32_t* h1) { //---------- // body size_t i; for (i = 0; i < nblocks; i++) { //----------------------------------------------------------------------------- // Block read - if your platform needs to do endian-swapping or can only // handle aligned reads, do the conversion here _mmh3_block_32 (gu_le32(blocks[i]), h1);/* convert from little-endian */ } } static GU_FORCE_INLINE uint32_t _mmh3_tail_32 (const uint8_t* const tail, size_t const len, uint32_t h1) { //---------- // tail #if 0 /* Reference implementation */ uint32_t k1 = 0; switch(len & 3) { case 3: k1 ^= tail[2] << 16; case 2: k1 ^= tail[1] << 8; case 1: k1 ^= tail[0]; k1 *= _mmh3_32_c1; k1 = GU_ROTL32(k1,15); k1 *= _mmh3_32_c2; h1 ^= k1; }; #else /* Optimized implementation */ size_t const shift = (len & 3) << 3; if (shift) { uint32_t k1 = gu_le32(((uint32_t*)tail)[0]) & (0x00ffffff>>(24-shift)); k1 *= _mmh3_32_c1; k1 = GU_ROTL32(k1,15); k1 *= _mmh3_32_c2; h1 ^= k1; } #endif /* Optimized implementation */ //---------- // finalization h1 ^= len; h1 = _mmh3_fmix32(h1); return h1; } static GU_FORCE_INLINE uint32_t _mmh32_seed (const void* key, size_t const len, uint32_t seed) { size_t const nblocks = len >> 2; const uint32_t* const blocks = (const uint32_t*)key; const uint8_t* const tail = (const uint8_t*)(blocks + nblocks); _mmh3_blocks_32 (blocks, nblocks, &seed); return _mmh3_tail_32 (tail, len, seed); } // same as FNV32 seed static uint32_t const GU_MMH32_SEED = GU_ULONG(2166136261); /*! 
A function to hash buffer in one go */ #define gu_mmh32(_buf, _len) \ _mmh32_seed (_buf, _len, GU_MMH32_SEED); /* * 128-bit MurmurHash3 */ static uint64_t const _mmh3_128_c1 = GU_ULONG_LONG(0x87c37b91114253d5); static uint64_t const _mmh3_128_c2 = GU_ULONG_LONG(0x4cf5ad432745937f); static GU_FORCE_INLINE void _mmh3_128_block (uint64_t k1, uint64_t k2, uint64_t* h1, uint64_t* h2) { k1 *= _mmh3_128_c1; k1 = GU_ROTL64(k1,31); k1 *= _mmh3_128_c2; *h1 ^= k1; *h1 = GU_ROTL64(*h1,27); *h1 += *h2; *h1 *= 5; *h1 += 0x52dce729; k2 *= _mmh3_128_c2; k2 = GU_ROTL64(k2,33); k2 *= _mmh3_128_c1; *h2 ^= k2; *h2 = GU_ROTL64(*h2,31); *h2 += *h1; *h2 *= 5; *h2 += 0x38495ab5; } static GU_FORCE_INLINE void _mmh3_128_blocks (const uint64_t* const blocks, size_t const nblocks, uint64_t* h1, uint64_t* h2) { //---------- // body size_t i; for(i = 0; i < nblocks; i++) { //----------------------------------------------------------------------------- // Block read - if your platform needs to do endian-swapping or can only // handle aligned reads, do the conversion here uint64_t k1 = gu_le64(blocks[i]); i++; uint64_t k2 = gu_le64(blocks[i]); _mmh3_128_block (k1, k2, h1, h2); } } static GU_FORCE_INLINE void _mmh3_128_tail (const uint8_t* const tail, size_t const len, uint64_t h1, uint64_t h2, uint64_t* const out) { //---------- // tail uint64_t k1 = 0; uint64_t k2 = 0; switch(len & 15) { case 15: k2 ^= ((uint64_t)tail[14]) << 48; case 14: k2 ^= ((uint64_t)tail[13]) << 40; case 13: k2 ^= ((uint64_t)tail[12]) << 32; case 12: k2 ^= ((uint64_t)tail[11]) << 24; case 11: k2 ^= ((uint64_t)tail[10]) << 16; case 10: k2 ^= ((uint64_t)tail[ 9]) << 8; case 9: k2 ^= ((uint64_t)tail[ 8]) << 0; k2 *= _mmh3_128_c2; k2 = GU_ROTL64(k2,33); k2 *= _mmh3_128_c1; h2 ^= k2; k1 = gu_le64(((uint64_t*)tail)[0]); k1 *= _mmh3_128_c1; k1 = GU_ROTL64(k1,31); k1 *= _mmh3_128_c2; h1 ^= k1; break; case 8: k1 ^= ((uint64_t)tail[ 7]) << 56; case 7: k1 ^= ((uint64_t)tail[ 6]) << 48; case 6: k1 ^= ((uint64_t)tail[ 5]) << 40; case 5: k1 ^= ((uint64_t)tail[ 4]) << 32; case 4: k1 ^= ((uint64_t)tail[ 3]) << 24; case 3: k1 ^= ((uint64_t)tail[ 2]) << 16; case 2: k1 ^= ((uint64_t)tail[ 1]) << 8; case 1: k1 ^= ((uint64_t)tail[ 0]) << 0; k1 *= _mmh3_128_c1; k1 = GU_ROTL64(k1,31); k1 *= _mmh3_128_c2; h1 ^= k1; }; //---------- // finalization h1 ^= len; h2 ^= len; h1 += h2; h2 += h1; h1 = _mmh3_fmix64(h1); h2 = _mmh3_fmix64(h2); h1 += h2; h2 += h1; out[0] = h1; out[1] = h2; } static GU_FORCE_INLINE void _mmh3_128_seed (const void* const key, size_t const len, uint64_t s1, uint64_t s2, uint64_t* const out) { size_t const nblocks = (len >> 4) << 1; /* using 64-bit half-blocks */ const uint64_t* const blocks = (const uint64_t*)(key); const uint8_t* const tail = (const uint8_t*)(blocks + nblocks); _mmh3_128_blocks (blocks, nblocks, &s1, &s2); _mmh3_128_tail (tail, len, s1, s2, out); } // same as FNV128 seed static uint64_t const GU_MMH128_SEED1 = GU_ULONG_LONG(0x6C62272E07BB0142); static uint64_t const GU_MMH128_SEED2 = GU_ULONG_LONG(0x62B821756295C58D); /* returns hash in the canonical byte order, as a byte array */ static GU_FORCE_INLINE void gu_mmh128 (const void* const msg, size_t const len, void* const out) { _mmh3_128_seed (msg, len, GU_MMH128_SEED1, GU_MMH128_SEED2, (uint64_t*)out); uint64_t* const res = (uint64_t*)out; res[0] = gu_le64(res[0]); res[1] = gu_le64(res[1]); } /* returns hash as an integer, in host byte-order */ static GU_FORCE_INLINE uint64_t gu_mmh128_64 (const void* const msg, size_t len) { uint64_t res[2]; _mmh3_128_seed (msg, len, 
GU_MMH128_SEED1, GU_MMH128_SEED2, res); return res[0]; } /* returns hash as an integer, in host byte-order */ static GU_FORCE_INLINE uint32_t gu_mmh128_32 (const void* const msg, size_t len) { uint64_t res[2]; _mmh3_128_seed (msg, len, GU_MMH128_SEED1, GU_MMH128_SEED2, res); return (uint32_t)res[0]; } /* * Functions to hash message by parts * (only 128-bit version, 32-bit is not relevant any more) */ typedef struct gu_mmh128_ctx { uint64_t hash[2]; uint64_t tail[2]; size_t length; } gu_mmh128_ctx_t; /*! Initialize/reset MMH context with a particular seed. * The seed is two 8-byte _integers_, obviously in HOST BYTE ORDER. * Should not be used directly. */ static GU_INLINE void _mmh128_init_seed (gu_mmh128_ctx_t* const mmh, uint64_t const s1, uint64_t const s2) { memset (mmh, 0, sizeof(*mmh)); mmh->hash[0] = s1; mmh->hash[1] = s2; } /*! Initialize MMH context with a default Galera seed. */ #define gu_mmh128_init(_mmh) \ _mmh128_init_seed (_mmh, GU_MMH128_SEED1, GU_MMH128_SEED2); /*! Apeend message part to hash context */ static GU_INLINE void gu_mmh128_append (gu_mmh128_ctx_t* const mmh, const void* part, size_t len) { size_t tail_len = mmh->length & 15; mmh->length += len; if (tail_len) /* there's something in the tail */// do we need this if()? { size_t const to_fill = 16 - tail_len; void* const tail_end = (uint8_t*)mmh->tail + tail_len; if (len >= to_fill) /* we can fill a full block */ { memcpy (tail_end, part, to_fill); _mmh3_128_block (gu_le64(mmh->tail[0]), gu_le64(mmh->tail[1]), &mmh->hash[0], &mmh->hash[1]); part = ((char*)part) + to_fill; len -= to_fill; } else { memcpy (tail_end, part, len); return; } } size_t const nblocks = (len >> 4) << 1; /* using 64-bit half-blocks */ const uint64_t* const blocks = (const uint64_t*)(part); _mmh3_128_blocks (blocks, nblocks, &mmh->hash[0], &mmh->hash[1]); /* save possible trailing bytes to tail */ memcpy (mmh->tail, blocks + nblocks, len & 15); } /*! 
Get the accumulated message hash (does not change the context) */ static GU_INLINE void gu_mmh128_get (const gu_mmh128_ctx_t* const mmh, void* const res) { uint64_t* const r = (uint64_t*)res; _mmh3_128_tail ((const uint8_t*)mmh->tail, mmh->length, mmh->hash[0], mmh->hash[1], r); r[0] = gu_le64(r[0]); r[1] = gu_le64(r[1]); } static GU_INLINE uint64_t gu_mmh128_get64 (const gu_mmh128_ctx_t* const mmh) { uint64_t res[2]; _mmh3_128_tail ((const uint8_t*)mmh->tail, mmh->length, mmh->hash[0], mmh->hash[1], res); return res[0]; } static GU_INLINE uint32_t gu_mmh128_get32 (const gu_mmh128_ctx_t* const mmh) { uint64_t res[2]; _mmh3_128_tail ((const uint8_t*)mmh->tail, mmh->length, mmh->hash[0], mmh->hash[1], res); return (uint32_t)res[0]; } /* * Below are fuctions with reference signatures for implementation verification */ extern void gu_mmh3_32 (const void* key, int len, uint32_t seed, void* out); #if 0 /* x86 variant is faulty and unsuitable for short keys, ignore */ extern void gu_mmh3_x86_128 (const void* key, int len, uint32_t seed, void* out); #endif /* 0 */ extern void gu_mmh3_x64_128 (const void* key, int len, uint32_t seed, void* out); #ifdef __cplusplus } #endif #endif /* _gu_mmh3_h_ */ galera-3-25.3.20/galerautils/src/gu_fdesc.hpp0000644000015300001660000000254213042054732020534 0ustar jenkinsjenkins/* * Copyright (C) 2009-2016 Codership Oy * * $Id$ */ #ifndef __GU_FDESC_HPP__ #define __GU_FDESC_HPP__ #include "gu_exception.hpp" #include "gu_types.hpp" // for off_t, byte_t #include namespace gu { class FileDescriptor { public: /* open existing file */ FileDescriptor (const std::string& fname, bool sync = true); /* (re)create file */ FileDescriptor (const std::string& fname, size_t length, bool allocate = true, bool sync = true); ~FileDescriptor (); int get() const { return fd_; } const std::string& name() const { return name_; } off_t size() const { return size_; } void sync() const; void unlink() const { ::unlink (name_.c_str()); } private: std::string const name_; int const fd_; off_t const size_; bool const sync_; // sync on close bool write_byte (off_t offset); void write_file (off_t start = 0); void prealloc (off_t start = 0); void constructor_common(); FileDescriptor (const FileDescriptor&); FileDescriptor& operator = (const FileDescriptor); }; } /* namespace gu */ #endif /* __GU_FDESC_HPP__ */ galera-3-25.3.20/galerautils/src/gu_mmap.cpp0000644000015300001660000000661213042054732020377 0ustar jenkinsjenkins/* * Copyright (C) 2009-2016 Codership Oy * * $Id$ */ #include "gu_mmap.hpp" #include "gu_logger.hpp" #include "gu_throw.hpp" #include "gu_macros.hpp" #include "gu_limits.h" // GU_PAGE_SIZE #include #include #if defined(__FreeBSD__) && defined(MAP_NORESERVE) /* FreeBSD has never implemented this flags and will deprecate it. 
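   It is #undef'd here; any platform that ends up without the flag gets
   MAP_NORESERVE defined to 0 just below, so the mmap() call can pass it
   unconditionally.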
*/ #undef MAP_NORESERVE #endif #ifndef MAP_NORESERVE #define MAP_NORESERVE 0 #endif // to avoid -Wold-style-cast extern "C" { static const void* const GU_MAP_FAILED = MAP_FAILED; } namespace gu { MMap::MMap (const FileDescriptor& fd, bool const sequential) : size (fd.size()), ptr (mmap (NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_NORESERVE, fd.get(), 0)), mapped (ptr != GU_MAP_FAILED) { if (!mapped) { gu_throw_error(errno) << "mmap() on '" << fd.name() << "' failed"; } #if defined(MADV_DONTFORK) if (posix_madvise (ptr, size, MADV_DONTFORK)) { # define MMAP_INHERIT_OPTION "MADV_DONTFORK" #elif defined(__FreeBSD__) if (minherit (ptr, size, INHERIT_NONE)) { # define MMAP_INHERIT_OPTION "INHERIT_NONE" #endif #if defined(MMAP_INHERIT_OPTION) int const err(errno); log_warn << "Failed to set " MMAP_INHERIT_OPTION " on " << fd.name() << ": " << err << " (" << strerror(err) << ")"; } #endif /* benefits are questionable */ if (sequential && posix_madvise (ptr, size, MADV_SEQUENTIAL)) { int const err(errno); log_warn << "Failed to set MADV_SEQUENTIAL on " << fd.name() << ": " << err << " (" << strerror(err) << ")"; } log_debug << "Memory mapped: " << ptr << " (" << size << " bytes)"; } void MMap::dont_need() const { if (posix_madvise(reinterpret_cast(ptr), size, MADV_DONTNEED)) { log_warn << "Failed to set MADV_DONTNEED on " << ptr << ": " << errno << " (" << strerror(errno) << ')'; } } void MMap::sync(void* const addr, size_t const length) const { /* libc msync() only accepts addresses multiple of page size, * rounding down */ static uint64_t const PAGE_SIZE_MASK(~(GU_PAGE_SIZE - 1)); uint8_t* const sync_addr(reinterpret_cast (uint64_t(addr) & PAGE_SIZE_MASK)); size_t const sync_length (length + (static_cast(addr) - sync_addr)); if (::msync(sync_addr, sync_length, MS_SYNC) < 0) { gu_throw_error(errno) << "msync(" << sync_addr << ", " << sync_length << ") failed"; } } void MMap::sync () const { log_info << "Flushing memory map to disk..."; sync(ptr, size); } void MMap::unmap () { if (munmap (ptr, size) < 0) { gu_throw_error(errno) << "munmap(" << ptr << ", " << size << ") failed"; } mapped = false; log_debug << "Memory unmapped: " << ptr << " (" << size <<" bytes)"; } MMap::~MMap () { if (mapped) { try { unmap(); } catch (Exception& e) { log_error << e.what(); } } } } galera-3-25.3.20/galerautils/src/gu_abort.h0000644000015300001660000000065013042054732020215 0ustar jenkinsjenkins// Copyright (C) 2012-2013 Codership Oy /** * @file "Clean" abort function to avoid stack and core dumps * * $Id$ */ #ifndef _gu_abort_h_ #define _gu_abort_h_ #ifdef __cplusplus extern "C" { #endif #include "gu_macros.h" /* This function is for clean aborts, when we can't gracefully exit otherwise */ extern void gu_abort() GU_NORETURN; #ifdef __cplusplus } #endif #endif /* _gu_abort_h_ */ galera-3-25.3.20/galerautils/src/gu_mutex.c0000644000015300001660000002362213042054732020247 0ustar jenkinsjenkins// Copyright (C) 2007 Codership Oy /** * Debug versions of thread functions * * $Id$ */ #include #include #include #include "galerautils.h" #if 0 /* Is it usable? 
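   (This compiled-out static initializer appears to be kept for reference
   only; at run time the debug wrappers are set up with gu_mutex_init_dbg()
   below.)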
*/ static const struct gu_mutex gu_mutex_init = { .target_mutex = PTHREAD_MUTEX_INITIALIZER, .control_mutex = PTHREAD_MUTEX_INITIALIZER, .lock_waiter_count = 0, .cond_waiter_count = 0, .holder_count = 0, .thread = 0, // unknown thread .file = __FILE__, .line = __LINE__ }; #endif int gu_mutex_init_dbg (struct gu_mutex *m, const pthread_mutexattr_t* attr, const char *file, unsigned int line) { m->file = file; m->line = line; m->lock_waiter_count = 0; m->cond_waiter_count = 0; m->holder_count = 0; m->thread = pthread_self(); pthread_mutex_init(&m->control_mutex, NULL); pthread_mutex_init(&m->target_mutex, attr); return 0; // as per pthread spec } int gu_mutex_lock_dbg(struct gu_mutex *m, const char *file, unsigned int line) { int err = 0; pthread_mutex_lock(&m->control_mutex); { if (m->holder_count > 0 && pthread_equal(pthread_self(), m->thread)) { // Have to explicitly submit file and line info as they come // from a totally different place gu_fatal("Second mutex lock attempt by the same thread, %lu, " "at %s:%d, first locked at %s:%d", pthread_self(), file, line, m->file, m->line); assert(0); err = EDEADLK; /* return error in case assert is not defined */ } m->lock_waiter_count++; } /* unlocking control mutex here since we may block waiting for target * mutext and unlocking target mutex again involves locking the control */ pthread_mutex_unlock(&m->control_mutex); if (err) return err; /* request the actual mutex */ if ((err = pthread_mutex_lock(&m->target_mutex))) { /* This i a valid situation - mutex could be destroyed */ gu_debug("%lu mutex lock error (%d: %s) at %s:%d", pthread_self(), err, strerror(err), file, line); return err; } /* need control mutex for info field changes */ if ((err = pthread_mutex_lock(&m->control_mutex))) { // do we need this check - it's only a control mutex? gu_fatal("%lu mutex lock error (%d: %s) at %s:%d", pthread_self(), err, strerror(err), file, line); assert(0); } else { if (gu_likely(m->holder_count == 0)) { m->thread = pthread_self(); m->lock_waiter_count--; m->holder_count++; m->file = file; m->line = line; } else { gu_fatal("Mutex lock granted %d times at %s:%d", m->holder_count, file, line); assert(0); } pthread_mutex_unlock(&m->control_mutex); } /* we have to return 0 here since target mutex was successfully locked */ return 0; } int gu_mutex_unlock_dbg (struct gu_mutex *m, const char *file, unsigned int line) { int err = 0; pthread_mutex_lock(&m->control_mutex); { /** must take into account that mutex unlocking can happen in * cleanup handlers when thread is terminated in cond_wait(). * Then holder_count would still be 0 (see gu_cond_wait()), * but cond_waiter - not */ if (m->holder_count == 0 && m->cond_waiter_count == 0) { gu_fatal ("%lu attempts to unlock unlocked mutex at %s:%d. " "Last use at %s:%d", pthread_self(), file, line, m->file ? m->file : "" , m->line); assert(0); } if (m->holder_count > 0 && !pthread_equal(pthread_self(), m->thread)) { /** last time pthread_t was unsigned long int */ gu_fatal ("%lu attempts to unlock mutex owned by %lu at %s:%d. 
" "Locked at %s:%d", pthread_self(), m->thread, file, line, m->file, m->line); assert(0); return EPERM; /** return in case assert is undefined */ } err = pthread_mutex_unlock (&m->target_mutex); if (gu_likely(!err)) { m->file = file; m->line = line; m->thread = 0; /* At this point it is difficult to say if we're unlocking * normally or from cancellation handler, if holder_count not 0 - * assume it is normal unlock, otherwise we decrement * cond_waiter_count */ if (gu_likely(m->holder_count)) { m->holder_count--; } else { if (gu_likely(0 != m->cond_waiter_count)) { m->cond_waiter_count--; } else { gu_fatal ("Internal galerautils error: both holder_count " "and cond_waiter_count are 0"); assert (0); } } } else { gu_fatal("Error: (%d,%d) during mutex unlock at %s:%d", err, errno, file, line); assert(0); } } pthread_mutex_unlock(&m->control_mutex); return err; } int gu_mutex_destroy_dbg (struct gu_mutex *m, const char *file, unsigned int line) { int err=0; pthread_mutex_lock(&m->control_mutex); { if (!m->file) { gu_fatal("%lu attempts to destroy uninitialized mutex at %s:%d", pthread_self(), file, line); assert(0); } if (m->holder_count != 0) { if (pthread_self() == m->thread) { gu_fatal ("%lu attempts to destroy mutex locked by " "itself at %s:%d", pthread_self(), m->file, m->line); assert (0); /* logical error in program */ } else { gu_debug("%lu attempts to destroy a mutex at %s:%d " "locked by %lu at %s:%d (not error)", pthread_self(), file, line, m->thread, m->file, m->line); // assert (0); // DELETE when not needed! } } if (m->cond_waiter_count != 0) { gu_debug("%lu attempts to destroy a mutex at %s:%d " "that is waited by %d thread(s)", pthread_self(), file, line, m->cond_waiter_count); assert (m->cond_waiter_count > 0); } if ((err = pthread_mutex_destroy(&m->target_mutex))) { gu_debug("Error (%d: %s, %d) during mutex destroy at %s:%d", err, strerror(err), errno, file, line); pthread_mutex_unlock (&m->control_mutex); return err; } m->file = 0; m->line = 0; m->thread = 0; } pthread_mutex_unlock(&m->control_mutex); while (pthread_mutex_destroy(&m->control_mutex)); return err; } int gu_cond_wait_dbg (pthread_cond_t *cond, struct gu_mutex *m, const char *file, unsigned int line) { int err = 0; // Unfortunately count updates here are not atomic with cond_wait. // But cond_wait() semantics does not allow them to be. 
pthread_mutex_lock (&m->control_mutex); { if (gu_unlikely(m->holder_count <= 0)) { gu_fatal ("%lu tries to wait for condition on unlocked mutex " "at %s %d", pthread_self(), file, line); assert (0); } else if (!pthread_equal(pthread_self(), m->thread)) { gu_fatal ("%lu tries to wait for condition on the mutex that" "belongs to %lu at %s %d", pthread_self(), m->thread, file, line); assert (0); } /** pthread_cond_wait frees the mutex */ m->holder_count--; m->cond_waiter_count++; m->thread = 0; assert (m->holder_count >= 0); } pthread_mutex_unlock(&m->control_mutex); if ((err = pthread_cond_wait (cond, &m->target_mutex))) { gu_fatal("Error (%d: %s, %d) during cond_wait at %s:%d", err, strerror(err), errno, file, line); assert(0); } pthread_mutex_lock (&m->control_mutex); { /** acquired mutex again */ m->holder_count++; m->cond_waiter_count--; m->thread = pthread_self(); } pthread_mutex_unlock(&m->control_mutex); return err; } #if defined(__APPLE__) int pthread_barrier_init (pthread_barrier_t *barrier, const pthread_barrierattr_t *attr, unsigned int count) { if(count == 0) { errno = EINVAL; return -1; } if(pthread_mutex_init (&barrier->mutex, 0) < 0) { return -1; } if(pthread_cond_init (&barrier->cond, 0) < 0) { pthread_mutex_destroy (&barrier->mutex); return -1; } barrier->tripCount = count; barrier->count = 0; return 0; } int pthread_barrier_destroy (pthread_barrier_t *barrier) { pthread_cond_destroy (&barrier->cond); pthread_mutex_destroy (&barrier->mutex); return 0; } int pthread_barrier_wait (pthread_barrier_t *barrier) { pthread_mutex_lock (&barrier->mutex); ++(barrier->count); if(barrier->count >= barrier->tripCount) { barrier->count = 0; pthread_cond_broadcast (&barrier->cond); pthread_mutex_unlock (&barrier->mutex); return 1; } else { pthread_cond_wait (&barrier->cond, &(barrier->mutex)); pthread_mutex_unlock (&barrier->mutex); return 0; } } #endif /* __APPLE__ */ galera-3-25.3.20/galerautils/src/gu_mem.h0000644000015300001660000000400413042054732017661 0ustar jenkinsjenkins// Copyright (C) 2007 Codership Oy /** * @file * Declarations of memory allocation functions and macros * * $Id$ */ #ifndef _gu_mem_h_ #define _gu_mem_h_ #include #include #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ /** @name Functions to help with dynamic allocation debugging. * Take additional __FILE__ and __LINE__ arguments. 
Should be * used as part of macros defined below */ /*@{*/ void* gu_malloc_dbg (size_t size, const char* file, unsigned int line); void* gu_calloc_dbg (size_t nmemb, size_t size, const char* file, unsigned int line); void* gu_realloc_dbg (void* ptr, size_t size, const char* file, unsigned int line); void gu_free_dbg (void* ptr, const char* file, unsigned int line); /*@}*/ /** Reports statistics on the current amount of allocated memory * total number of allocations and deallocations */ void gu_mem_stats (ssize_t* total, ssize_t* allocs, ssize_t* reallocs, ssize_t* deallocs); /** @name Applications should use the following macros */ /*@{*/ #ifdef DEBUG_MALLOC #define gu_malloc(S) gu_malloc_dbg ((S), __FILE__, __LINE__) #define gu_calloc(N,S) gu_calloc_dbg ((N), (S), __FILE__, __LINE__) #define gu_realloc(P,S) gu_realloc_dbg ((P), (S), __FILE__, __LINE__) #define gu_free(P) gu_free_dbg ((P), __FILE__, __LINE__) #else /* !DEBUG_MALLOC - use standard allocation routines */ #define gu_malloc(S) malloc ((S)) #define gu_calloc(N,S) calloc ((N), (S)) #define gu_realloc(P,S) realloc ((P), (S)) #define gu_free(P) free ((P)) #endif /* DEBUG_MALLOC */ /** Convenience macros - to avoid code clutter */ #define GU_MALLOC(type) (type*) gu_malloc (sizeof(type)) #define GU_MALLOCN(N,type) (type*) gu_malloc ((N) * sizeof(type)) #define GU_CALLOC(N,type) (type*) gu_calloc ((N), sizeof(type)) #define GU_REALLOC(P,N,type) (type*) gu_realloc((P), (N) * sizeof(type)) /*@}*/ #ifdef __cplusplus } #endif /* __cplusplus */ #endif /* _gu_mem_h_ */ galera-3-25.3.20/galerautils/src/gu_datetime.hpp0000644000015300001660000001465113042054732021250 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy * * $Id$ */ /*! * @file Date/time manipulation classes providing nanosecond resolution. */ #ifndef __GU_DATETIME__ #define __GU_DATETIME__ #include "gu_exception.hpp" #include "gu_regex.hpp" #include "gu_time.h" #include #include #include namespace gu { namespace datetime { /* Multiplier constants */ const long long NSec = 1; const long long USec = 1000*NSec; const long long MSec = 1000*USec; const long long Sec = 1000*MSec; const long long Min = 60*Sec; const long long Hour = 60*Min; const long long Day = 24*Hour; const long long Month = 30*Day; const long long Year = 12*Month; /*! * @brief Class representing time periods instead of * system clock time. */ class Period { public: /*! * @brief Constructor * * Duration format is PnYnMnDTnHnMnS where Y is year, M is month, * D is day, T is the time designator separating date and time * parts, H denotes hours, M (after T) is minutes and S seconds. * * All other n:s are expected to be integers except the one * before S which can be decimal to represent fractions of second. * * @param str Time period represented in ISO8601 duration format. 
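         *
         * Illustrative values (assuming the format described above):
         *
         *   Period("PT30S")     - 30 seconds
         *   Period("PT1M30.5S") - 1 minute and 30.5 seconds
         *   Period("P1DT2H")    - 1 day and 2 hours
         *
         * An empty string (the default) yields a zero-length period.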
*/ Period(const std::string& str = "") : nsecs() { if (str != "") parse(str); } Period(const long long nsecs_) : nsecs(nsecs_) { } static Period min() { return 0; } static Period max() { return std::numeric_limits::max();} bool operator==(const Period& cmp) const { return (nsecs == cmp.nsecs); } bool operator<(const Period& cmp) const { return (nsecs < cmp.nsecs); } bool operator>=(const Period& cmp) const { return !(*this < cmp); } Period operator+(const long long add) const { return (nsecs + add); } Period operator-(const long long dec) const { return (nsecs - dec); } Period operator*(const long long mul) const { return (nsecs*mul); } Period operator/(const long long div) const { return (nsecs/div); } long long get_nsecs() const { return nsecs; } Period operator+(const Period& add) const { return (nsecs + add.nsecs); } Period operator-(const Period& dec) const { return (nsecs - dec.nsecs); } private: friend class Date; friend std::istream& operator>>(std::istream&, Period&); static const char* const period_regex; /*! regexp string */ static RegEx const regex; /*! period string parser */ /*! * @brief Parse period string. */ void parse(const std::string&); long long nsecs; }; /*! * @brief Date/time representation. * * @todo Parsing date from string is not implemented yet, * only possible to get current system time or * maximum time. */ class Date { public: /*! * @brief Get system time. * @note This call should be deprecated in favor of calendar() * and monotonic(). */ static inline Date now() { return gu_time_monotonic(); } /*! * @brief Get time from system-wide realtime clock. */ static inline Date calendar() { return gu_time_calendar(); } /*! * @brief Get time from monotonic clock. */ static inline Date monotonic() { return gu_time_monotonic(); } /*! * @brief Get maximum representable timestamp. */ static inline Date max() { return std::numeric_limits::max(); } /*! * @brief Get zero time */ static inline Date zero() { return 0; } /*! * Return 64-bit timestamp representing system time in nanosecond * resolution. */ long long get_utc() const { return utc; } /* Standard comparision operators */ bool operator==(const Date cmp) const { return (utc == cmp.utc); } bool operator<(const Date cmp) const { return (utc < cmp.utc); } /*! * @brief Add period to Date */ Date operator+(const Period& add) const { return (utc + add.get_nsecs()); } /*! * @brief Decrement period from Date */ Date operator-(const Period& dec) const { return (utc - dec.get_nsecs()); } Period operator-(const Date& dec) const { return (utc - dec.utc); } Date(const long long utc_ = 0) : utc(utc_) { } /*! convert to timespec - for internal use */ void _timespec(timespec& ts) const { ts.tv_sec = utc / 1000000000L; ts.tv_nsec = utc % 1000000000L; } private: long long utc; /*!< System time in nanosecond precision */ /*! * @brief Parse date from string. * @todo Not implemented yet */ void parse(const std::string& str_); }; /*! * @brief Output operator for Date class. * @todo Not implemented yet */ std::ostream& operator<<(std::ostream&, const Date&); /*! * @brief Output operator for Period type. 
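         *
         * A minimal usage sketch (to_string() below funnels through this
         * operator; log_info is the galerautils logging macro defined
         * elsewhere):
         *
         *   log_info << "timeout is " << to_string(Period("PT30S"));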
*/ std::ostream& operator<<(std::ostream&, const Period&); inline std::string to_string(const Period& p) { std::ostringstream os; os << p; return os.str(); } inline std::istream& operator>>(std::istream& is, Period& p) { std::string str; is >> str; p.parse(str); return is; } } // namespace datetime } // namespace gu #endif // __GU_DATETIME__ galera-3-25.3.20/galerautils/src/gu_mmap.hpp0000644000015300001660000000115613042054732020402 0ustar jenkinsjenkins/* * Copyright (C) 2009-2016 Codership Oy * * $Id$ */ #ifndef __GCACHE_MMAP__ #define __GCACHE_MMAP__ #include "gu_fdesc.hpp" namespace gu { class MMap { public: size_t const size; void* const ptr; MMap (const FileDescriptor& fd, bool sequential = false); ~MMap (); void dont_need() const; void sync(void *addr, size_t length) const; void sync() const; void unmap(); private: bool mapped; // This class is definitely non-copyable MMap (const MMap&); MMap& operator = (const MMap); }; } /* namespace gu */ #endif /* __GCACHE_MMAP__ */ galera-3-25.3.20/galerautils/src/gu_rand.c0000644000015300001660000000212413042054732020023 0ustar jenkinsjenkins// Copyright (C) 2013-2015 Codership Oy /** * @file routines to generate "random" seeds for RNGs by collecting some easy * entropy. * * gu_rand_seed_long() goes for srand48() * * gu_rand_seed_int() goes for srand() and rand_r() * * $Id$ */ #include "gu_rand.h" #include "gu_hash.h" /*! Structure to hold entropy data. * Should be at least 20 bytes on 32-bit systems and 28 bytes on 64-bit */ /* Packed to avoid uninitialized data warnings when passed to hash */ struct gu_rse { long long time; const void* heap_ptr; const void* stack_ptr; long pid; }__attribute__((packed)); typedef struct gu_rse gu_rse_t; long int gu_rand_seed_long (long long time, const void* heap_ptr, pid_t pid) { gu_rse_t rse = { time, heap_ptr, &time, pid }; return gu_fast_hash64_medium (&rse, sizeof(rse)); } #if GU_WORDSIZE == 32 unsigned int gu_rand_seed_int (long long time, const void* heap_ptr, pid_t pid) { gu_rse_t rse = { time, heap_ptr, &time, pid }; return gu_fast_hash32_short (&rse, sizeof(rse)); } #endif /* GU_WORDSIZE == 32 */ galera-3-25.3.20/galerautils/src/gu_fifo.c0000644000015300001660000003461713042054732020036 0ustar jenkinsjenkins/* * Copyright (C) 2008-2013 Codership Oy * * Queue (FIFO) class implementation * * The driving idea behind this class is avoiding mallocs * at all costs on one hand, on the other - make it almost * as infinite as an ordinary linked list. FIFO properties * help achieving that. * * When needed this FIFO can be made very big, holding * millions or even billions of items while taking up * minimum space when there are few items in the queue. 
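 *
 * A minimal single-producer/single-consumer sketch (illustrative only,
 * error handling omitted; use_item() is a placeholder):
 *
 *   gu_fifo_t* q = gu_fifo_create (1 << 16, sizeof(int));
 *
 *   // producer side
 *   int* slot = (int*)gu_fifo_get_tail (q);       // blocks while FIFO is full
 *   if (slot) { *slot = 42; gu_fifo_push_tail (q); }
 *
 *   // consumer side
 *   int err;
 *   int* item = (int*)gu_fifo_get_head (q, &err); // blocks while FIFO is empty
 *   if (item) { use_item (*item); gu_fifo_pop_head (q); }
 *
 *   gu_fifo_close (q);     // wakes up any blocked getters and putters
 *   gu_fifo_destroy (q);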
*/ #define _DEFAULT_SOURCE #include #include #include #include "gu_assert.h" #include "gu_limits.h" #include "gu_mem.h" #include "gu_mutex.h" #include "gu_log.h" #include "gu_fifo.h" #include "galerautils.h" struct gu_fifo { ulong col_shift; ulong col_mask; ulong rows_num; ulong head; ulong tail; ulong row_size; ulong length; ulong length_mask; ulong alloc; long get_wait; long put_wait; long long q_len; long long q_len_samples; uint item_size; uint used; uint used_max; uint used_min; int get_err; bool closed; #ifndef NDEBUG bool locked; #endif gu_mutex_t lock; gu_cond_t get_cond; gu_cond_t put_cond; void* rows[]; }; /* Don't make rows less than 1K */ #define GCS_FIFO_MIN_ROW_POWER 10 typedef unsigned long long ull; /* constructor */ gu_fifo_t *gu_fifo_create (size_t length, size_t item_size) { int row_pwr = GCS_FIFO_MIN_ROW_POWER; ull row_len = 1 << row_pwr; ull row_size = row_len * item_size; int array_pwr = 1; // need at least 2 rows for alteration ull array_len = 1 << array_pwr; ull array_size = array_len * sizeof(void*); gu_fifo_t *ret = NULL; if (length > 0 && item_size > 0) { /* find the best ratio of width and height: * the size of a row array must be equal to that of the row */ while (array_len * row_len < length) { if (array_size < row_size) { array_pwr++; array_len = 1 << array_pwr; array_size = array_len * sizeof(void*); } else { row_pwr++; row_len = 1 << row_pwr; row_size = row_len * item_size; } } ull alloc_size = array_size + sizeof (gu_fifo_t); if (alloc_size > (size_t)-1) { gu_error ("Initial FIFO size %llu exceeds size_t range %zu", alloc_size, (size_t)-1); return NULL; } ull max_size = array_len * row_size + alloc_size; if (max_size > (size_t)-1) { gu_error ("Maximum FIFO size %llu exceeds size_t range %zu", max_size, (size_t)-1); return NULL; } if (max_size > gu_avphys_bytes()) { gu_error ("Maximum FIFO size %llu exceeds available memory " "limit %llu", max_size, gu_avphys_bytes()); return NULL; } if ((array_len * row_len) > (ull)GU_LONG_MAX) { gu_error ("Resulting queue length %llu exceeds max allowed %ld", array_len * row_len, GU_LONG_MAX); return NULL; } gu_debug ("Creating FIFO buffer of %llu elements of size %llu, " "memory min used: %zu, max used: %zu", array_len * row_len, item_size, alloc_size, alloc_size + array_len*row_size); ret = gu_malloc (alloc_size); if (ret) { memset (ret, 0, alloc_size); ret->col_shift = row_pwr; ret->col_mask = row_len - 1; ret->rows_num = array_len; ret->length = row_len * array_len; ret->length_mask = ret->length - 1; ret->item_size = item_size; ret->row_size = row_size; ret->alloc = alloc_size; gu_mutex_init (&ret->lock, NULL); gu_cond_init (&ret->get_cond, NULL); gu_cond_init (&ret->put_cond, NULL); } else { gu_error ("Failed to allocate %zu bytes for FIFO", alloc_size); } } return ret; } // defined as macro for proper line reporting #ifdef NDEBUG #define fifo_lock(q) \ if (gu_likely (0 == gu_mutex_lock (&q->lock))) {} \ else { \ gu_fatal ("Failed to lock queue"); \ abort(); \ } #else /* NDEBUG */ #define fifo_lock(q) \ if (gu_likely (0 == gu_mutex_lock (&q->lock))) { \ q->locked = true; \ } \ else { \ gu_fatal ("Failed to lock queue"); \ abort(); \ } #endif /* NDEBUG */ static inline int fifo_unlock (gu_fifo_t* q) { #ifndef NDEBUG q->locked = false; #endif return -gu_mutex_unlock (&q->lock); } #ifndef NDEBUG bool gu_fifo_locked (gu_fifo_t* q) { return q->locked; } #endif /* lock the queue */ void gu_fifo_lock (gu_fifo_t *q) { fifo_lock(q); } /* unlock the queue */ void gu_fifo_release (gu_fifo_t *q) { fifo_unlock(q); } static int 
fifo_flush (gu_fifo_t* q) { int ret = 0; /* if there are items in the queue, wait until they are all fetched */ while (q->used > 0 && 0 == ret) { /* will make getters to signal every time item is removed */ gu_warn ("Waiting for %lu items to be fetched.", q->used); q->put_wait++; ret = gu_cond_wait (&q->put_cond, &q->lock); } return ret; } static void fifo_close (gu_fifo_t* q) { if (!q->closed) { q->closed = true; /* force putters to quit */ /* don't overwrite existing get_err status, see gu_fifo_resume_gets() */ if (!q->get_err) q->get_err = -ENODATA; // signal all the idle waiting threads gu_cond_broadcast (&q->put_cond); q->put_wait = 0; gu_cond_broadcast (&q->get_cond); q->get_wait = 0; #if 0 (void) fifo_flush (q); #endif } } void gu_fifo_close (gu_fifo_t* q) { fifo_lock (q); fifo_close (q); fifo_unlock (q); } void gu_fifo_open (gu_fifo_t* q) { fifo_lock (q); q->closed = false; q->get_err = 0; fifo_unlock (q); } /* lock the queue and wait if it is empty */ static inline int fifo_lock_get (gu_fifo_t *q) { int ret = 0; fifo_lock(q); while (0 == ret && !(ret = q->get_err) && 0 == q->used) { #ifndef NDEBUG q->locked = false; #endif q->get_wait++; ret = -gu_cond_wait (&q->get_cond, &q->lock); #ifndef NDEBUG q->locked = true; #endif } return ret; } /* unlock the queue after getting item */ static inline int fifo_unlock_get (gu_fifo_t *q) { assert (q->used < q->length || 0 == q->length); if (q->put_wait > 0) { q->put_wait--; gu_cond_signal (&q->put_cond); } return fifo_unlock(q); } /* lock the queue and wait if it is full */ static inline int fifo_lock_put (gu_fifo_t *q) { int ret = 0; fifo_lock(q); while (0 == ret && q->used == q->length && !q->closed) { #ifndef NDEBUG q->locked = false; #endif q->put_wait++; ret = -gu_cond_wait (&q->put_cond, &q->lock); #ifndef NDEBUG q->locked = true; #endif } return ret; } /* unlock the queue after putting an item */ static inline int fifo_unlock_put (gu_fifo_t *q) { assert (q->used > 0); if (q->get_wait > 0) { q->get_wait--; gu_cond_signal (&q->get_cond); } return fifo_unlock(q); } #define FIFO_ROW(q,x) ((x) >> q->col_shift) /* div by row width */ #define FIFO_COL(q,x) ((x) & q->col_mask) /* remnant */ #define FIFO_PTR(q,x) \ ((uint8_t*)q->rows[FIFO_ROW(q, x)] + FIFO_COL(q, x) * q->item_size) /* Increment and roll over */ #define FIFO_INC(q,x) (((x) + 1) & q->length_mask) /*! If FIFO is not empty, returns pointer to the head item and locks FIFO, * otherwise blocks. Or returns NULL if FIFO is closed. */ void* gu_fifo_get_head (gu_fifo_t* q, int* err) { *err = fifo_lock_get (q); if (gu_likely(-ECANCELED != *err && q->used)) { return (FIFO_PTR(q, q->head)); } else { assert (q->get_err); fifo_unlock (q); return NULL; } } /*! Advances FIFO head and unlocks FIFO. */ void gu_fifo_pop_head (gu_fifo_t* q) { if (FIFO_COL(q, q->head) == q->col_mask) { /* removing last unit from the row */ ulong row = FIFO_ROW (q, q->head); assert (q->rows[row] != NULL); gu_free (q->rows[row]); q->rows[row] = NULL; q->alloc -= q->row_size; } q->head = FIFO_INC(q, q->head); q->used--; if (gu_unlikely(q->used < q->used_min)) { q->used_min = q->used; } if (fifo_unlock_get(q)) { gu_fatal ("Faled to unlock queue to get item."); abort(); } } /*! If FIFO is not full, returns pointer to the tail item and locks FIFO, * otherwise blocks. Or returns NULL if FIFO is closed. 
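 *  On success the FIFO remains locked until the slot is committed with
 *  gu_fifo_push_tail(), so the two calls must always be paired (sketch):
 *  fill the returned slot, then call gu_fifo_push_tail (q).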
*/ void* gu_fifo_get_tail (gu_fifo_t* q) { fifo_lock_put (q); if (gu_likely(!q->closed)) { // stop adding items when closed ulong row = FIFO_ROW(q, q->tail); assert (q->used < q->length); // check if row is allocated and allocate if not. if (NULL == q->rows[row] && NULL == (q->alloc += q->row_size, q->rows[row] = gu_malloc(q->row_size))) { q->alloc -= q->row_size; } else { return ((uint8_t*)q->rows[row] + FIFO_COL(q, q->tail) * q->item_size); } #if 0 // for debugging if (NULL == q->rows[row]) { gu_debug ("Allocating row %lu of queue %p, rows %p", row, q, q->rows); if (NULL == (q->rows[row] = gu_malloc(q->row_size))) { gu_debug ("Allocating row %lu failed", row); fifo_unlock (q); return NULL; } q->alloc += q->row_size; } return (q->rows[row] + FIFO_COL(q, q->tail) * q->item_size); #endif } fifo_unlock (q); return NULL; } /*! Advances FIFO tail and unlocks FIFO. */ void gu_fifo_push_tail (gu_fifo_t* q) { q->tail = FIFO_INC(q, q->tail); q->q_len += q->used; q->used++; if (gu_unlikely(q->used > q->used_max)) { q->used_max = q->used; } q->q_len_samples++; if (fifo_unlock_put(q)) { gu_fatal ("Faled to unlock queue to put item."); abort(); } } /*! returns how many items are in the queue */ long gu_fifo_length (gu_fifo_t* q) { return q->used; } /*! returns how many items were in the queue per push_tail() */ void gu_fifo_stats_get (gu_fifo_t* q, int* q_len, int* q_len_max, int* q_len_min, double* q_len_avg) { fifo_lock (q); *q_len = q->used; *q_len_max = q->used_max; *q_len_min = q->used_min; long long len = q->q_len; long long samples = q->q_len_samples; fifo_unlock (q); if (len >= 0 && samples >= 0) { if (samples > 0) { *q_len_avg = ((double)len) / samples; } else { assert (0 == len); *q_len_avg = 0.0; } } else { *q_len_avg = -1.0; } } void gu_fifo_stats_flush(gu_fifo_t* q) { fifo_lock (q); q->used_max = q->used; q->used_min = q->used; q->q_len = 0; q->q_len_samples = 0; fifo_unlock (q); } /* destructor - would block until all members are dequeued */ void gu_fifo_destroy (gu_fifo_t *queue) { fifo_lock (queue); { if (!queue->closed) fifo_close(queue); fifo_flush (queue); } fifo_unlock (queue); assert (queue->tail == queue->head); while (gu_cond_destroy (&queue->put_cond)) { fifo_lock (queue); gu_cond_signal (&queue->put_cond); fifo_unlock (queue); /* when thread sees that ret->used == 0, it must terminate */ } while (gu_cond_destroy (&queue->get_cond)) { fifo_lock (queue); gu_cond_signal (&queue->get_cond); fifo_unlock (queue); /* when thread sees that ret->used == 0, it must terminate */ } while (gu_mutex_destroy (&queue->lock)) continue; /* only one row migth be left */ { ulong row = FIFO_ROW(queue, queue->tail); if (queue->rows[row]) { assert (FIFO_COL(queue, queue->tail) != 0); gu_free (queue->rows[row]); queue->alloc -= queue->row_size; } else { assert (FIFO_COL(queue, queue->tail) == 0); } gu_free (queue); } } char *gu_fifo_print (gu_fifo_t *queue) { size_t tmp_len = 4096; char tmp[tmp_len]; char *ret; snprintf (tmp, tmp_len, "Queue (%p):\n" "\tlength = %lu\n" "\trows = %lu\n" "\tcolumns = %lu\n" "\tused = %u (%zu bytes)\n" "\talloctd = %lu bytes\n" "\thead = %lu, tail = %lu\n" "\tavg.len = %f" //", next = %lu" , (void*)queue, queue->length, queue->rows_num, queue->col_mask + 1, queue->used, (size_t)queue->used * queue->item_size, queue->alloc, queue->head, queue->tail, queue->q_len_samples > 0 ? 
((double)queue->q_len)/queue->q_len_samples : 0.0 //, queue->next ); ret = strdup (tmp); return ret; } int gu_fifo_cancel_gets (gu_fifo_t* q) { if (q->get_err && -ENODATA != q->get_err) { gu_error ("Attempt to cancel FIFO gets in state: %d (%s)", q->get_err, strerror(-q->get_err)); return -EBADFD; } assert (!q->get_err || q->closed); q->get_err = -ECANCELED; /* force getters to quit with specific error */ if (q->get_wait) { gu_cond_broadcast (&q->get_cond); q->get_wait = 0; } return 0; } int gu_fifo_resume_gets (gu_fifo_t* q) { int ret = -1; fifo_lock(q); if (-ECANCELED == q->get_err) { q->get_err = q->closed ? -ENODATA : 0; ret = 0; } else { gu_error ("Attempt to resume FIFO gets in state: %d (%s)", q->get_err, strerror(-q->get_err)); ret = -EBADFD; } fifo_unlock(q); return ret; } galera-3-25.3.20/galerautils/src/gu_lock_step.c0000644000015300001660000000671113042054732021070 0ustar jenkinsjenkins/* * Copyright (C) 2008 Codership Oy * * $Id$ */ /* * Universally Unique IDentifier. RFC 4122. * Time-based implementation. * */ #include // abort() #include // error codes #include #include #include // strerror() #include "gu_log.h" #include "gu_assert.h" #include "gu_time.h" #include "gu_lock_step.h" void gu_lock_step_init (gu_lock_step_t* ls) { gu_mutex_init (&ls->mtx, NULL); gu_cond_init (&ls->cond, NULL); ls->wait = 0; ls->cont = 0; ls->enabled = false; } void gu_lock_step_destroy (gu_lock_step_t* ls) { // this is not really fool-proof, but that's not for fools to use while (gu_lock_step_cont(ls, 10)) {}; gu_cond_destroy (&ls->cond); gu_mutex_destroy (&ls->mtx); assert (0 == ls->wait); } void gu_lock_step_enable (gu_lock_step_t* ls, bool enable) { if (!gu_mutex_lock (&ls->mtx)) { ls->enabled = enable; gu_mutex_unlock (&ls->mtx); } else { gu_fatal ("Mutex lock failed"); assert (0); abort(); } } void gu_lock_step_wait (gu_lock_step_t* ls) { if (!gu_mutex_lock (&ls->mtx)) { if (ls->enabled) { if (!ls->cont) { // wait for signal ls->wait++; gu_cond_wait (&ls->cond, &ls->mtx); } else { // signal to signaller gu_cond_signal (&ls->cond); ls->cont--; } } gu_mutex_unlock (&ls->mtx); } else { gu_fatal ("Mutex lock failed"); assert (0); abort(); } } /* returns how many waiters are there */ long gu_lock_step_cont (gu_lock_step_t* ls, long timeout_ms) { long ret = -1; if (!gu_mutex_lock (&ls->mtx)) { if (ls->enabled) { if (ls->wait > 0) { // somebody's waiting ret = ls->wait; gu_cond_signal (&ls->cond); ls->wait--; } else if (timeout_ms > 0) { // wait for waiter // what a royal mess with times! Why timeval exists? 
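                /* Sketch of the conversion below: gettimeofday() gives the
                 * current wall-clock time, gu_timeval_add() advances it by
                 * the timeout (converted from milliseconds to seconds, hence
                 * the 0.001 factor), and the result is re-expressed as an
                 * absolute timespec deadline for gu_cond_timedwait(). */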
struct timeval now; struct timespec timeout; long err; gettimeofday (&now, NULL); gu_timeval_add (&now, timeout_ms * 0.001); timeout.tv_sec = now.tv_sec; timeout.tv_nsec = now.tv_usec * 1000; ls->cont++; do { err = gu_cond_timedwait (&ls->cond, &ls->mtx, &timeout); } while (EINTR == err); assert ((0 == err) || (ETIMEDOUT == err && ls->cont > 0)); ret = (0 == err); // successful rendezvous with waiter ls->cont -= (0 != err); // self-decrement in case of error } else if (timeout_ms < 0) { // wait forever long err; ls->cont++; err = gu_cond_wait (&ls->cond, &ls->mtx); ret = (0 == err); // successful rendezvous with waiter ls->cont -= (0 != err); // self-decrement in case of error } else { // don't wait ret = 0; } } gu_mutex_unlock (&ls->mtx); } else { gu_fatal ("Mutex lock failed"); assert (0); abort(); } return ret; } galera-3-25.3.20/galerautils/src/gu_int128.h0000644000015300001660000001252313042054732020135 0ustar jenkinsjenkins// Copyright (C) 2012 Codership Oy /** * @file 128-bit arithmetic macros. This is so far needed only for FNV128 * hash algorithm * * $Id$ */ #ifndef _gu_int128_h_ #define _gu_int128_h_ #include "gu_arch.h" #include "gu_byteswap.h" #include #if defined(__SIZEOF_INT128__) typedef int __attribute__((__mode__(__TI__))) int128_t; typedef unsigned int __attribute__((__mode__(__TI__))) uint128_t; typedef int128_t gu_int128_t; typedef uint128_t gu_uint128_t; #define GU_SET128(_a, hi64, lo64) _a = (((uint128_t)hi64) << 64) + lo64 #define GU_MUL128_INPLACE(_a, _b) _a *= _b #define GU_IMUL128_INPLACE(_a, _b) GU_MUL128_INPLACE(_a, _b) #define GU_EQ128(_a, _b) (_a == _b) #else /* Uncapable of 16-byte integer arythmetic */ #if defined(GU_LITTLE_ENDIAN) #define GU_64LO 0 #define GU_64HI 1 #define GU_32LO 0 #define GU_32HI 3 #define GU_32_0 0 #define GU_32_1 1 #define GU_32_2 2 #define GU_32_3 3 typedef union gu_int128 { uint64_t u64[2]; uint32_t u32[4]; struct {uint32_t lo; uint64_t mid; int32_t hi;}__attribute__((packed)) m; #ifdef __cplusplus gu_int128() : m() {} gu_int128(int64_t hi, uint64_t lo) : m() { u64[0] = lo; u64[1] = hi; } #endif } gu_int128_t; typedef union gu_uint128 { uint64_t u64[2]; uint32_t u32[4]; struct {uint32_t lo; uint64_t mid; uint32_t hi;}__attribute__((packed)) m; #ifdef __cplusplus gu_uint128() : m() {} gu_uint128(uint64_t hi, uint64_t lo) : m() { u64[0] = lo; u64[1] = hi; } #endif } gu_uint128_t; #ifdef __cplusplus #define GU_SET128(_a, hi64, lo64) _a = gu_uint128(hi64, lo64) #else #define GU_SET128(_a, hi64, lo64) _a = { .u64 = { lo64, hi64 } } #endif #define GU_MUL128_INPLACE(_a,_b) { \ uint64_t m00 = (uint64_t)(_a).u32[0] * (_b).u32[0]; \ uint64_t m10 = (uint64_t)(_a).u32[1] * (_b).u32[0]; \ uint64_t m20 = (uint64_t)(_a).u32[2] * (_b).u32[0]; \ uint64_t m01 = (uint64_t)(_a).u32[0] * (_b).u32[1]; \ uint64_t m02 = (uint64_t)(_a).u32[0] * (_b).u32[2]; \ uint64_t m11 = (uint64_t)(_a).u32[1] * (_b).u32[1]; \ uint32_t m30 = (_a).u32[3] * (_b).u32[0]; \ uint32_t m21 = (_a).u32[2] * (_b).u32[1]; \ uint32_t m12 = (_a).u32[1] * (_b).u32[2]; \ uint32_t m03 = (_a).u32[0] * (_b).u32[3]; \ (_a).u64[GU_64LO] = m00; (_a).u64[GU_64HI] = 0; \ (_a).m.mid += m10; (_a).m.hi += ((_a).m.mid < m10); \ (_a).m.mid += m01; (_a).m.hi += ((_a).m.mid < m01); \ (_a).u64[GU_64HI] += m20 + m11 + m02; \ (_a).u32[GU_32HI] += m30 + m21 + m12 + m03; \ } #else /* Big-Endian */ #define GU_64HI 0 #define GU_64LO 1 #define GU_32HI 0 #define GU_32LO 3 typedef union gu_int128 { uint64_t u64[2]; uint32_t u32[4]; struct {int32_t hi; uint64_t mid; uint32_t lo;}__attribute__((packed)) m; #ifdef 
__cplusplus gu_int128() {} gu_int128(int64_t hi, uint64_t lo) { u64[0] = hi; u64[1] = lo; } #endif } gu_int128_t; typedef union gu_uint128 { uint64_t u64[2]; uint32_t u32[4]; struct {uint32_t hi; uint64_t mid; uint32_t lo;}__attribute__((packed)) m; #ifdef __cplusplus gu_uint128() {} gu_uint128(uint64_t hi, uint64_t lo) { u64[0] = hi; u64[1] = lo; } #endif } gu_uint128_t; #ifdef __cplusplus #define GU_SET128(_a, hi64, lo64) _a = gu_uint128(hi64, lo64) #else #define GU_SET128(_a, hi64, lo64) _a = { .u64 = { hi64, lo64 } } #endif #define GU_MUL128_INPLACE(_a,_b) { \ uint64_t m33 = (uint64_t)_a.u32[3] * _b.u32[3]; \ uint64_t m23 = (uint64_t)_a.u32[2] * _b.u32[3]; \ uint64_t m13 = (uint64_t)_a.u32[1] * _b.u32[3]; \ uint64_t m32 = (uint64_t)_a.u32[3] * _b.u32[2]; \ uint64_t m31 = (uint64_t)_a.u32[3] * _b.u32[1]; \ uint64_t m22 = (uint64_t)_a.u32[2] * _b.u32[2]; \ uint32_t m30 = _a.u32[3] * _b.u32[0]; \ uint32_t m21 = _a.u32[2] * _b.u32[1]; \ uint32_t m12 = _a.u32[1] * _b.u32[2]; \ uint32_t m03 = _a.u32[0] * _b.u32[3]; \ _a.u64[GU_64LO] = m00; _a.u64[GU_64HI] = 0; \ _a.m.mid += m23; _a.m.hi += (_a.m.mid < m23); \ _a.m.mid += m32; _a.m.hi += (_a.m.mid < m32); \ _a.u64[GU_64HI] += m13 + m22 + m31; \ _a.u32[GU_32HI] += m30 + m21 + m12 + m03; \ } #endif /* Big-Endian */ #define GU_IMUL128_INPLACE(_a, _b) { \ uint32_t sign = ((_a).u32[GU_32HI] ^ (_b).u32[GU_32HI]) & 0x80000000UL; \ GU_MUL128_INPLACE (_a, _b); \ (_a).u32[GU_32HI] |= sign; \ } #define GU_EQ128(_a, _b) (!memcmp(&_a,&_b,sizeof(_a))) #endif /* __SIZEOF_INT128__ */ /* Not sure how to make it both portable, efficient and still follow the * signature of other byteswap functions at the same time. * So this one does inplace conversion. */ #ifdef __cplusplus extern "C" { #endif static inline void gu_bswap128 (gu_uint128_t* const arg) { uint64_t* x = (uint64_t*)arg; uint64_t tmp = gu_bswap64(x[0]); x[0] = gu_bswap64(x[1]); x[1] = tmp; } #ifdef __cplusplus } #endif #ifdef GU_LITTLE_ENDIAN # define gu_le128(x) {} # define gu_be128(x) gu_bswap128(x) #else # define gu_le128(x) gu_bswap128(x) # define gu_be128(x) {} #endif /* GU_LITTLE_ENDIAN */ #define htog128(x) gu_le128(x) #define gtoh128(x) htog128(x) #endif /* _gu_int128_h_ */ galera-3-25.3.20/galerautils/src/gu_system.h0000644000015300001660000000151213042054732020430 0ustar jenkinsjenkins// Copyright (C) 2013 Codership Oy /** * @system/os/platform dependent functions/macros * * $Id$ */ #ifndef _gu_system_h_ #define _gu_system_h_ #define _GNU_SOURCE // program_invocation_name, program_invocation_short_name #include #include // getexecname, getprogname #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ /* See: http://lists.gnu.org/archive/html/bug-gnulib/2010-12/txtrjMzutB7Em.txt * for implementation of GU_SYS_PROGRAM_NAME on other platforms */ #if defined(__sun__) # define GU_SYS_PROGRAM_NAME getexecname () #elif defined(__APPLE__) || defined(__FreeBSD__) # define GU_SYS_PROGRAM_NAME getprogname () #elif defined(__linux__) # define GU_SYS_PROGRAM_NAME program_invocation_name #endif #ifdef __cplusplus } #endif /* __cplusplus */ #endif /* _gu_system_h_ */ galera-3-25.3.20/galerautils/src/gu_arch.h0000644000015300001660000000263713042054732020032 0ustar jenkinsjenkins// Copyright (C) 2012 Codership Oy /** * @file CPU architecture related functions/macros * * $Id$ */ #ifndef _gu_arch_h_ #define _gu_arch_h_ #if defined(HAVE_ENDIAN_H) # include #elif defined(HAVE_SYS_ENDIAN_H) /* FreeBSD */ # include #elif defined(HAVE_SYS_BYTEORDER_H) # include #elif defined(__APPLE__) # include #else # error 
"No byte order header file detected" #endif #if defined(__BYTE_ORDER) # if __BYTE_ORDER == __LITTLE_ENDIAN # define GU_LITTLE_ENDIAN # endif #elif defined(_BYTE_ORDER) /* FreeBSD */ # if _BYTE_ORDER == _LITTLE_ENDIAN # define GU_LITTLE_ENDIAN # endif #elif defined(__APPLE__) && defined(__DARWIN_BYTE_ORDER) # if __DARWIN_BYTE_ORDER == __DARWIN_LITTLE_ENDIAN # define GU_LITTLE_ENDIAN # endif #elif defined(__sun__) # if !defined(_BIG_ENDIAN) # define GU_LITTLE_ENDIAN # endif #else # error "Byte order not defined" #endif #if defined(__sun__) # if defined (_LP64) # define GU_WORDSIZE 64 # else # define GU_WORDSIZE 32 # endif #elif defined(__APPLE__) || defined(__FreeBSD__) # include # define GU_WORDSIZE __WORDSIZE #else # include # define GU_WORDSIZE __WORDSIZE #endif #if (GU_WORDSIZE != 32) && (GU_WORDSIZE != 64) # error "Unsupported wordsize" #endif /* I'm not aware of the platforms that don't, but still */ #define GU_ALLOW_UNALIGNED_READS 1 #endif /* _gu_arch_h_ */ galera-3-25.3.20/galerautils/src/gu_thread.cpp0000644000015300001660000000473113042054732020714 0ustar jenkinsjenkins// // Copyright (C) 2016 Codership Oy // #include "gu_thread.hpp" #include "gu_utils.hpp" #include "gu_string_utils.hpp" #include "gu_throw.hpp" #include #include static std::string const SCHED_OTHER_STR ("other"); static std::string const SCHED_FIFO_STR ("fifo"); static std::string const SCHED_RR_STR ("rr"); static std::string const SCHED_UNKNOWN_STR("unknown"); static inline void parse_thread_schedparam(const std::string& param, int& policy, int& prio) { std::vector sv(gu::strsplit(param, ':')); if (sv.size() != 2) { gu_throw_error(EINVAL) << "Invalid schedparam: " << param; } if (sv[0] == SCHED_OTHER_STR) policy = SCHED_OTHER; else if (sv[0] == SCHED_FIFO_STR) policy = SCHED_FIFO; else if (sv[0] == SCHED_RR_STR) policy = SCHED_RR; else gu_throw_error(EINVAL) << "Invalid scheduling policy: " << sv[0]; prio = gu::from_string(sv[1]); } gu::ThreadSchedparam gu::ThreadSchedparam::system_default(SCHED_OTHER, 0); gu::ThreadSchedparam::ThreadSchedparam(const std::string& param) : policy_(), prio_ () { if (param == "") { *this = system_default; } else { parse_thread_schedparam(param, policy_, prio_); } } void gu::ThreadSchedparam::print(std::ostream& os) const { std::string policy_str; switch (policy()) { case SCHED_OTHER: policy_str = SCHED_OTHER_STR ; break; case SCHED_FIFO: policy_str = SCHED_FIFO_STR ; break; case SCHED_RR: policy_str = SCHED_RR_STR ; break; default: policy_str = SCHED_UNKNOWN_STR; break; } os << policy_str << ":" << prio(); } gu::ThreadSchedparam gu::thread_get_schedparam(pthread_t thd) { int policy; struct sched_param sp; int err; if ((err = pthread_getschedparam(thd, &policy, &sp)) != 0) { gu_throw_error(err) << "Failed to read thread schedparams"; } return ThreadSchedparam(policy, sp.sched_priority); } void gu::thread_set_schedparam(pthread_t thd, const gu::ThreadSchedparam& sp) { #if defined(__sun__) struct sched_param spstr = { sp.prio(), { 0, } /* sched_pad array */}; #else struct sched_param spstr = { sp.prio() }; #endif int err; if ((err = pthread_setschedparam(thd, sp.policy(), &spstr)) != 0) { gu_throw_error(err) << "Failed to set thread schedparams " << sp; } } galera-3-25.3.20/galerautils/src/gu_byteswap.h0000644000015300001660000000655613042054732020757 0ustar jenkinsjenkins// Copyright (C) 2012 Codership Oy /** * @file Byte swapping functions/macros * * $Id$ */ #ifndef _gu_byteswap_h_ #define _gu_byteswap_h_ #include "gu_arch.h" #include "gu_types.h" #include "gu_macros.h" /* * 
Platform-dependent macros */ #if defined(_MSC_VER) #include #define GU_ROTL32(x,y) _rotl(x,y) #define GU_ROTL64(x,y) _rotl64(x,y) #else /* !defined(_MSC_VER) */ static GU_FORCE_INLINE uint32_t GU_ROTL32 (uint32_t x, int8_t r) { return (x << r) | (x >> (32 - r)); } static GU_FORCE_INLINE uint64_t GU_ROTL64 (uint64_t x, int8_t r) { return (x << r) | (x >> (64 - r)); } #endif /* !defined(_MSC_VER) */ /* * End of paltform-dependent macros */ #if defined(HAVE_BYTESWAP_H) # include // for bswap_16(x), bswap_32(x), bswap_64(x) #elif defined(__APPLE__) # include // for OSSwapInt16(x), etc. #endif /* HAVE_BYTESWAP_H */ #if defined(__APPLE__) /* do not use OSSwapIntXX, because gcc44 gives old-style cast warnings */ # define gu_bswap16 _OSSwapInt16 # define gu_bswap32 _OSSwapInt32 # define gu_bswap64 _OSSwapInt64 #elif defined(__FreeBSD__) /* do not use bswapXX, because gcc44 gives old-style cast warnings */ # define gu_bswap16 __bswap16_var # define gu_bswap32 __bswap32_var # define gu_bswap64 __bswap64_var #elif defined(__sun__) # define gu_bswap16 BSWAP_16 # define gu_bswap32 BSWAP_32 # define gu_bswap64 BSWAP_64 #elif defined(bswap16) # define gu_bswap16 bswap16 # define gu_bswap32 bswap32 # define gu_bswap64 bswap64 #elif defined(bswap_16) # define gu_bswap16 bswap_16 # define gu_bswap32 bswap_32 # define gu_bswap64 bswap_64 #else # error "No byteswap macros are defined" #endif /* @note: there are inline functions behind these macros below, * so typesafety is taken care of... However C++ still has issues: */ #ifdef __cplusplus // To pacify C++. Not loosing much optimization on 2 bytes anyways. #include #undef gu_bswap16 static GU_FORCE_INLINE uint16_t gu_bswap16(uint16_t const x) // Even though x is declared as 'uint16_t', g++-4.4.1 still treats results // of operations with it as 'int' and freaks out on return with -Wconversion. { return static_cast((x >> 8) | (x << 8)); } #endif // __cplusplus #if defined(GU_LITTLE_ENDIAN) /* convert to/from Little Endian representation */ #define gu_le16(x) (x) #define gu_le32(x) (x) #define gu_le64(x) (x) /* convert to/from Big Endian representation */ #define gu_be16(x) gu_bswap16(x) #define gu_be32(x) gu_bswap32(x) #define gu_be64(x) gu_bswap64(x) #else /* Big-Endian */ /* convert to/from Little Endian representation */ #define gu_le16(x) gu_bswap16(x) #define gu_le32(x) gu_bswap32(x) #define gu_le64(x) gu_bswap64(x) /* convert to/from Big Endian representation */ #define gu_be16(x) (x) #define gu_be32(x) (x) #define gu_be64(x) (x) #endif /* Big-Endian */ /* Analogues to htonl and friends. Since we'll be dealing mostly with * little-endian architectures, there is more sense to use little-endian * as default */ #define htogs(x) gu_le16(x) #define gtohs(x) htogs(x) #define htogl(x) gu_le32(x) #define gtohl(x) htogl(x) /* Analogues to htogs() and friends, suffixed with type width */ #define htog16(x) gu_le16(x) #define gtoh16(x) htog16(x) #define htog32(x) gu_le32(x) #define gtoh32(x) htog32(x) #define htog64(x) gu_le64(x) #define gtoh64(x) htog64(x) #endif /* _gu_byteswap_h_ */ galera-3-25.3.20/galerautils/src/gu_string_utils.hpp0000644000015300001660000000176613042054732022205 0ustar jenkinsjenkins// Copyright (C) 2009-2010 Codership Oy #ifndef __GU_STRING_UTILS_HPP__ #define __GU_STRING_UTILS_HPP__ #include #include namespace gu { /*! * @brief Split string into tokens using given separator * * @param sep token separator */ std::vector strsplit(const std::string& s, char sep = ' '); /*! * @brief Split string into tokens using given separator and escape. 
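     *        For example (a sketch of the intended behaviour, assuming the
     *        escape character protects the separator): with sep ';' and
     *        esc '\', the input "one;two\;three" yields the tokens "one"
     *        and "two;three".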
* * @param sep token separator * @param esc separator escape sequence ('\0' to disable escapes) * @param empty whether to return empty tokens */ std::vector tokenize(const std::string& s, char sep = ' ', char esc = '\\', bool empty = false); /*! Remove non-alnum symbols from the beginning and end of string */ void trim (std::string& s); } #endif /* __GU_STRING_UTILS_HPP__ */ galera-3-25.3.20/galerautils/src/gu_errno.h0000644000015300001660000000131313042054732020230 0ustar jenkinsjenkins/* * Copyright (C) 2014 Codership Oy */ #ifndef GU_ERRNO_H #define GU_ERRNO_H #include #if defined(__APPLE__) || defined(__FreeBSD__) # define GU_ELAST ELAST #else /* must be high enough to not collide with system errnos but lower than 256 */ # define GU_ELAST 200 #endif #ifndef EBADFD # define EBADFD (GU_ELAST+1) #endif #ifndef EREMCHG # define EREMCHG (GU_ELAST+2) #endif #ifndef ENOTUNIQ # define ENOTUNIQ (GU_ELAST+3) #endif #ifndef ERESTART # define ERESTART (GU_ELAST+4) #endif #ifndef ENOTRECOVERABLE # define ENOTRECOVERABLE (GU_ELAST+5) #endif #ifndef ENODATA # define ENODATA (GU_ELAST+6) #endif #endif /* GU_STR_H */ galera-3-25.3.20/galerautils/src/gu_unordered.hpp0000644000015300001660000001440313042054732021436 0ustar jenkinsjenkins// // Copyright (C) 2010 Codership Oy // //! // @file gu_unordered.hpp unordered_[multi]map definition // // We still have environments where neither boost or std unordered // stuff is available. Wrapper classes are provided for alternate // implementations with standard semantics. // // For usage see either boost or tr1 specifications for unordered_[multi]map // #ifndef GU_UNORDERED_HPP #define GU_UNORDERED_HPP #if defined(HAVE_BOOST_UNORDERED_MAP_HPP) #include #include #elif defined(HAVE_UNORDERED_MAP) #include #include #elif defined(HAVE_TR1_UNORDERED_MAP) #include #include #else #error "no unordered map available" #endif #include "gu_throw.hpp" namespace gu { template class UnorderedHash { public: #if defined(HAVE_BOOST_UNORDERED_MAP_HPP) typedef boost::hash Type; #elif defined(HAVE_UNORDERED_MAP) typedef std::hash Type; #elif defined(HAVE_TR1_UNORDERED_MAP) typedef std::tr1::hash Type; #endif size_t operator()(const K& k) const { return Type()(k); } }; template size_t HashValue(const K& key) { return UnorderedHash()(key); } template , class P = std::equal_to, class A = std::allocator > class UnorderedSet { #if defined(HAVE_BOOST_UNORDERED_MAP_HPP) typedef boost::unordered_set type; #elif defined(HAVE_UNORDERED_MAP) typedef std::unordered_set type; #elif defined(HAVE_TR1_UNORDERED_MAP) typedef std::tr1::unordered_set type; #endif type impl_; public: typedef typename type::value_type value_type; typedef typename type::iterator iterator; typedef typename type::const_iterator const_iterator; UnorderedSet() : impl_() { } explicit UnorderedSet(A a) : impl_(a) { } iterator begin() { return impl_.begin(); } const_iterator begin() const { return impl_.begin(); } iterator end() { return impl_.end(); } const_iterator end() const { return impl_.end(); } std::pair insert(const value_type& k) { return impl_.insert(k); } iterator insert_unique(const value_type& k) { std::pair ret(insert(k)); if (ret.second == false) gu_throw_fatal << "insert unique failed"; return ret.first; } iterator find(const K& key) { return impl_.find(key); } const_iterator find(const K& key) const { return impl_.find(key); } iterator erase(iterator i) { return impl_.erase(i); } size_t size() const { return impl_.size(); } bool empty() const { return impl_.empty(); } void clear() { impl_.clear(); } void 
rehash(size_t n) { impl_.rehash(n); } #if defined(HAVE_UNORDERED_MAP) void reserve(size_t n) { impl_.reserve(n); } #endif }; template , class P = std::equal_to, class A = std::allocator > > class UnorderedMap { #if defined(HAVE_BOOST_UNORDERED_MAP_HPP) typedef boost::unordered_map type; #elif defined(HAVE_UNORDERED_MAP) typedef std::unordered_map type; #elif defined(HAVE_TR1_UNORDERED_MAP) typedef std::tr1::unordered_map type; #endif type impl_; public: typedef typename type::value_type value_type; typedef typename type::iterator iterator; typedef typename type::const_iterator const_iterator; UnorderedMap() : impl_() { } iterator begin() { return impl_.begin(); } const_iterator begin() const { return impl_.begin(); } iterator end() { return impl_.end(); } const_iterator end() const { return impl_.end(); } std::pair insert(const std::pair& kv) { return impl_.insert(kv); } iterator insert_unique(const std::pair& kv) { std::pair ret(insert(kv)); if (ret.second == false) gu_throw_fatal << "insert unique failed"; return ret.first; } iterator find(const K& key) { return impl_.find(key); } const_iterator find(const K& key) const { return impl_.find(key); } iterator erase(iterator i) { return impl_.erase(i); } size_t size() const { return impl_.size(); } bool empty() const { return impl_.empty(); } void clear() { impl_.clear(); } void rehash(size_t n) { impl_.rehash(n); } }; template > class UnorderedMultimap { #if defined(HAVE_BOOST_UNORDERED_MAP_HPP) typedef boost::unordered_multimap type; #elif defined(HAVE_UNORDERED_MAP) typedef std::unordered_multimap type; #elif defined(HAVE_TR1_UNORDERED_MAP) typedef std::tr1::unordered_multimap type; #endif type impl_; public: typedef typename type::value_type value_type; typedef typename type::iterator iterator; typedef typename type::const_iterator const_iterator; UnorderedMultimap() : impl_() { } void clear() { impl_.clear(); } iterator begin() { return impl_.begin(); } const_iterator begin() const { return impl_.begin(); } iterator end() { return impl_.end(); } const_iterator end() const { return impl_.end(); } iterator insert(const std::pair& kv) { return impl_.insert(kv); } iterator find(const K& key) { return impl_.find(key); } const_iterator find(const K& key) const { return impl_.find(key); } std::pair equal_range(const K& key) { return impl_.equal_range(key); } std::pair equal_range(const K& key) const { return impl_.equal_range(key); } void erase(iterator i) { impl_.erase(i); } size_t size() const { return impl_.size(); } bool empty() const { return impl_.empty(); } }; } #endif // GU_UNORDERED_HPP galera-3-25.3.20/galerautils/src/gu_hexdump.c0000644000015300001660000000362313042054732020556 0ustar jenkinsjenkins// Copyright (C) 2012-2013 Codership Oy /** * @file Functions to dump buffer contents in a readable form * * $Id$ */ #include "gu_hexdump.h" #include "gu_macros.h" #define GU_ASCII_0 0x30 #define GU_ASCII_10 0x3a #define GU_ASCII_A 0x41 #define GU_ASCII_a 0x61 #define GU_ASCII_A_10 (GU_ASCII_A - GU_ASCII_10) #define GU_ASCII_a_10 (GU_ASCII_a - GU_ASCII_10) static GU_FORCE_INLINE int _hex_code (uint8_t const x) { return (x + GU_ASCII_0 + (x > 9)*GU_ASCII_a_10); } static GU_FORCE_INLINE void _write_byte_binary (char* const str, uint8_t const byte) { str[0] = _hex_code(byte >> 4); str[1] = _hex_code(byte & 0x0f); } static GU_FORCE_INLINE void _write_byte_alpha (char* const str, uint8_t const byte) { str[0] = (char)byte; str[1] = '.'; } #define GU_ASCII_ALPHA_START 0x20U /* ' ' */ #define GU_ASCII_ALPHA_END 0x7eU /* '~' */ #define 
GU_ASCII_ALPHA_INTERVAL (GU_ASCII_ALPHA_END - GU_ASCII_ALPHA_START) static GU_FORCE_INLINE bool _byte_is_alpha (uint8_t const byte) { return (byte - GU_ASCII_ALPHA_START <= GU_ASCII_ALPHA_INTERVAL); } /*! Dumps contents of the binary buffer into a readable form */ void gu_hexdump(const void* buf, ssize_t const buf_size, char* str, ssize_t str_size, bool alpha) { const uint8_t* b = (uint8_t*)buf; ssize_t i; str_size--; /* reserve a space for \0 */ for (i = 0; i < buf_size && str_size > 1;) { if (alpha && _byte_is_alpha (b[i])) _write_byte_alpha (str, b[i]); else _write_byte_binary (str, b[i]); str += 2; str_size -= 2; i++; if (0 == (i % 4) && str_size > 0 && i < buf_size) { /* insert space after every 4 bytes and newline after every 32 */ str[0] = (i % GU_HEXDUMP_BYTES_PER_LINE) ? ' ' : '\n'; str_size--; str++; } } str[0] = '\0'; } galera-3-25.3.20/galerautils/src/gu_abort.c0000644000015300001660000000125613042054732020213 0ustar jenkinsjenkins// Copyright (C) 2011-2013 Codership Oy /** * @file Clean abort function * * $Id$ */ #include "gu_abort.h" #include "gu_system.h" #include "gu_log.h" #include /* for setrlimit() */ #include /* for signal() */ #include /* for abort() */ void gu_abort (void) { /* avoid coredump */ struct rlimit core_limits = { 0, 0 }; setrlimit (RLIMIT_CORE, &core_limits); /* restore default SIGABRT handler */ signal (SIGABRT, SIG_DFL); #if defined(GU_SYS_PROGRAM_NAME) gu_info ("%s: Terminated.", GU_SYS_PROGRAM_NAME); #else gu_info ("Program terminated."); #endif abort(); } galera-3-25.3.20/galerautils/src/gu_prodcons.hpp0000644000015300001660000001021413042054732021272 0ustar jenkinsjenkins/* * Copyright (C) 2008 Codership Oy * * $Id:$ */ /*! * @file gu_prodcons.hpp Synchronous producer/consumer interface */ #include "gu_lock.hpp" // For byte_t #include "gu_buffer.hpp" /* Forward declarations */ namespace gu { namespace prodcons { class MessageData; class Message; class MessageQueue; class Producer; class Consumer; } } class gu::prodcons::MessageData { public: virtual ~MessageData() { } }; /*! * @brief Message class for Producer/Consumer communication */ class gu::prodcons::Message { Producer* producer; /*! Producer associated to this message */ int val; /*! Integer value (command/errno) */ const MessageData* data; public: /*! * @brief Constructor * * @param prod_ Producer associated to the message * @param data_ Message data * @param val_ Integer value associated to the message */ Message(Producer* prod_ = 0, const MessageData* data_ = 0, int val_ = -1) : producer(prod_), val(val_), data(data_) { } Message(const Message& msg) : producer(msg.producer), val(msg.val), data(msg.data) { } Message& operator=(const Message& msg) { producer = msg.producer; val = msg.val; data = msg.data; return *this; } /*! * @brief Get producer associated to the message * * @return Producer associated to the message */ Producer& get_producer() const { return *producer; } /*! * @brief Get data associated to the message * * @return Data associated to the message */ const MessageData* get_data() const { return data; } /*! * @brief Get int value associated to the message * * @return Int value associated to the message */ int get_val() const { return val; } }; /*! * @brief Producer interface */ class gu::prodcons::Producer { gu::Cond cond; /*! Condition variable */ Consumer& cons; /*! Consumer associated to this producer */ /*! * @brief Return reference to condition variable * * @return Reference to condition variable */ Cond& get_cond() { return cond; } friend class Consumer; public: /*! 
* @brief Consturctor * * @param cons_ Consumer associated to this producer */ Producer(Consumer& cons_) : cond(), cons(cons_) { } /*! * @brief Send message to the consumer and wait for response * * @param[in] msg Message to be sent to consumer * @param[out] ack Ack message returned by the Consumer, containing error code */ void send(const Message& msg, Message* ack); }; /*! * @brief Consumer interface */ class gu::prodcons::Consumer { Mutex mutex; /*! Mutex for internal locking */ MessageQueue* mque; /*! Message queue for producer messages */ MessageQueue* rque; /*! Message queue for ack messages */ Consumer(const Consumer&); void operator=(const Consumer&); protected: /*! * @brief Get the first message from the message queue * * Get the first message from the message queue. Note that * this method does not remove the first message from message queue. * * @return Next message from the message queue */ const Message* get_next_msg(); /*! * @brief Return ack message for the producer * * Return ack message for the producer. Note that this method * pops the first message from the message queue. * * @param msg Ack message corresponding the current head of mque */ void return_ack(const Message& msg); /*! * @brief Virtual method to notify consumer about queued message */ virtual void notify() = 0; public: /*! * @brief Default constructor */ Consumer(); /*! * @brief Default destructor */ virtual ~Consumer(); /*! * @brief Queue message and wait for ack * * @param[in] msg Message to be queued * @param[out] ack Ack returned by consumer */ void queue_and_wait(const Message& msg, Message* ack); }; galera-3-25.3.20/galerautils/src/gu_time.c0000644000015300001660000001442213042054732020041 0ustar jenkinsjenkins// Copyright (C) 2013 Codership Oy /** * @file time manipulation functions/macros * * $Id: $ */ #if defined(__APPLE__) #include #include // struct timespec #include // gettimeofday #include // clock_get_time #include // host_get_clock_service #include // mach_absolute_time, mach_timebase_info #include #include "gu_time.h" #define NSEC_PER_SEC 1000000000 #define NSEC_PER_USEC 1000 # if defined(__LP64__) // OS X comm page time offsets // see http://www.opensource.apple.com/source/xnu/xnu-2050.22.13/osfmk/i386/cpu_capabilities.h #define nt_tsc_base "0x50" #define nt_scale "0x58" #define nt_shift "0x5c" #define nt_ns_base "0x60" #define nt_generation "0x68" #define gtod_generation "0x6c" #define gtod_ns_base "0x70" #define gtod_sec_base "0x78" static inline int64_t nanotime (void) { int64_t ntime; __asm volatile ( "mov $0x7fffffe00000, %%rbx;" /* comm page base */ "0:" /* Loop trying to take a consistent snapshot of the time parameters. */ "movl "gtod_generation"(%%rbx), %%r8d;" "testl %%r8d, %%r8d;" "jz 1f;" "movl "nt_generation"(%%rbx), %%r9d;" "testl %%r9d, %%r9d;" "jz 0b;" "rdtsc;" "movq "nt_tsc_base"(%%rbx), %%r10;" "movl "nt_scale"(%%rbx), %%r11d;" "movq "nt_ns_base"(%%rbx), %%r12;" "cmpl "nt_generation"(%%rbx), %%r9d;" "jne 0b;" "movq "gtod_ns_base"(%%rbx), %%r13;" "movq "gtod_sec_base"(%%rbx), %%r14;" "cmpl "gtod_generation"(%%rbx), %%r8d;" "jne 0b;" /* Gathered all the data we need. Compute time. */ /* ((tsc - nt_tsc_base) * nt_scale) >> 32 + nt_ns_base - gtod_ns_base + gtod_sec_base*1e9 */ /* The multiply and shift extracts the top 64 bits of the 96-bit product. 
*/ "shlq $32, %%rdx;" "addq %%rdx, %%rax;" "subq %%r10, %%rax;" "mulq %%r11;" "shrdq $32, %%rdx, %%rax;" "addq %%r12, %%rax;" "subq %%r13, %%rax;" "imulq $1000000000, %%r14;" "addq %%r14, %%rax;" "jmp 2f;" "1:" /* Fall back to system call (usually first call in this thread). */ "movq %%rsp, %%rdi;" /* rdi must be non-nil, unused */ "movq $0, %%rsi;" "movl $(0x2000000+116), %%eax;" /* SYS_gettimeofday */ "syscall; /* may destroy rcx and r11 */" /* sec is in rax, usec in rdx */ /* return nsec in rax */ "imulq $1000000000, %%rax;" "imulq $1000, %%rdx;" "addq %%rdx, %%rax;" "2:" : "=a"(ntime) : /* no input parameters */ : "%rbx", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14" ); return ntime; } static inline int64_t nanouptime (void) { int64_t ntime; __asm volatile ( "movabs $0x7fffffe00000, %%rbx;" /* comm page base */ "0:" /* Loop trying to take a consistent snapshot of the time parameters. */ "movl "nt_generation"(%%rbx), %%r9d;" "testl %%r9d, %%r9d;" "jz 0b;" "rdtsc;" "movq "nt_tsc_base"(%%rbx), %%r10;" "movl "nt_scale"(%%rbx), %%r11d;" "movq "nt_ns_base"(%%rbx), %%r12;" "cmpl "nt_generation"(%%rbx), %%r9d;" "jne 0b;" /* Gathered all the data we need. Compute time. */ /* ((tsc - nt_tsc_base) * nt_scale) >> 32 + nt_ns_base */ /* The multiply and shift extracts the top 64 bits of the 96-bit product. */ "shlq $32, %%rdx;" "addq %%rdx, %%rax;" "subq %%r10, %%rax;" "mulq %%r11;" "shrdq $32, %%rdx, %%rax;" "addq %%r12, %%rax;" : "=a"(ntime) : /* no input parameters */ : "%rbx", "%rcx", "%rdx", "%rsi", "%rdi", "%r9", "%r10", "%r11", "%r12" ); return ntime; } int clock_gettime (clockid_t clk_id, struct timespec * tp) { int64_t abstime = 0; if (tp == NULL) { return EFAULT; } switch (clk_id) { case CLOCK_REALTIME: abstime = nanotime (); break; case CLOCK_MONOTONIC: abstime = nanouptime (); break; default: errno = EINVAL; return -1; } tp->tv_sec = abstime / (uint64_t)NSEC_PER_SEC; tp->tv_nsec = (uint32_t)(abstime % (uint64_t)NSEC_PER_SEC); return 0; } #else /* !__LP64__ */ static struct mach_timebase_info g_mti; int clock_gettime (clockid_t clk_id, struct timespec * tp) { int64_t abstime = 0; mach_timebase_info_data_t mti; /* {uint32_t numer, uint32_t denom} */ if (tp == NULL) { return EFAULT; } switch (clk_id) { case CLOCK_REALTIME: struct timeval tv; if (gettimeofday (&tv, NULL) != 0) { return -1; } tp->tv_sec = tv.tv_sec; tp->tv_nsec = tv.tv_usec * NSEC_PER_USEC; return 0; case CLOCK_MONOTONIC: abstime = mach_absolute_time (); break; default: errno = EINVAL; return -1; } if (g_mti.denom == 0) { struct mach_timebase_info mti; mach_timebase_info (&mti); g_mti.numer = mti.numer; OSMemoryBarrier (); g_mti.denom = mti.denom; } nanos = (uint64_t)(abstime * (((double)g_mti.numer) / ((double)g_mti.denom))); tp->tv_sec = nanos / (uint64_t)NSEC_PER_SEC; tp->tv_nsec = (uint32_t)(nanos % (uint64_t)NSEC_PER_SEC); return 0; } #endif /* !__LP64__ */ #else /* !__APPLE__ */ #ifdef __GNUC__ // error: ISO C forbids an empty translation unit int dummy_var_gu_time; #endif #endif /* __APPLE__ */ galera-3-25.3.20/galerautils/src/gu_throw.hpp0000644000015300001660000000511313042054732020610 0ustar jenkinsjenkins/* * Copyright (C) 2009-2013 Codership Oy * * $Id$ */ /*! * @file Classes to allow throwing more verbose exceptions. Should be only * used from one-line macros below. Concrete classes intended to be final. */ #ifndef __GU_THROW__ #define __GU_THROW__ #include #include #include #include #include "gu_macros.h" #include "gu_exception.hpp" namespace gu { /*! 
"base" class */ class ThrowBase { protected: const char* const file; const char* const func; int const line; std::ostringstream os; ThrowBase (const char* file_, const char* func_, int line_) : file (file_), func (func_), line (line_), os () {} private: ThrowBase (const ThrowBase&); ThrowBase& operator= (const ThrowBase&); friend class ThrowError; friend class ThrowFatal; }; /* final*/ class ThrowError //: public ThrowBase { public: ThrowError (const char* file_, const char* func_, int line_, int err_) : base (file_, func_, line_), err (err_) {} ~ThrowError() GU_NORETURN { base.os << ": " << err << " (" << ::strerror(err) << ')'; Exception e(base.os.str(), err); e.trace (base.file, base.func, base.line); // cppcheck-suppress exceptThrowInDestructor throw e; } std::ostringstream& msg () { return base.os; } private: ThrowBase base; int const err; }; /* final*/ class ThrowFatal { public: ThrowFatal (const char* file, const char* func, int line) : base (file, func, line) {} ~ThrowFatal () GU_NORETURN { base.os << " (FATAL)"; Exception e(base.os.str(), ENOTRECOVERABLE); e.trace (base.file, base.func, base.line); // cppcheck-suppress exceptThrowInDestructor throw e; } std::ostringstream& msg () { return base.os; } private: ThrowBase base; }; } // Usage: gu_throw_xxxxx << msg1 << msg2 << msg3; #define gu_throw_error(err_) \ gu::ThrowError(__FILE__, __FUNCTION__, __LINE__, err_).msg() #define gu_throw_fatal \ gu::ThrowFatal(__FILE__, __FUNCTION__, __LINE__).msg() #endif // __GU_THROW__ galera-3-25.3.20/galerautils/src/gu_to.h0000644000015300001660000001040413042054732017526 0ustar jenkinsjenkins/* * Copyright (C) 2008 Codership Oy * * $Id$ */ /*! * @file gu_to.h Public TO monitor API */ #ifndef _gu_to_h_ #define _gu_to_h_ #ifdef __cplusplus extern "C" { #endif #include #include #include /*! @typedef @brief Sequence number type. */ typedef int64_t gu_seqno_t; /*! Total Order object */ typedef struct gu_to gu_to_t; /*! @brief Creates TO object. * TO object can be used to serialize access to application * critical section using sequence number. * * @param len A length of the waiting queue. Should be no less than the * possible maximum number of threads competing for the resource, * but should not be too high either. Perhaps 1024 is good enough * for most applications. * @param seqno A starting sequence number * (the first to be used by gu_to_grab()). * @return Pointer to TO object or NULL in case of error. */ extern gu_to_t* gu_to_create (int len, gu_seqno_t seqno); /*! @brief Destroys TO object. * * @param to A pointer to TO object to be destroyed * @return 0 in case of success, negative code in case of error. * In particular -EBUSY means the object is used by other threads. */ extern long gu_to_destroy (gu_to_t** to); /*! @brief Grabs TO resource in the specified order. * On successful return the mutex associated with specified TO is locked. * Must be released gu_to_release(). @see gu_to_release * * @param to TO resource to be acquired. * @param seqno The order at which TO resouce should be aquired. For any N * gu_to_grab (to, N) will return exactly after * gu_to_release (to, N-1). * @return 0 in case of success, negative code in case of error. * -EAGAIN means that there are too many threads waiting for TO * already. It is safe to try again later. * -ECANCEL if waiter was canceled, seqno is skipped in TO * -EINTR if wait was interrupted, must retry grabbing later */ extern long gu_to_grab (gu_to_t* to, gu_seqno_t seqno); /*! @brief Releases TO specified resource. 
* On succesful return unlocks the mutex associated with TO. * TO must be previously acquired with gu_to_grab(). @see gu_to_grab * * @param to TO resource that was previously acquired with gu_to_grab(). * @param seqno The same number with which gu_to_grab() was called. * @return 0 in case of success, negative code in case of error. Any error * here is an application error - attempt to release TO resource * out of order (not paired with gu_to_grab()). */ extern long gu_to_release (gu_to_t* to, gu_seqno_t seqno); /*! @brief The last sequence number that had been used to access TO object. * Note that since no locks are held, it is a conservative estimation. * It is guaranteed however that returned seqno is no longer in use. * * @param to A pointer to TO object. * @return GCS sequence number. Since GCS TO sequence starts with 1, this * sequence can start with 0. */ extern gu_seqno_t gu_to_seqno (gu_to_t* to); /*! @brief cancels a TO monitor waiter making it return immediately * It is assumed that the caller is currenly holding the TO. * The to-be-cancelled waiter can be some later transaction but also * some earlier transaction. Tests have shown that the latter case * can also happen. * * @param to A pointer to TO object. * @param seqno Seqno of the waiter object to be cancelled * @return 0 for success and -ERANGE, if trying to cancel an earlier * transaction */ extern long gu_to_cancel (gu_to_t *to, gu_seqno_t seqno); /*! * Self cancel to without attempting to enter critical secion */ extern long gu_to_self_cancel(gu_to_t *to, gu_seqno_t seqno); /*! @brief interrupts from TO monitor waiting state. * Seqno remains valid in the queue and later seqnos still need to * wait for this seqno to be released. * * The caller can (and must) later try gu_to_grab() again or cancel * the seqno with gu_to_self_cancel(). * * @param to A pointer to TO object. * @param seqno Seqno of the waiter object to be interrupted * @return 0 for success and -ERANGE, if trying to interrupt an already * used transaction */ extern long gu_to_interrupt (gu_to_t *to, gu_seqno_t seqno); #ifdef __cplusplus } #endif #endif // _gu_to_h_ galera-3-25.3.20/galerautils/src/gu_reserved_container.hpp0000644000015300001660000001616013042054732023332 0ustar jenkinsjenkins// Copyright (C) 2013 Codership Oy /*! * ReservedContainer template. It is a wrapper for a container and a reserved * buffer to allocate elements from. * * For more rationale see * http://src.chromium.org/chrome/trunk/src/base/containers/stack_container.h * * It is not called "StackContainer" because it is not only for objects * allocated on the stack. * * $Id$ */ #ifndef _GU_RESERVED_CONTAINER_ #define _GU_RESERVED_CONTAINER_ #include "chromium/aligned_memory.h" #include "gu_logger.hpp" #include // size_t, ptrdiff_t and NULL #include // malloc() and free() #include #include // placement new and std::bad_alloc namespace gu { /*! * ReservedAllocator is an allocator for STL containers that can use a * prealocated buffer (supplied at construction time) for initial container * storage allocation. If the number of elements exceeds buffer capacity, it * overflows to heap. * * Unlike the Chromium code, this does not derive from std::allocator, but * implements the whole thing. * * NOTE1: container must support reserve() method. 
* * NOTE2: it won't work with containers that require allocator to have default * constructor, like std::basic_string */ template class ReservedAllocator { public: typedef chromium::AlignedBuffer Buffer; typedef T* pointer; typedef const T* const_pointer; typedef T& reference; typedef const T& const_reference; typedef T value_type; typedef size_t size_type; // making size_type unsigned int does not seem to reduce footprint typedef ptrdiff_t difference_type; template struct rebind { typedef ReservedAllocator other; }; T* address(T& t) const { return &t; } const T* address(const T& t) const { return &t; } size_type max_size() const { return size_type(-1)/2/sizeof(T); } void construct (T* const p, const T& t) const { new (p) T(t); } void destroy (T* const p) const { p->~T(); } // Storage allocated from this can't be deallocated from other bool operator==(const ReservedAllocator& other) const { return (buffer_ == other.buffer_); } bool operator!=(const ReservedAllocator& other) const { return !(*this == other); } ReservedAllocator(Buffer& buf, size_type n = 0) : buffer_(&buf), used_(n) {} ReservedAllocator(const ReservedAllocator& other) : buffer_(other.buffer_), used_(other.used_) { // log_debug << "Copy ctor\n"; } template ReservedAllocator(const ReservedAllocator&) : buffer_(NULL), used_(reserved) { // log_debug << "Rebinding ctor\n"; } ~ReservedAllocator() {} T* allocate(size_type const n, void* hint = NULL) { if (n == 0) return NULL; if (reserved - used_ >= n /* && buffer_ != NULL */) { assert (buffer_ != NULL); if (diagnostic) { log_info << "Allocating " << n << '/' << (reserved - used_) << " from reserve"; } T* const ret(buffer_->base_ptr() + used_); used_ += n; return ret; } if (n <= max_size()) { if (diagnostic) { log_warn << "Allocating " << n << " from heap"; } void* ret = malloc(n * sizeof(T)); if (NULL != ret) return static_cast(ret); } throw std::bad_alloc(); } void deallocate(T* const p, size_type const n) { if (size_type(p - buffer_->base_ptr()) < reserved) { assert (used_ > 0); if (buffer_->base_ptr() + used_ == p + n) { /* last allocated buffer, can shrink */ used_ -= n; } else { /* cannot recycle reserved space in this case */ assert(p + n <= buffer_->base_ptr() + used_); } } else { free(p); } } size_type used() const { return used_; } private: /* even though we initially allocate buffer in ReservedContainer directly * before this, STL containers insist on copying allocators, so we need * a pointer to buffer to be an explicit member (and waste another 8 bytes*/ Buffer* buffer_; size_type used_; ReservedAllocator& operator=(const ReservedAllocator&); }; /* class ReservedAllocator */ /*! * ReservedContainer is a wrapper for * - fixed size nicely aligned buffer * - ReservedAllocator that uses the buffer * - container type that uses allocator * * the point is to have a container allocated on the stack to use stack buffer * for element storage. */ template class ReservedContainer { public: ReservedContainer() : buffer_ (), /* Actual Allocator instance used by container_ should be * copy-constructed from the temporary passed to container ctor. * Copy-construction preserves pointer to buffer, which is not * temporary. This works at least with std::vector */ container_(Allocator(buffer_)) { /* Make the container use most of the buffer by reserving our buffer * size before doing anything else. */ container_.reserve(reserved); } /* * Getters for the actual container. * * Danger: any copies of this made using the copy constructor must have * shorter lifetimes than the source. 
The copy will share the same allocator * and therefore the same stack buffer as the original. Use std::copy to * copy into a "real" container for longer-lived objects. */ ContainerType& container() { return container_; } const ContainerType& container() const { return container_; } ContainerType& operator()() { return container_; } const ContainerType& operator()() const { return container_; } /* * Support operator-> to get to the container. * This allows nicer syntax like: * ReservedContainer<...> foo; * std::sort(foo->begin(), foo->end()); */ ContainerType* operator->() { return &container_; } const ContainerType* operator->() const { return &container_; } /* For testing only */ typedef typename ContainerType::value_type ContainedType; const ContainedType* reserved_buffer() const { return buffer_.base_ptr(); } private: typedef ReservedAllocator Allocator; typedef typename Allocator::Buffer Buffer; Buffer buffer_; ContainerType container_; /* Note that container will use another instance of Allocator, copy * constructed from allocator_, so any changes won't be re*/ ReservedContainer(const ReservedContainer&); ReservedContainer& operator=(const ReservedContainer&); }; /* class ReservedContainer */ } /* namespace gu */ #endif /* _GU_RESERVED_CONTAINER_ */ galera-3-25.3.20/galerautils/src/gu_time.h0000644000015300001660000000570513042054732020052 0ustar jenkinsjenkins// Copyright (C) 2008 Codership Oy /** * @file time manipulation functions/macros * * $Id$ */ #ifndef _gu_time_h_ #define _gu_time_h_ #include #include #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ /** Returns seconds */ static inline double gu_timeval_diff (struct timeval* left, struct timeval* right) { long long diff = left->tv_sec; diff = ((diff - right->tv_sec)*1000000LL) + left->tv_usec - right->tv_usec; return (((double)diff) * 1.0e-06); } static inline void gu_timeval_add (struct timeval* t, double s) { double ret = (double)t->tv_sec + ((double)t->tv_usec) * 1.0e-06 + s; t->tv_sec = (long)ret; t->tv_usec = (long)((ret - (double)t->tv_sec) * 1.0e+06); } static const double SEC_PER_CLOCK = ((double)1.0)/CLOCKS_PER_SEC; /** Returns seconds */ static inline double gu_clock_diff (clock_t left, clock_t right) { return ((double)(left - right)) * SEC_PER_CLOCK; } #include /** * New time interface * * All funcitons return nanoseconds. 
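 *
 * Illustrative sketch of timing a code section with these helpers (a
 * minimal example; do_work() stands for an arbitrary workload and is not
 * part of this API):
 *
 *   long long start = gu_time_monotonic ();
 *   do_work ();
 *   long long nsec  = gu_time_monotonic () - start;
 *   double    sec   = nsec * 1.0e-9;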
*/ /* Maximum date representable by long long and compatible with timespec */ #define GU_TIME_ETERNITY 9223372035999999999LL #if defined(__APPLE__) /* synced with linux/time.h */ # define CLOCK_REALTIME 0 # define CLOCK_MONOTONIC 1 typedef int clockid_t; int clock_gettime (clockid_t clk_id, struct timespec * tp); #endif /* __APPLE__ */ static inline long long gu_time_getres() { #if _POSIX_TIMERS > 0 struct timespec tmp; clock_getres (CLOCK_REALTIME, &tmp); return ((tmp.tv_sec * 1000000000LL) + tmp.tv_nsec); #else return 1000LL; // assumed resolution of gettimeofday() in nanoseconds #endif } static inline long long gu_time_calendar() { #if _POSIX_TIMERS > 0 || defined(__APPLE__) struct timespec tmp; clock_gettime (CLOCK_REALTIME, &tmp); return ((tmp.tv_sec * 1000000000LL) + tmp.tv_nsec); #else struct timeval tmp; gettimeofday (&tmp, NULL); return ((tmp.tv_sec * 1000000000LL) + (tmp.tv_usec * 1000LL)); #endif } static inline long long gu_time_monotonic() { #if defined(_POSIX_MONOTONIC_CLOCK) || defined(__APPLE__) struct timespec tmp; clock_gettime (CLOCK_MONOTONIC, &tmp); return ((tmp.tv_sec * 1000000000LL) + tmp.tv_nsec); #else struct timeval tmp; gettimeofday (&tmp, NULL); return ((tmp.tv_sec * 1000000000LL) + (tmp.tv_usec * 1000LL)); #endif } #ifdef CLOCK_PROCESS_CPUTIME_ID static inline long long gu_time_process_cputime() { #if _POSIX_TIMERS > 0 struct timespec tmp; clock_gettime (CLOCK_PROCESS_CPUTIME_ID, &tmp); return ((tmp.tv_sec * 1000000000LL) + tmp.tv_nsec); #else return -1; #endif } #endif /* CLOCK_PROCESS_CPUTIME_ID */ static inline long long gu_time_thread_cputime() { #if _POSIX_TIMERS > 0 struct timespec tmp; clock_gettime (CLOCK_THREAD_CPUTIME_ID, &tmp); return ((tmp.tv_sec * 1000000000LL) + tmp.tv_nsec); #else return -1; #endif } #ifdef __cplusplus } #endif /* __cplusplus */ #endif /* _gu_time_h_ */ galera-3-25.3.20/galerautils/src/gu_profile.hpp0000644000015300001660000002060613042054732021111 0ustar jenkinsjenkins// // Copyright (C) 2010 Codership Oy // /*! * @file gu_profile.hpp * * @brief Lightweight profiling utility. * * Profiling utility suitable for getting runtime code profile information * with minimal overhead. Macros profile_enter() and profile_leave() * can be inserted around the code and will be expanded to profiling * code if GU_PROFILE is defined. * * Example usage: * @code * * Profile prof("prof"); * * void func() * { * if (is_true()) * { * profile_enter(prof); // This is line 227 * // Do something * // ... * profile_leave(prof); * } * else * { * profile_enter(prof); // This is line 250 * // Do something else * // ... * profile_leave(prof); * } * } * * // Somewhere else in your code * log_info << prof; * @endcode * */ #ifndef GU_PROFILE_HPP #define GU_PROFILE_HPP #include "gu_time.h" #include "gu_datetime.hpp" #include "gu_lock.hpp" #if defined(HAVE_BOOST_UNORDERED_MAP_HPP) #include #elif defined(HAVE_UNORDERED_MAP) #include #elif defined(HAVE_TR1_UNORDERED_MAP) #include #else #include #endif // HAVE_BOOST_UNORDERED_MAP_HPP #include namespace gu { namespace prof { class Key; class KeyHash; class Point; class Profile; std::ostream& operator<<(std::ostream&, const Key&); std::ostream& operator<<(std::ostream&, const Profile&); } } /*! * Profile key storing human readable point description :: * and entry time. 
*/ class gu::prof::Key { public: Key(const char* const file, const char* const func, const int line) : file_(file), func_(func), line_(line) { } bool operator==(const Key& cmp) const { return (line_ == cmp.line_ && func_ == cmp.func_ && file_ == cmp.file_); } bool operator<(const Key& cmp) const { return (line_ < cmp.line_ || (line_ == cmp.line_ && (func_ < cmp.func_ || (func_ == cmp.func_ && file_ < cmp.file_)))); } std::string to_string() const { std::ostringstream os; os << *this; return os.str(); } private: friend class KeyHash; friend class Point; friend class Profile; friend std::ostream& operator<<(std::ostream& os, const Key&); const char* const file_; const char* const func_; const int line_; }; #ifdef HAVE_BOOST_UNORDERED_MAP_HPP class gu::prof::KeyHash { public: size_t operator()(const Key& key) const { return boost::hash_value(key.file_) ^ boost::hash_value(key.func_) ^ boost::hash_value(key.line_); } }; #endif // HAVE_BOOST_UNORDERED_MAP_HPP inline std::ostream& gu::prof::operator<<(std::ostream& os, const gu::prof::Key& key) { return os << key.file_ << ":" << key.func_ << ":" << key.line_; } class gu::prof::Point { public: Point(const Profile& prof, const char* file, const char* func, const int line); ~Point(); private: friend class Profile; const Profile& prof_; const Key key_; mutable long long int enter_time_calendar_; mutable long long int enter_time_thread_cputime_; }; /*! * Profile class for collecting statistics about profile points. */ class gu::prof::Profile { struct PointStats { PointStats(long long int count = 0, long long int time_calendar = 0, long long int time_thread_cputime = 0) : count_ (count ), time_calendar_ (time_calendar ), time_thread_cputime_(time_thread_cputime) { } PointStats operator+(const PointStats& add) const { return PointStats(count_ + add.count_, time_calendar_ + add.time_calendar_, time_thread_cputime_+ add.time_thread_cputime_); } long long int count_; long long int time_calendar_; long long int time_thread_cputime_; }; #if defined(HAVE_BOOST_UNORDERED_MAP_HPP) typedef boost::unordered_map Map; #elif defined(HAVE_UNORDERED_MAP) typedef std::unordered_map Map; #elif defined(HAVE_TR1_UNORDERED_MAP) typedef std::tr1::unordered_map Map; #else typedef std::map Map; #endif public: /*! * Default constructor. * * @param name_ Name identifying the profile in ostream output. 
*/ Profile(const std::string& name = "profile") : name_(name), start_time_calendar_(gu_time_calendar()), start_time_thread_cputime_(gu_time_thread_cputime()), mutex_(), points_() { } void enter(const Point& point) const { point.enter_time_calendar_ = gu_time_calendar(); point.enter_time_thread_cputime_ = gu_time_thread_cputime(); gu::Lock lock(mutex_); points_[point.key_].count_++; } void leave(const Point& point) const { long long int t_cal(gu_time_calendar()); long long int t_thdcpu(gu_time_thread_cputime()); gu::Lock lock(mutex_); PointStats& pointst(points_[point.key_]); pointst.time_calendar_ += (t_cal - point.enter_time_calendar_); pointst.time_thread_cputime_ += (t_thdcpu - point.enter_time_thread_cputime_); } void clear() const { gu::Lock lock(mutex_); points_.clear(); } friend std::ostream& operator<<(std::ostream&, const Profile&); std::string const name_; long long int const start_time_calendar_; long long int const start_time_thread_cputime_; gu::Mutex mutex_; mutable Map points_; }; inline gu::prof::Point::Point(const Profile& prof, const char* file, const char* func, const int line) : prof_(prof), key_(file, func, line), enter_time_calendar_(), enter_time_thread_cputime_() { prof_.enter(*this); } inline gu::prof::Point::~Point() { prof_.leave(*this); } // // Ostream operator for Profile class. // inline std::ostream& gu::prof::operator<<(std::ostream& os, const Profile& prof) { Profile::PointStats cumul; char prev_fill(os.fill()); os.fill(' '); os << "\nprofile name: " << prof.name_; os << std::left << std::fixed << std::setprecision(3); os << "\n\n"; os << std::setw(40) << "point"; os << std::setw(10) << "count"; os << std::setw(10) << "calendar"; os << std::setw(10) << "cpu"; os << "\n" << std::setfill('-') << std::setw(70) << "" << std::setfill(' ') << "\n"; for (Profile::Map::const_iterator i = prof.points_.begin(); i != prof.points_.end(); ++i) { os << std::setw(40) << std::left << i->first.to_string(); os << std::right; os << std::setw(10) << i->second.count_; os << std::setw(10) << double(i->second.time_calendar_)*1.e-9; os << std::setw(10) << double(i->second.time_thread_cputime_)*1.e-9; os << std::left; os << "\n"; cumul = cumul + i->second; } os << "\ntot count : " << cumul.count_; os << "\ntot calendar time : " << double(cumul.time_calendar_)*1.e-9; os << "\ntot thread cputime: " << double(cumul.time_thread_cputime_)*1.e-9; os << "\ntot ct since ctor : " << double(gu::datetime::Date::now().get_utc() - prof.start_time_calendar_)*1.e-9; os.fill(prev_fill); return os; } // // Convenience macros for defining profile entry and leave points. // If GU_PROFILE is undefined, these macros expand to no-op. // #ifdef GU_PROFILE #define profile_enter(__p) \ do { \ const gu::prof::Point __point((__p), __FILE__, \ __FUNCTION__, __LINE__); \ #define profile_leave(__p) \ } while (0) #else #define profile_enter(__p) #define profile_leave(__p) #endif // GU_PROFILE #endif // GU_PROFILE_HPP galera-3-25.3.20/galerautils/src/gu_uuid.c0000644000015300001660000001151613042054732020052 0ustar jenkinsjenkins/* * Copyright (C) 2008 Codership Oy * * $Id$ */ /* * Universally Unique IDentifier. RFC 4122. * Time-based implementation. 
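 *
 * Illustrative usage sketch (a minimal example; GU_UUID_STR_LEN is assumed
 * to be provided by gu_uuid.h, and passing a NULL node lets the
 * implementation fill the node bytes with random data as described below):
 *
 *   gu_uuid_t uuid;
 *   char      buf[GU_UUID_STR_LEN + 1];
 *   gu_uuid_generate (&uuid, NULL, 0);
 *   if (gu_uuid_print (&uuid, buf, sizeof(buf)) > 0) puts (buf);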
* */ #include "gu_uuid.h" #include "gu_byteswap.h" #include "gu_log.h" #include "gu_assert.h" #include "gu_mutex.h" #include "gu_time.h" #include "gu_rand.h" #include // for rand_r() #include // for memcmp() #include // for fopen() et al #include // for gettimeofday() #include // for getpid() #include // for errno #include const gu_uuid_t GU_UUID_NIL = {{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}}; #define UUID_NODE_LEN 6 /** Returns 64-bit system time in 100 nanoseconds */ static uint64_t uuid_get_time () { static long long check = 0; static gu_mutex_t mtx = GU_MUTEX_INITIALIZER; long long t; gu_mutex_lock (&mtx); do { t = gu_time_calendar() / 100; } while (check == t); check = t; gu_mutex_unlock (&mtx); return (t + 0x01B21DD213814000LL); // offset since the start of 15 October 1582 } #ifndef UUID_URAND // This function can't be called too often, // apparently due to lack of entropy in the pool. /** Fills node part of the uuid with true random data from /dev/urand */ static int uuid_urand_node (uint8_t* node, size_t node_len) { static const char urand_name[] = "/dev/urandom"; FILE* urand; size_t i = 0; int c; urand = fopen (urand_name, "r"); if (NULL == urand) { gu_debug ("Failed to open %s for reading (%d).", urand_name, -errno); return -errno; } while (i < node_len && (c = fgetc (urand)) != EOF) { node[i] = (uint8_t) c; i++; } fclose (urand); return 0; } #else #define uuid_urand_node(a,b) true #endif /** Fills node part with pseudorandom data from rand_r() */ static void uuid_rand_node (uint8_t* node, size_t node_len) { unsigned int seed = gu_rand_seed_int (gu_time_calendar(), node, getpid()); size_t i; for (i = 0; i < node_len; i++) { uint32_t r = (uint32_t) rand_r (&seed); /* combine all bytes into the lowest byte */ node[i] = (uint8_t)((r) ^ (r >> 8) ^ (r >> 16) ^ (r >> 24)); } } static inline void uuid_fill_node (uint8_t* node, size_t node_len) { if (uuid_urand_node (node, node_len)) { uuid_rand_node (node, node_len); } } void gu_uuid_generate (gu_uuid_t* uuid, const void* node, size_t node_len) { assert (NULL != uuid); assert (NULL == node || 0 != node_len); uint32_t* uuid32 = (uint32_t*) uuid->data; uint16_t* uuid16 = (uint16_t*) uuid->data; uint64_t uuid_time = uuid_get_time (); uint16_t clock_seq = gu_rand_seed_int (uuid_time, &GU_UUID_NIL, getpid()); /* time_low */ uuid32[0] = gu_be32 (uuid_time & 0xFFFFFFFF); /* time_mid */ uuid16[2] = gu_be16 ((uuid_time >> 32) & 0xFFFF); /* time_high_and_version */ uuid16[3] = gu_be16 (((uuid_time >> 48) & 0x0FFF) | (1 << 12)); /* clock_seq_and_reserved */ uuid16[4] = gu_be16 ((clock_seq & 0x3FFF) | 0x8000); /* node */ if (NULL != node && 0 != node_len) { memcpy (&uuid->data[10], node, node_len > UUID_NODE_LEN ? 
UUID_NODE_LEN : node_len); } else { uuid_fill_node (&uuid->data[10], UUID_NODE_LEN); uuid->data[10] |= 0x02; /* mark as "locally administered" */ } return; } /** * Compare two UUIDs * @return -1, 0, 1 if left is respectively less, equal or greater than right */ long gu_uuid_compare (const gu_uuid_t* left, const gu_uuid_t* right) { return memcmp (left, right, sizeof(gu_uuid_t)); } static uint64_t uuid_time (const gu_uuid_t* uuid) { uint64_t uuid_time; /* time_high_and_version */ uuid_time = gu_be16 (((uint16_t*)uuid->data)[3]) & 0x0FFF; /* time_mid */ uuid_time = (uuid_time << 16) + gu_be16 (((uint16_t*)uuid->data)[2]); /* time_low */ uuid_time = (uuid_time << 32) + gu_be32 (((uint32_t*)uuid->data)[0]); return uuid_time; } /** * Compare ages of two UUIDs * @return -1, 0, 1 if left is respectively younger, equal or older than right */ long gu_uuid_older (const gu_uuid_t* left, const gu_uuid_t* right) { uint64_t time_left = uuid_time (left); uint64_t time_right = uuid_time (right); if (time_left < time_right) return 1; if (time_left > time_right) return -1; return 0; } ssize_t gu_uuid_print(const gu_uuid_t* uuid, char* buf, size_t buflen) { if (buflen < GU_UUID_STR_LEN) return -1; return sprintf(buf, GU_UUID_FORMAT, GU_UUID_ARGS(uuid)); } ssize_t gu_uuid_scan(const char* buf, size_t buflen, gu_uuid_t* uuid) { ssize_t ret; if (buflen < GU_UUID_STR_LEN) return -1; ret = sscanf(buf, GU_UUID_FORMAT_SCANF, GU_UUID_ARGS_SCANF(uuid)); if (ret != sizeof(uuid->data)) return -1; return ret; } galera-3-25.3.20/galerautils/src/gu_histogram.hpp0000644000015300001660000000107613042054732021446 0ustar jenkinsjenkins/* * Copyright (C) 2014 Codership Oy */ #ifndef _gu_histogram_hpp_ #define _gu_histogram_hpp_ #include #include namespace gu { class Histogram { public: Histogram(const std::string&); void insert(const double); void clear(); friend std::ostream& operator<<(std::ostream&, const Histogram&); std::string to_string() const; private: std::map cnt_; }; std::ostream& operator<<(std::ostream&, const Histogram&); } #endif // _gu_histogram_hpp_ galera-3-25.3.20/galerautils/src/gu_mem.c0000644000015300001660000001011313042054732017652 0ustar jenkinsjenkins// Copyright (C) 2007 Codership Oy /** * Debugging versions of memmory functions * * $Id$ */ #include #include #include #include "gu_mem.h" #include "gu_log.h" /* Some global counters - can be inspected by gdb */ static volatile ssize_t gu_mem_total = 0; static volatile ssize_t gu_mem_allocs = 0; static volatile ssize_t gu_mem_reallocs = 0; static volatile ssize_t gu_mem_frees = 0; typedef struct mem_head { const char* file; unsigned int line; size_t used; size_t allocated; uint32_t signature; } mem_head_t; #define MEM_SIGNATURE 0x13578642 /**< Our special marker */ // Returns pointer to the first byte after the head structure #define TAIL(head) ((void*)((mem_head_t*)(head) + 1)) // Returns pointer to the head preceding tail #define HEAD(tail) ((mem_head_t*)(tail) - 1) void* gu_malloc_dbg (size_t size, const char* file, unsigned int line) { if (size) { size_t const total_size = size + sizeof(mem_head_t); mem_head_t* const ret = (mem_head_t*) malloc (total_size); if (ret) { gu_mem_total += total_size; gu_mem_allocs++; ret->signature = MEM_SIGNATURE; ret->allocated = total_size; ret->used = size; ret->file = file; ret->line = line; // cppcheck-suppress memleak return TAIL(ret); } } return NULL; } void* gu_calloc_dbg (size_t nmemb, size_t size, const char* file, unsigned int line) { if (size != 0 && nmemb != 0) { size_t const total_size = size*nmemb + 
sizeof(mem_head_t); mem_head_t* const ret = (mem_head_t*) calloc (total_size, 1); if (ret) { size_t const total_size = size*nmemb + sizeof(mem_head_t); gu_mem_total += total_size; gu_mem_allocs++; ret->signature = MEM_SIGNATURE; ret->allocated = total_size; ret->used = size; ret->file = file; ret->line = line; return TAIL(ret); } } return NULL; } void* gu_realloc_dbg (void* ptr, size_t size, const char* file, unsigned int line) { if (ptr) { if (size > 0) { mem_head_t* const old = HEAD(ptr); if (MEM_SIGNATURE != old->signature) { gu_error ("Attempt to realloc uninitialized pointer at " "file: %s, line: %d", file, line); assert (0); } size_t const total_size = size + sizeof(mem_head_t); mem_head_t* const ret = (mem_head_t*) realloc (old, total_size); if (ret) { gu_mem_reallocs++; gu_mem_total -= ret->allocated; // old size ret->allocated = total_size; gu_mem_total += ret->allocated; // new size ret->used = size; ret->file = file; ret->line = line; return TAIL(ret); } else { // realloc failed return NULL; } } else { gu_free_dbg (ptr, file, line); return NULL; } } else { return gu_malloc_dbg (size, file, line); } return NULL; } void gu_free_dbg (void* ptr, const char* file, unsigned int line) { mem_head_t* head; if (NULL == ptr) { gu_debug ("Attempt to free NULL pointer at file: %s, line: %d", file, line); return; /* As per specification - no operation is performed */ } head = HEAD(ptr); if (MEM_SIGNATURE != head->signature) { gu_error ("Attempt to free uninitialized pointer " "at file: %s, line: %d", file, line); assert (0); } if (0 == head->used) { gu_error ("Attempt to free pointer the second time at " "file: %s, line: %d. " "Was allocated at file: %s, line: %d.", file, line, head->file, head->line); assert (0); } gu_mem_total -= head->allocated; gu_mem_frees++; head->allocated = 0; head->used = 0; free (head); } void gu_mem_stats (ssize_t* total, ssize_t* allocs, ssize_t* reallocs, ssize_t* deallocs) { *total = gu_mem_total; *allocs = gu_mem_allocs; *reallocs = gu_mem_reallocs; *deallocs = gu_mem_frees; } galera-3-25.3.20/galerautils/src/gu_macros.hpp0000644000015300001660000000130713042054732020732 0ustar jenkinsjenkins// Copyright (C) 2009 Codership Oy /** * @file Miscellaneous C++-related macros * * $Id$ */ #ifndef _gu_macros_hpp_ #define _gu_macros_hpp_ /* To protect against "old-style" casts in libc macros * must be included after respective libc headers */ #if defined(SIG_IGN) extern "C" { static void (* const GU_SIG_IGN)(int) = SIG_IGN; } #endif #if defined(MAP_FAILED) extern "C" { static const void* const GU_MAP_FAILED = MAP_FAILED; } #endif namespace gu { template struct CompileAssert {}; } /* namespace gu */ #define GU_COMPILE_ASSERT(expr,msg) \ typedef gu::CompileAssert<(bool(expr))> msg[bool(expr) ? 
1 : -1] __attribute__((unused)) #endif /* _gu_macros_hpp_ */ galera-3-25.3.20/galerautils/src/galerautils.h0000644000015300001660000000132213042054732020724 0ustar jenkinsjenkins// Copyright (C) 2007-2009 Codership Oy /** * @file GaleraUtils main header file * * $Id$ */ #ifndef _galerautils_h_ #define _galerautils_h_ #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ #include "gu_macros.h" #include "gu_limits.h" #include "gu_byteswap.h" #include "gu_time.h" #include "gu_log.h" #include "gu_conf.h" #include "gu_assert.h" #include "gu_mem.h" #include "gu_mutex.h" #include "gu_dbug.h" #include "gu_fifo.h" #include "gu_uuid.h" #include "gu_to.h" #include "gu_lock_step.h" #include "gu_utils.h" #include "gu_config.h" #include "gu_abort.h" #include "gu_errno.h" #include "gu_atomic.h" #ifdef __cplusplus } #endif /* __cplusplus */ #endif /* _galerautils_h_ */ galera-3-25.3.20/galerautils/src/gu_buffer.hpp0000644000015300001660000000062413042054732020720 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy */ /*! * Byte buffer class. This is thin wrapper to std::vector */ #ifndef GU_BUFFER_HPP #define GU_BUFFER_HPP #include "gu_types.hpp" // for gu::byte_t #include #include namespace gu { typedef std::vector Buffer; typedef boost::shared_ptr SharedBuffer; } #endif // GU_BUFFER_HPP galera-3-25.3.20/galerautils/src/gu_hash.h0000644000015300001660000001007313042054732020031 0ustar jenkinsjenkins// Copyright (C) 2012 Codership Oy /** * @file Defines 3 families of standard Galera hash methods * * 1) gu_hash - a general use universal hash: 128, 64 and 32-bit variants. * * 2) gu_fast_hash - optimized for 64-bit Intel CPUs, limited to whole message * only, also comes in 128, 64 and 32-bit flavors. * 3) gu_table_hash - possibly even faster, platform-optimized, globally * inconsistent hash functions to be used only in local hash * tables. Only size_t variants defined. * * 128-bit result is returned through void* parameter as a byte array in * canonical order. * 64/32-bit results are returned as uint64_t/uint32_t integers and thus in host * byte order (require conversion to network/Galera byte order for serialization). * * $Id$ */ #ifndef _gu_hash_h_ #define _gu_hash_h_ #ifdef __cplusplus extern "C" { #endif #include "gu_fnv.h" #include "gu_mmh3.h" #include "gu_spooky.h" /* * General purpose globally consistent _fast_ hash, if in doubt use that. 
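 *
 * Illustrative sketch of hashing a two-part message with the multipart
 * macros below (buf1/len1 and buf2/len2 are placeholders; the underlying
 * gu_mmh128_* functions are assumed to take the hash context by pointer):
 *
 *   gu_hash_t ctx;
 *   gu_hash_init   (&ctx);
 *   gu_hash_append (&ctx, buf1, len1);
 *   gu_hash_append (&ctx, buf2, len2);
 *   uint64_t h64 = gu_hash_get64 (&ctx);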
*/ /* This is to hash multipart message */ #define gu_hash_t gu_mmh128_ctx_t #define gu_hash_init(_hash) gu_mmh128_init(_hash) #define gu_hash_append(_hash, _msg, _len) gu_mmh128_append(_hash, _msg, _len) #define gu_hash_get128(_hash, _res) gu_mmh128_get(_hash, _res) #define gu_hash_get64(_hash) gu_mmh128_get64(_hash) #define gu_hash_get32(_hash) gu_mmh128_get32(_hash) /* This is to hash a whole message in one go */ #define gu_hash128(_msg, _len, _res) gu_mmh128(_msg, _len, _res) #define gu_hash64(_msg, _len) gu_mmh128_64(_msg, _len) #define gu_hash32(_msg, _len) gu_mmh128_32(_msg, _len) /* * Hash optimized for speed, can't do multipart messages, but should still * be usable as global identifier */ #define GU_SHORT64_LIMIT 16 #define GU_MEDIUM64_LIMIT 512 static GU_INLINE void gu_fast_hash128 (const void* const msg, size_t const len, void* const res) { if (len < GU_MEDIUM64_LIMIT) { gu_mmh128 (msg, len, res); } else { gu_spooky128 (msg, len, res); } } static GU_FORCE_INLINE uint64_t gu_fast_hash64_short (const void* const msg, size_t const len) { uint64_t res = GU_FNV64_SEED; gu_fnv64a_internal (msg, len, &res); /* mix to improve avalanche effect */ res *= GU_ROTL64(res, 56); return res ^ GU_ROTL64(res, 43); } #define gu_fast_hash64_medium gu_mmh128_64 #define gu_fast_hash64_long gu_spooky64 static GU_INLINE uint64_t gu_fast_hash64 (const void* const msg, size_t const len) { if (len < GU_SHORT64_LIMIT) { return gu_fast_hash64_short (msg, len); } else if (len < GU_MEDIUM64_LIMIT) { return gu_fast_hash64_medium (msg, len); } else { return gu_fast_hash64_long (msg, len); } } #define gu_fast_hash32_short gu_mmh32 #define gu_fast_hash32_medium gu_mmh128_32 #define gu_fast_hash32_long gu_spooky32 #define GU_SHORT32_LIMIT 32 #define GU_MEDIUM32_LIMIT 512 static GU_INLINE uint32_t gu_fast_hash32 (const void* const msg, size_t const len) { if (len < GU_SHORT32_LIMIT) { return gu_fast_hash32_short (msg, len); } else if (len < GU_MEDIUM32_LIMIT) { return gu_fast_hash32_medium (msg, len); } else { return gu_fast_hash32_long (msg, len); } } /* * Platform-optimized hashes only for local hash tables, don't produce globally * consistent results. No 128-bit version for obvious reasons. * * Resulting gu_table_hash() will be the fastest hash function returning size_t */ #if GU_WORDSIZE == 64 #define gu_table_hash gu_fast_hash64 /* size_t is normally 64-bit here */ #elif GU_WORDSIZE == 32 /* on 32-bit platform MurmurHash32 is only insignificantly slower than FNV32a * on messages < 10 bytes but produces far better hash. */ #define gu_table_hash gu_mmh32 /* size_t is normally 32-bit here */ #else /* GU_WORDSIZE neither 64 nor 32 bits */ # error Unsupported wordsize! 
#endif #ifdef __cplusplus } #endif #endif /* _gu_hash_h_ */ galera-3-25.3.20/galerautils/src/gu_config.cpp0000644000015300001660000002673413042054732020721 0ustar jenkinsjenkins// Copyright (C) 2010-2014 Codership Oy /** * @file * Configuration management implementation * * $Id$ */ #include "gu_config.h" #include "gu_config.hpp" #include "gu_logger.hpp" #include "gu_assert.hpp" const char gu::Config::PARAM_SEP = ';'; // parameter separator const char gu::Config::KEY_VALUE_SEP = '='; // key-value separator const char gu::Config::ESCAPE = '\\'; // escape symbol void gu::Config::parse ( std::vector >& params_vector, const std::string& param_list) { assert(params_vector.empty()); // we probably want a clean list if (param_list.empty()) return; std::vector pv = gu::tokenize (param_list, PARAM_SEP, ESCAPE); for (size_t i = 0; i < pv.size(); ++i) { std::vector kvv = gu::tokenize (pv[i], KEY_VALUE_SEP, ESCAPE, true); assert(kvv.size() > 0); gu::trim(kvv[0]); const std::string& key = kvv[0]; if (!key.empty()) { if (kvv.size() == 1) { gu_throw_error(EINVAL) <<"Key without value: '" << key <<"' at position '" << i << "' in parameter list."; } if (kvv.size() > 2) { gu_throw_error(EINVAL) <<"More than one value for key '" << key <<"' at '" << pv[i] << "' in parameter list."; } gu::trim(kvv[1]); std::string& value = kvv[1]; params_vector.push_back(std::make_pair(key, value)); } else if (kvv.size() > 1) { gu_throw_error(EINVAL) << "Empty key at '" << pv[i] << "' in parameter list."; } } } void gu::Config::parse (const std::string& param_list) { if (param_list.empty()) return; std::vector > pv; parse (pv, param_list); bool not_found(false); for (size_t i = 0; i < pv.size(); ++i) { const std::string& key (pv[i].first); const std::string& value(pv[i].second); try { set(key, value); } catch (NotFound& e) { log_error << "Unrecognized parameter '" << key << '\''; /* Throw later so that all invalid parameters get logged.*/ not_found = true; } log_debug << "Set parameter '" << key << "' = '" << value << "'"; } if (not_found) throw gu::NotFound(); } gu::Config::Config() : params_() {} void gu::Config::set_longlong (const std::string& key, long long val) { const char* num_mod = ""; /* Shift preserves sign! 
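     * A worked example of the shifting below (values chosen for
     * illustration only): (3LL << 20) has its 20 low bits clear, so it is
     * shifted down and emitted as "3M"; 3000000 has low bits set and is
     * emitted verbatim as "3000000".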
*/ if (val != 0) { if (!(val & ((1LL << 40) - 1))) { val >>= 40; num_mod = "T"; } else if (!(val & ((1 << 30) - 1))) { val >>= 30; num_mod = "G"; } else if (!(val & ((1 << 20) - 1))) { val >>= 20; num_mod = "M"; } else if (!(val & ((1 << 10) - 1))) { val >>= 10; num_mod = "K"; } } std::ostringstream ost; ost << val << num_mod; set (key, ost.str()); } void gu::Config::check_conversion (const char* str, const char* endptr, const char* type, bool range_error) { if (endptr == str || endptr[0] != '\0' || range_error) { gu_throw_error(EINVAL) << "Invalid value '" << str << "' for " << type << " type."; } } char gu::Config::overflow_char(long long ret) { if (ret >= CHAR_MIN && ret <= CHAR_MAX) return ret; gu_throw_error(EOVERFLOW) << "Value " << ret << " too large for requested type (char)."; } short gu::Config::overflow_short(long long ret) { if (ret >= SHRT_MIN && ret <= SHRT_MAX) return ret; gu_throw_error(EOVERFLOW) << "Value " << ret << " too large for requested type (short)."; } int gu::Config::overflow_int(long long ret) { if (ret >= INT_MIN && ret <= INT_MAX) return ret; gu_throw_error(EOVERFLOW) << "Value " << ret << " too large for requested type (int)."; } void gu::Config::print (std::ostream& os, bool const notset) const { struct _print_param { void operator() (std::ostream& os, bool const notset, param_map_t::const_iterator& pi) { const Parameter& p(pi->second); if (p.is_set() || notset) { os << pi->first << " = " << p.value() << "; "; } } } print_param; for (param_map_t::const_iterator pi(params_.begin()); pi != params_.end(); ++pi) { print_param(os, notset, pi); } } std::ostream& gu::operator<<(std::ostream& ost, const gu::Config& c) { c.print(ost); return ost; } gu_config_t* gu_config_create (void) { try { return (reinterpret_cast(new gu::Config())); } catch (gu::Exception& e) { log_error << "Failed to create configuration object: " << e.what(); return 0; } } void gu_config_destroy (gu_config_t* cnf) { if (cnf) { gu::Config* conf = reinterpret_cast(cnf); delete conf; } else { log_error << "Null configuration object in " << __FUNCTION__; assert (0); } } static int config_check_set_args (gu_config_t* cnf, const char* key, const char* func) { if (cnf && key && key[0] != '\0') return 0; if (!cnf) { log_fatal << "Null configuration object in " << func; } if (!key) { log_fatal << "Null key in " << func; } else if (key[0] == '\0') { log_fatal << "Empty key in " << func; } assert (0); return -EINVAL; } static int config_check_get_args (gu_config_t* cnf, const char* key, const void* val_ptr, const char* func) { if (cnf && key && key[0] != '\0' && val_ptr) return 0; if (!cnf) { log_error << "Null configuration object in " << func; } if (!key) { log_error << "Null key in " << func; } else if (key[0] == '\0') { log_error << "Empty key in " << func; } if (!val_ptr) { log_error << "Null value pointer in " << func; } assert (0); return -EINVAL; } bool gu_config_has (gu_config_t* cnf, const char* key) { if (config_check_set_args (cnf, key, __FUNCTION__)) return false; gu::Config* conf = reinterpret_cast(cnf); return (conf->has (key)); } bool gu_config_is_set (gu_config_t* cnf, const char* key) { if (config_check_set_args (cnf, key, __FUNCTION__)) return false; gu::Config* conf = reinterpret_cast(cnf); return (conf->is_set (key)); } int gu_config_add (gu_config_t* cnf, const char* key, const char* const val) { if (config_check_set_args (cnf, key, __FUNCTION__)) return -EINVAL; gu::Config* conf = reinterpret_cast(cnf); try { if (val != NULL) conf->add (key, val); else conf->add (key); return 0; } 
catch (std::exception& e) { log_error << "Error adding parameter '" << key << "': " << e.what(); return -1; } catch (...) { log_error << "Unknown exception adding parameter '" << key << "'"; return -1; } } int gu_config_get_string (gu_config_t* cnf, const char* key, const char** val) { if (config_check_get_args (cnf, key, val, __FUNCTION__)) return -EINVAL; gu::Config* conf = reinterpret_cast(cnf); try { *val = conf->get(key).c_str(); return 0; } catch (gu::NotFound&) { return 1; } } int gu_config_get_int64 (gu_config_t* cnf, const char* key, int64_t* val) { if (config_check_get_args (cnf, key, val, __FUNCTION__)) return -EINVAL; gu::Config* conf = reinterpret_cast(cnf); try { *val = conf->get(key); return 0; } catch (gu::NotFound&) { return 1; } catch (gu::NotSet&) { return 1; } catch (gu::Exception& e) { log_error << "Failed to parse parameter '" << key << "': " << e.what(); return -e.get_errno(); } } int gu_config_get_double (gu_config_t* cnf, const char* key, double* val) { if (config_check_get_args (cnf, key, val, __FUNCTION__)) return -EINVAL; gu::Config* conf = reinterpret_cast(cnf); try { *val = conf->get(key); return 0; } catch (gu::NotFound&) { return 1; } catch (gu::NotSet&) { return 1; } catch (gu::Exception& e) { log_error << "Failed to parse parameter '" << key << "': " << e.what(); return -e.get_errno(); } } int gu_config_get_ptr (gu_config_t* cnf, const char* key, void** val) { if (config_check_get_args (cnf, key, val, __FUNCTION__)) return -EINVAL; gu::Config* conf = reinterpret_cast(cnf); try { *val = conf->get(key); return 0; } catch (gu::NotFound&) { return 1; } catch (gu::NotSet&) { return 1; } catch (gu::Exception& e) { log_error << "Failed to parse parameter '" << key << "': " << e.what(); return -e.get_errno(); } } int gu_config_get_bool (gu_config_t* cnf, const char* key, bool* val) { if (config_check_get_args (cnf, key, val, __FUNCTION__)) return -EINVAL; gu::Config* conf = reinterpret_cast(cnf); try { *val = conf->get(key); return 0; } catch (gu::NotFound&) { return 1; } catch (gu::NotSet&) { return 1; } catch (gu::Exception& e) { log_error << "Failed to parse parameter '" << key << "': " << e.what(); return -e.get_errno(); } } #include void gu_config_set_string (gu_config_t* cnf, const char* key, const char* val) { if (config_check_set_args (cnf, key, __FUNCTION__)) abort(); assert (cnf); gu::Config* conf = reinterpret_cast(cnf); conf->set (key, val); } void gu_config_set_int64 (gu_config_t* cnf, const char* key, int64_t val) { if (config_check_set_args (cnf, key, __FUNCTION__)) abort(); gu::Config* conf = reinterpret_cast(cnf); conf->set (key, val); } void gu_config_set_double (gu_config_t* cnf, const char* key, double val) { if (config_check_set_args (cnf, key, __FUNCTION__)) abort(); gu::Config* conf = reinterpret_cast(cnf); conf->set(key, val); } void gu_config_set_ptr (gu_config_t* cnf, const char* key, const void* val) { if (config_check_set_args (cnf, key, __FUNCTION__)) abort(); gu::Config* conf = reinterpret_cast(cnf); conf->set(key, val); } void gu_config_set_bool (gu_config_t* cnf, const char* key, bool val) { if (config_check_set_args (cnf, key, __FUNCTION__)) abort(); gu::Config* conf = reinterpret_cast(cnf); conf->set(key, val); } ssize_t gu_config_print (gu_config_t* cnf, char* buf, ssize_t buf_len) { std::ostringstream os; os << *(reinterpret_cast(cnf)); const std::string& str = os.str(); strncpy (buf, str.c_str(), buf_len - 1); buf[buf_len - 1] = '\0'; return str.length(); } 
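/*
 * Illustrative sketch of the C wrapper API above (the key name is a
 * placeholder and error handling is abbreviated; this block is not part of
 * the original file):
 *
 *   gu_config_t* cnf = gu_config_create ();
 *   if (cnf && 0 == gu_config_add (cnf, "example.key", "42"))
 *   {
 *       int64_t val;
 *       if (0 == gu_config_get_int64 (cnf, "example.key", &val))
 *           printf ("example.key = %lld\n", (long long)val);
 *   }
 *   gu_config_destroy (cnf);
 */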
galera-3-25.3.20/galerautils/src/gu_fifo.h0000644000015300001660000000503513042054732020033 0ustar jenkinsjenkins/* * Copyright (C) 2008-2013 Codership Oy * * Queue (FIFO) class definition * * The driving idea behind this class is avoiding malloc()'s * at all costs on one hand, on the other - make it almost * as infinite as an ordinary linked list. FIFO properties * help to achieve that. * * When needed this FIFO can be made very big, holding * millions or even billions of items while taking up * minimum space when there are few items in the queue. * malloc()'s do happen, but once per thousand of pushes and * allocate multiples of pages, thus reducing memory fragmentation. */ #ifndef _gu_fifo_h_ #define _gu_fifo_h_ #include typedef struct gu_fifo gu_fifo_t; /*! constructor */ extern gu_fifo_t* gu_fifo_create (size_t length, size_t unit); /*! puts FIFO into closed state, waking up waiting threads */ extern void gu_fifo_close (gu_fifo_t *queue); /*! (re)opens FIFO */ extern void gu_fifo_open (gu_fifo_t *queue); /*! destructor - would block until all members are dequeued */ extern void gu_fifo_destroy (gu_fifo_t *queue); /*! for logging purposes */ extern char* gu_fifo_print (gu_fifo_t *queue); /*! Lock FIFO */ extern void gu_fifo_lock (gu_fifo_t *q); /*! Release FIFO */ extern void gu_fifo_release (gu_fifo_t *q); /*! Lock FIFO and get pointer to head item * @param err contains error code if retval is NULL (otherwise - undefined): -ENODATA - queue closed, -ECANCELED - gets were canceled on the queue * @retval pointer to head item or NULL if error occured */ extern void* gu_fifo_get_head (gu_fifo_t* q, int* err); /*! Advance FIFO head pointer and release FIFO. */ extern void gu_fifo_pop_head (gu_fifo_t* q); /*! Lock FIFO and get pointer to tail item */ extern void* gu_fifo_get_tail (gu_fifo_t* q); /*! Advance FIFO tail pointer and release FIFO. */ extern void gu_fifo_push_tail (gu_fifo_t* q); /*! Return how many items are in the queue (unprotected) */ extern long gu_fifo_length (gu_fifo_t* q); /*! Return how many items were in the queue on average per push_tail() */ extern void gu_fifo_stats_get (gu_fifo_t* q, int* q_len, int* q_len_max, int* q_len_min, double* q_len_avg); /*! Flush stats counters */ extern void gu_fifo_stats_flush(gu_fifo_t* q); /*! Cancel getters (must be called while holding a FIFO lock) */ extern int gu_fifo_cancel_gets (gu_fifo_t* q); /*! 
Resume get operations */ extern int gu_fifo_resume_gets (gu_fifo_t* q); #ifndef NDEBUG extern bool gu_fifo_locked (gu_fifo_t* q); #endif #endif // _gu_fifo_h_ galera-3-25.3.20/galerautils/src/gu_spooky.c0000644000015300001660000000044013042054732020422 0ustar jenkinsjenkins// Copyright (C) 2012 Codership Oy /** * @file external Spooky hash implementation to avoid code bloat * * $Id$ */ #include "gu_spooky.h" void gu_spooky128_host (const void* const msg, size_t const len, uint64_t* res) { gu_spooky_inline (msg, len, res); } galera-3-25.3.20/galerautils/src/gu_types.h0000644000015300001660000000076313042054732020257 0ustar jenkinsjenkins// Copyright (C) 2013 Codership Oy /** * @file Location of some "standard" types definitions * * $Id$ */ #ifndef _gu_types_h_ #define _gu_types_h_ #include /* intXX_t and friends */ #include /* bool */ #include /* ssize_t */ #include /* ptrdiff_t */ #include /* off_t */ #ifdef __cplusplus extern "C" { #endif typedef unsigned char gu_byte_t; #ifdef __cplusplus } #endif #endif /* _gu_types_h_ */ galera-3-25.3.20/galerautils/src/gu_buffer.cpp0000644000015300001660000000013113042054732020704 0ustar jenkinsjenkins// // Copyright (C) 2010 Codership Oy // #include "gu_buffer.hpp" galera-3-25.3.20/galerautils/src/gu_alloc.hpp0000644000015300001660000001233113042054732020537 0ustar jenkinsjenkins/* Copyright (C) 2013-2016 Codership Oy */ /*! * @file Continuous buffer allocator for RecordSet * * $Id$ */ #ifndef _GU_ALLOC_HPP_ #define _GU_ALLOC_HPP_ #include "gu_string.hpp" #include "gu_fdesc.hpp" #include "gu_mmap.hpp" #include "gu_buf.hpp" #include "gu_vector.hpp" #include "gu_macros.h" // gu_likely() #include // realloc(), free() #include #include namespace gu { class Allocator { public: class BaseName { public: virtual void print(std::ostream& os) const = 0; virtual ~BaseName() {} }; // this questionable optimization reduces Allocator size by 8 // probably not worth the loss of generality. typedef unsigned int page_size_type; // max page size typedef page_size_type heap_size_type; // max heap store size explicit Allocator (const BaseName& base_name = BASE_NAME_DEFAULT, byte_t* reserved = NULL, page_size_type reserved_size = 0, heap_size_type max_heap = (1U << 22), /* 4M */ page_size_type disk_page_size = (1U << 26)); /* 64M */ ~Allocator (); /*! @param new_page - true if not adjucent to previous allocation */ byte_t* alloc (page_size_type const size, bool& new_page); /* Total allocated size */ size_t size () const { return size_; } /* Total count of pages */ size_t count() const { return pages_->size(); } #ifdef GU_ALLOCATOR_DEBUG /* appends own vector of Buf structures to the passed one, * should be called only after all allocations have been made. * returns sum of all appended buffers' sizes (same as size()) */ size_t gather (std::vector& out) const; #endif /* GU_ALLOCATOR_DEBUG */ /* After we allocated 3 heap pages, spilling vector into heap should not * be an issue. 
*/ static size_t const INITIAL_VECTOR_SIZE = 4; private: class Page /* base class for memory and file pages */ { public: Page (byte_t* ptr, size_t size) : base_ptr_(ptr), ptr_ (base_ptr_), left_ (size) {} virtual ~Page() {}; byte_t* alloc (size_t size) { byte_t* ret = NULL; if (gu_likely(size <= left_)) { ret = ptr_; ptr_ += size; left_ -= size; } return ret; } const byte_t* base() const { return base_ptr_; } ssize_t size() const { return ptr_ - base_ptr_; } protected: byte_t* base_ptr_; byte_t* ptr_; page_size_type left_; Page& operator=(const Page&); Page (const Page&); }; class HeapPage : public Page { public: HeapPage (page_size_type max_size); ~HeapPage () { free (base_ptr_); } }; class FilePage : public Page { public: FilePage (const std::string& name, page_size_type size); ~FilePage () { fd_.unlink(); } private: FileDescriptor fd_; MMap mmap_; }; class PageStore { public: Page* new_page (page_size_type size) { return my_new_page(size); } protected: virtual ~PageStore() {} private: virtual Page* my_new_page (page_size_type size) = 0; }; class HeapStore : public PageStore { public: HeapStore (heap_size_type max) : PageStore(), left_(max) {} ~HeapStore () {} private: heap_size_type left_; Page* my_new_page (page_size_type const size); }; class FileStore : public PageStore { public: FileStore (const BaseName& base_name, page_size_type page_size) : PageStore(), base_name_(base_name), page_size_(page_size), n_ (0) {} ~FileStore() {} const BaseName& base_name() const { return base_name_; } int size() const { return n_; } private: const BaseName& base_name_; page_size_type const page_size_; int n_; Page* my_new_page (page_size_type const size); FileStore (const FileStore&); FileStore& operator= (const FileStore&); }; Page first_page_; Page* current_page_; HeapStore heap_store_; FileStore file_store_; PageStore* current_store_; gu::Vector pages_; #ifdef GU_ALLOCATOR_DEBUG gu::Vector bufs_; void add_current_to_bufs(); #endif /* GU_ALLOCATOR_DEBUG */ size_t size_; Allocator(const gu::Allocator&); const Allocator& operator=(const gu::Allocator&); class BaseNameDefault : public BaseName { public: BaseNameDefault() {} // this is seemingly required by the standard void print(std::ostream& os) const { os << "alloc"; } }; static BaseNameDefault const BASE_NAME_DEFAULT; }; /* class Allocator */ inline std::ostream& operator<< (std::ostream& os, const Allocator::BaseName& bn) { bn.print(os); return os; } } /* namespace gu */ #endif /* _GU_ALLOC_HPP_ */ galera-3-25.3.20/galerautils/src/gu_monitor.hpp0000644000015300001660000000330613042054732021136 0ustar jenkinsjenkins/* * Copyright (C) 2008 Codership Oy * * $Id$ */ /*! * @file gu_monitor.hpp * * */ #ifndef __GU_MONITOR_HPP__ #define __GU_MONITOR_HPP__ #include #include namespace gu { class Monitor; class Critical; } class gu::Monitor { int mutable refcnt; Mutex mutex; Cond cond; #ifndef NDEBUG pthread_t mutable holder; #endif // copy contstructor and operator= disabled by mutex and cond members. 
// but on Darwin, we got an error 'class gu::Monitor' has pointer data members // so make non-copyable explicitly Monitor(const Monitor&); void operator=(const Monitor&); public: #ifndef NDEBUG Monitor() : refcnt(0), mutex(), cond(), holder(0) {} #else Monitor() : refcnt(0), mutex(), cond() {} #endif ~Monitor() {} void enter() const { Lock lock(mutex); // Teemu, pthread_equal() check seems redundant, refcnt too (counted in cond) // while (refcnt > 0 && pthread_equal(holder, pthread_self()) == 0) while (refcnt) { lock.wait(cond); } refcnt++; #ifndef NDEBUG holder = pthread_self(); #endif } void leave() const { Lock lock(mutex); assert(refcnt > 0); assert(pthread_equal(holder, pthread_self()) != 0); refcnt--; if (refcnt == 0) { cond.signal(); } } }; class gu::Critical { const Monitor& mon; Critical (const Critical&); Critical& operator= (const Critical&); public: Critical(const Monitor& m) : mon(m) { mon.enter(); } ~Critical() { mon.leave(); } }; #endif /* __GU_MONITOR_HPP__ */ galera-3-25.3.20/galerautils/src/gu_uri.hpp0000644000015300001660000001525513042054732020254 0ustar jenkinsjenkins/* * Copyright (C) 2009-2012 Codership Oy * * $Id$ */ /*! * @file gu_url.hpp * * @brief Utility to parse URIs * * Follows http://tools.ietf.org/html/rfc3986 * * @author Teemu Ollakka */ #ifndef __GU_URI_HPP__ #define __GU_URI_HPP__ #include #include #include #include "gu_utils.hpp" #include "gu_regex.hpp" namespace gu { /*! * @brief URIQueryList * * std::multimap is used to implement query list in URI. * @todo This should be changed to real class having get_key(), * get_value() methods for iterators and to get rid of std::multimap * dependency in header. */ typedef std::multimap URIQueryList; /*! * @brief Utility class to parse URIs */ class URI { public: /*! * @class Helper class for authority list representation. */ class Authority { public: /*! * @brief Get "user" part of authority * * @return user substring * @throws NotSet */ const std::string& user() const { return user_.str(); } /*! * @brief Get "host" part of authority * * @return host substring * @throws NotSet */ const std::string& host() const { return host_.str(); } /*! * @brief Get "port" part of authority * * @return port * @throws NotSet */ const std::string& port() const { return port_.str(); } private: friend class gu::URI; Authority() : user_(), host_(), port_() { } RegEx::Match user_; RegEx::Match host_; RegEx::Match port_; }; typedef std::vector AuthorityList; /*! * @brief Construct URI from string * * @param strict if true, throw exception when scheme is not found, * else use a default one * @throws std::invalid_argument if URI is not valid * @throws std::logic_error in case of internal error * @throws NotSet */ URI (const std::string&, bool strict = true); /*! * @brief Get URI string * @return URI string */ const std::string& to_string() const { if (modified_) recompose(); return str_; } /*! * @brief Get URI scheme * * @return URI scheme (always set) * @throws NotSet */ const std::string& get_scheme() const { return scheme_.str(); } /*! * @brief Get URI authority component * * @return URI authority substring * @throws NotSet */ std::string get_authority() const; /*! * @brief Get "user" part of the first entry in authority list * * @return User substring * @throws NotSet */ const std::string& get_user() const { if (authority_.empty()) throw NotSet(); return authority_.front().user(); } /*! 
* @brief Get "host" part of the first entry in authority list * * @return Host substring * @throws NotSet */ const std::string& get_host() const { if (authority_.empty()) throw NotSet(); return authority_.front().host(); } /*! * @brief Get "port" part of the first entry in authority list * * @return Port substring * @throws NotSet */ const std::string& get_port() const { if (authority_.empty()) throw NotSet(); return authority_.front().port(); } /*! * @brief Get authority list * * @return Authority list */ const AuthorityList& get_authority_list() const { return authority_; } /*! * @brief Get URI path * * @return URI path (always set) */ const std::string& get_path() const { return path_.str(); } /*! * @brief Get URI path * * @return URI path * @throws NotSet */ const std::string& get_fragment() const { return fragment_.str(); } /*! * @brief Add query param to URI */ void set_query_param(const std::string&, const std::string&, bool override); void set_option(const std::string& key, const std::string& val) { set_query_param(key, val, true); } void append_option(const std::string& key, const std::string& val) { set_query_param(key, val, false); } /*! * @brief Get URI query list */ const URIQueryList& get_query_list() const { return query_list_; } /*! * @brief return opton by name, * @throws NotFound */ const std::string& get_option(const std::string&) const; const std::string& get_option(const std::string& opt, const std::string& def) const { try { return get_option(opt); } catch (NotFound& ) { return def ; } } private: bool modified_; mutable std::string str_; /*! URI string */ RegEx::Match scheme_; /*! URI scheme part */ AuthorityList authority_; RegEx::Match path_; /*! URI path part */ RegEx::Match fragment_; /*! URI fragment part */ URIQueryList query_list_; /*! URI query list */ /*! * @brief Parse URI from str */ void parse (const std::string& s, bool strict); /*! * @brief Recompose URI in str */ void recompose() const; /*! @throws NotSet */ std::string get_authority(const Authority&) const; static const char* const uri_regex_; /*! regexp string to parse URI */ static RegEx const regex_; /*! URI regexp parser */ }; inline std::ostream& operator<<(std::ostream& os, const URI& uri) { os << uri.to_string(); return os; } } #endif /* __GU_URI_HPP__ */ galera-3-25.3.20/galerautils/src/gu_str.h0000644000015300001660000001057413042054732017724 0ustar jenkinsjenkins/* * Copyright (C) 2010 Codership Oy */ #ifndef GU_STR_H #define GU_STR_H #include #include #include #include /*! * Append after position */ static inline char* gu_str_append(char* str, size_t* off, const char* app, size_t app_len) { char* tmp; assert(str == NULL || *(str + *off - 1) == '\0'); tmp = realloc(str, *off + app_len + 1); if (tmp != NULL) { memcpy(tmp + *off, app, app_len + 1); *off += app_len + 1; } return tmp; } /*! * Get next string after position */ static inline const char* gu_str_next(const char* str) { return strchr(str, '\0') + 1; } /*! * Advance position starting from over n */ static inline const char* gu_str_advance(const char* str, size_t n) { const char* ptr = str; while (n-- > 0) { ptr = gu_str_next(ptr); } return ptr; } /* * Utilities to construct and scan tables from null terminated strings. * The table format is the following: * * name\0\columns\0\rows\0 * colname0\0colname1\0... * elem00\0elem01\0elem02\0... * elem10\0elem11\0elem\12\... * . * . * . 
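 *
 * Illustrative construction sketch using the helpers below (column and
 * cell literals are placeholders and realloc-failure handling is omitted;
 * not taken from this codebase):
 *
 *   size_t off = 0;
 *   char*  str = NULL;
 *   const char* cols[2] = { "name", "value" };
 *   const char* row0[2] = { "foo",  "42"    };
 *   str = gu_str_table_set_name   (str, &off, "params");
 *   str = gu_str_table_set_n_cols (str, &off, 2);
 *   str = gu_str_table_set_n_rows (str, &off, 1);
 *   str = gu_str_table_set_cols   (str, &off, 2, cols);
 *   str = gu_str_table_append_row (str, &off, 2, row0);
 *   gu_str_table_print (stdout, str);
 *   free (str);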
*/ static inline char* gu_str_table_set_name(char* str, size_t* off, const char* name) { return gu_str_append(str, off, name, strlen(name)); } static inline const char* gu_str_table_get_name(const char* str) { return str; } static inline char* gu_str_table_append_size(char* str, size_t* off, size_t n) { char buf[10]; size_t len = snprintf(buf, sizeof(buf), "%zu", n); return gu_str_append(str, off, buf, len); } static inline char* gu_str_table_set_n_cols(char* str, size_t* off, size_t n) { return gu_str_table_append_size(str, off, n); } static inline size_t gu_str_table_get_n_cols(const char* str) { str = gu_str_advance(str, 1); return strtoul(str, NULL, 0); } static inline char* gu_str_table_set_n_rows(char* str, size_t* off, size_t n) { return gu_str_table_append_size(str, off, n); } static inline size_t gu_str_table_get_n_rows(const char* str) { str = gu_str_advance(str, 2); return strtoul(str, NULL, 0); } static inline char* gu_str_table_set_cols(char* str, size_t *off, size_t n, const char* cols[]) { size_t i; for (i = 0; i < n; ++i) { str = gu_str_append(str, off, cols[i], strlen(cols[i])); } return str; } static inline char* gu_str_table_append_row(char* str, size_t *off, size_t n, const char* row[]) { size_t i; for (i = 0; i < n; ++i) { str = gu_str_append(str, off, row[i], strlen(row[i])); } return str; } static inline const char* gu_str_table_get_cols(const char* str, size_t n, char const* row[]) { size_t i; str = gu_str_advance(str, 3); for (i = 0; i < n; i++) { row[i] = str; str = gu_str_next(str); } return str; } static inline const char* gu_str_table_rows_begin(const char* str, size_t n) { return gu_str_advance(str, 3 + n); } static inline const char* gu_str_table_row_get(const char* str, size_t n, char const* row[]) { size_t i; for (i = 0; i < n; ++i) { row[i] = str; str = gu_str_next(str); } return str; } static inline void gu_str_table_print_row(FILE* file, size_t n, const char* const row[]) { size_t i; for (i = 0; i < n; ++i) { fprintf(file, "%s ", row[i]); } fprintf(file, "\n"); } static inline void gu_str_table_print(FILE* file, const char* str) { size_t i; size_t n_cols, n_rows; const char* ptr; char const**vec; fprintf(file, "%s\n", gu_str_table_get_name(str)); n_cols = gu_str_table_get_n_cols(str); n_rows = gu_str_table_get_n_rows(str); vec = malloc(n_cols*sizeof(char*)); ptr = gu_str_table_get_cols(str, n_cols, vec); gu_str_table_print_row(file, n_cols, vec); for (i = 0; i < n_rows; ++i) { ptr = gu_str_table_row_get(ptr, n_cols, vec); gu_str_table_print_row(file, n_cols, vec); } free(vec); } #endif /* GU_STR_H */ galera-3-25.3.20/galerautils/src/gu_cond.hpp0000644000015300001660000000310613042054732020370 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy */ #ifndef __GU_COND__ #define __GU_COND__ #include #include #include #include "gu_macros.h" #include "gu_exception.hpp" // TODO: make exceptions more verbose namespace gu { class Cond { friend class Lock; // non-copyable Cond(const Cond&); void operator=(const Cond&); protected: pthread_cond_t mutable cond; long mutable ref_count; public: Cond () : cond(), ref_count(0) { pthread_cond_init (&cond, NULL); } ~Cond () { int ret; while (EBUSY == (ret = pthread_cond_destroy(&cond))) { usleep (100); } if (gu_unlikely(ret != 0)) { log_fatal << "pthread_cond_destroy() failed: " << ret << " (" << strerror(ret) << ". 
Aborting."; ::abort(); } } inline void signal () const { if (ref_count > 0) { int ret = pthread_cond_signal (&cond); if (gu_unlikely(ret != 0)) throw Exception("pthread_cond_signal() failed", ret); } } inline void broadcast () const { if (ref_count > 0) { int ret = pthread_cond_broadcast (&cond); if (gu_unlikely(ret != 0)) throw Exception("pthread_cond_broadcast() failed", ret); } } }; } #endif // __GU_COND__ galera-3-25.3.20/galerautils/src/gu_rset.cpp0000644000015300001660000002702513042054732020423 0ustar jenkinsjenkins/* Copyright (C) 2013 Codership Oy */ /*! * @file common RecordSet implementation * * Record set is a collection of serialized records of the same type. * * It stores them in an iovec-like collection of buffers before sending * and restores from a single buffer when receiving. * * $Id$ */ #include "gu_rset.hpp" #include "gu_vlq.hpp" #include "gu_hexdump.hpp" #include "gu_throw.hpp" #include "gu_logger.hpp" #include "gu_hash.h" #include namespace gu { void RecordSetOutBase::post_alloc (bool const new_page, const byte_t* const ptr, ssize_t const size) { if (new_page) { Buf b = { ptr, size }; bufs_->push_back (b); } else { bufs_->back().size += size; } size_ += size; } void RecordSetOutBase::post_append (bool const new_page, const byte_t* const ptr, ssize_t const size) { check_.append (ptr, size); post_alloc (new_page, ptr, size); } static int check_size (RecordSet::CheckType const ct) { switch (ct) { case RecordSet::CHECK_NONE: return 0; case RecordSet::CHECK_MMH32: return 4; case RecordSet::CHECK_MMH64: return 8; case RecordSet::CHECK_MMH128: return 16; #define MAX_CHECKSUM_SIZE 16 } log_fatal << "Non-existing RecordSet::CheckType value: " << ct; abort(); } #define VER1_CRC_SIZE sizeof(uint32_t) static int header_size_max_v0() { return 1 + /* version + checksum type */ 9 + /* max payload size in vlq format */ 9 + /* max record count in vlq format */ VER1_CRC_SIZE; /* header checksum */ } int RecordSetOutBase::header_size_max() const { switch (version_) { case EMPTY: assert (0); break; case VER1: return header_size_max_v0(); } log_fatal << "Unsupported RecordSet::Version value: " << version_; abort(); } static int header_size_v1(ssize_t size, ssize_t const count) { int hsize = header_size_max_v0(); assert (size > hsize); assert (count > 0); /* need to converge on the header size as it depends on the total size */ do { int new_hsize = 1 + /* version + checksum type */ uleb128_size(size) + /* size in vlq format */ uleb128_size(count) + /* count in vlq format */ VER1_CRC_SIZE; /* header checksum */ assert (new_hsize <= hsize); if (new_hsize == hsize) break; size -= hsize - new_hsize; hsize = new_hsize; } while (true); assert (hsize > 0); assert (size > hsize); return hsize; } int RecordSetOutBase::header_size() const { switch (version_) { case EMPTY: assert(0); break; case VER1: return header_size_v1 (size_, count_); } log_fatal << "Unsupported RecordSet::Version value: " << version_; abort(); } ssize_t RecordSetOutBase::write_header (byte_t* const buf, ssize_t const size) { int const csize(check_size(check_type_)); assert (header_size_max() + csize <= size); ssize_t const hdr_offset(header_size_max() - header_size()); assert (hdr_offset >= 0); size_ -= hdr_offset; int off(hdr_offset); buf[off] = (static_cast(version_) << 4) | /* upper 4 bytes: ver */ (static_cast(check_type_) & 0x0f); off += 1; off += uleb128_encode(size_, buf + off, size - off); off += uleb128_encode(count_, buf + off, size - off); /* write header CRC */ uint32_t const crc(gu_fast_hash32(buf + hdr_offset, 
off - hdr_offset)); *(reinterpret_cast(buf + off)) = htog(crc); off += VER1_CRC_SIZE; /* append payload checksum */ if (check_type_ != CHECK_NONE) { assert (csize <= size - off); check_.append (buf + hdr_offset, off - hdr_offset); /* append header */ check_.gather (buf + off, csize); } return hdr_offset; } ssize_t RecordSetOutBase::gather (GatherVector& out) { if (count_) { byte_t* const ptr = reinterpret_cast(const_cast(bufs_->front().ptr)); ssize_t const offset = write_header (ptr, bufs_->front().size); bufs_->front().ptr = ptr + offset; bufs_->front().size -= offset; // size_ is taken care of in write_header() out->insert (out->end(), bufs_->begin(), bufs_->end()); return size_; } else { return 0; } } RecordSet::RecordSet (Version ver, CheckType const ct) : size_ (0), count_ (0), version_ (ver), check_type_(ct) { if (gu_unlikely(uint(version_) > MAX_VERSION)) { gu_throw_error (EPROTO) << "Unsupported header version: " << version_; } } RecordSetOutBase::RecordSetOutBase (byte_t* reserved, size_t reserved_size, const BaseName& base_name, CheckType const ct, Version const version #ifdef GU_RSET_CHECK_SIZE ,ssize_t const max_size #endif ) : RecordSet (version, ct), #ifdef GU_RSET_CHECK_SIZE max_size_ (max_size), #endif alloc_ (base_name, reserved, reserved_size), check_ (), bufs_ (), prev_stored_(true) { /* reserve space for header */ size_ = header_size_max() + check_size(check_type_); bool unused; byte_t* ptr = alloc_.alloc (size_, unused); Buf b = { ptr, size_ }; bufs_->push_back (b); } static inline RecordSet::Version header_version (const byte_t* buf, ssize_t const size) { assert (NULL != buf); assert (size > 0); uint const ver((buf[0] & 0xf0) >> 4); assert (ver > 0); if (gu_likely(ver <= RecordSet::MAX_VERSION)) return static_cast(ver); gu_throw_error (EPROTO) << "Unsupported RecordSet version: " << ver; } static inline RecordSet::CheckType ver1_check_type (const byte_t* buf, ssize_t const size) { assert (size > 0); int const ct(buf[0] & 0x0f); switch (ct) { case RecordSet::CHECK_NONE: return RecordSet::CHECK_NONE; case RecordSet::CHECK_MMH32: return RecordSet::CHECK_MMH32; case RecordSet::CHECK_MMH64: return RecordSet::CHECK_MMH64; case RecordSet::CHECK_MMH128: return RecordSet::CHECK_MMH128; } gu_throw_error (EPROTO) << "Unsupported RecordSet checksum type: " << ct; } static inline RecordSet::CheckType header_check_type(RecordSet::Version ver, const byte_t* ptr, ssize_t const size) { assert (size > 0); switch (ver) { case RecordSet::EMPTY: assert(0); return RecordSet::CHECK_NONE; case RecordSet::VER1: return ver1_check_type (ptr, size); } gu_throw_error (EPROTO) << "Unsupported RecordSet version: " << ver; } void RecordSet::init (const byte_t* const ptr, ssize_t const size) { assert (EMPTY == version_); assert (size >= 0); assert (NULL != ptr || 0 == size); assert (NULL == ptr || 0 != size); if (gu_likely ((ptr && size))) { version_ = header_version (ptr, size); check_type_ = header_check_type (version_, ptr, size); } } void RecordSetInBase::parse_header_v1 (size_t const size) { assert (size > 1); int off = 1; off += uleb128_decode (head_ + off, size - off, size_); if (gu_unlikely(static_cast(size_) > static_cast(size))) { gu_throw_error (EPROTO) << "RecordSet size " << size_ << " exceeds buffer size " << size << "\nfirst 4 bytes: " << gu::Hexdump(head_, 4); } off += uleb128_decode (head_ + off, size - off, count_); if (gu_unlikely(static_cast(size_) < static_cast(count_))) { gu_throw_error (EPROTO) << "Corrupted RecordSet header: count " << count_ << " exceeds size " << size_; } /* 
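   A short summary of the VER1 header layout produced by
   RecordSetOutBase::write_header() above (annotation added for clarity):
   one byte holding (version << 4) | check_type, the total set size and the
   record count as unsigned LEB128, and a 4-byte header CRC; the optional
   payload checksum of check_size(check_type_) bytes follows the header.
   Below we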
verify header CRC */ uint32_t const crc_comp(gu_fast_hash32(head_, off)); uint32_t const crc_orig( gtoh(*(reinterpret_cast(head_ + off)))); if (gu_unlikely(crc_comp != crc_orig)) { gu_throw_error (EPROTO) << "RecordSet header CRC mismatch: " << std::showbase << std::internal << std::hex << std::setfill('0') << std::setw(10) << "\ncomputed: " << crc_comp << "\nfound: " << crc_orig << std::dec; } off += VER1_CRC_SIZE; /* checksum is between header and records */ begin_ = off + check_size(check_type_); } /* returns false if checksum matched and true if failed */ void RecordSetInBase::checksum() const { int const cs(check_size(check_type_)); if (cs > 0) /* checksum records */ { Hash check; check.append (head_ + begin_, size_ - begin_); /* records */ check.append (head_, begin_ - cs); /* header */ assert(cs <= MAX_CHECKSUM_SIZE); byte_t result[MAX_CHECKSUM_SIZE]; check.gather(result); const byte_t* const stored_checksum(head_ + begin_ - cs); if (gu_unlikely(memcmp (result, stored_checksum, cs))) { gu_throw_error(EINVAL) << "RecordSet checksum does not match:" << "\ncomputed: " << gu::Hexdump(result, cs) << "\nfound: " << gu::Hexdump(stored_checksum, cs); } } } uint64_t RecordSetInBase::get_checksum() const { unsigned int const checksum_size(check_size(check_type_)); const void* const stored_checksum(head_ + begin_ - checksum_size); uint64_t ret(0); if (checksum_size >= sizeof(uint64_t)) ret = *(static_cast(stored_checksum)); else if (checksum_size >= sizeof(uint32_t)) ret = *(static_cast(stored_checksum)); else if (checksum_size >= sizeof(uint16_t)) ret = *(static_cast(stored_checksum)); else if (checksum_size >= sizeof(uint8_t)) ret = *(static_cast(stored_checksum)); return gu::gtoh(ret); } RecordSetInBase::RecordSetInBase (const byte_t* const ptr, size_t const size, bool const check_now) : RecordSet (), head_ (), next_ (), begin_ () { init (ptr, size, check_now); } void RecordSetInBase::init (const byte_t* const ptr, size_t const size, bool const check_now) { assert (EMPTY == version_); RecordSet::init (ptr, size); head_ = ptr; switch (version_) { case EMPTY: return; case VER1: parse_header_v1(size); // should set begin_ } if (check_now) checksum(); next_ = begin_; assert (size_ > 0); assert (count_ >= 0); assert (count_ <= size_); assert (begin_ > 0); assert (begin_ <= size_); assert (next_ == begin_); } void RecordSetInBase::throw_error (Error code) const { switch (code) { case E_PERM: gu_throw_error (EPERM) << "Access beyond record set end."; case E_FAULT: gu_throw_error (EFAULT) << "Corrupted record set: record extends " << next_ << " beyond set boundary " << size_; } log_fatal << "Unknown error in RecordSetIn."; abort(); } } /* namespace gu */ galera-3-25.3.20/galerautils/src/gu_spooky.h0000644000015300001660000003255413042054732020442 0ustar jenkinsjenkins// Copyright (C) 2012 Codership Oy /*! * @file Spooky hash by Bob Jenkins: * http://www.burtleburtle.net/bob/c/spooky.h * * Original author comments preserved in C++ style. * Original code is public domain * * $Id$ */ #ifndef _gu_spooky_h_ #define _gu_spooky_h_ #include "gu_types.h" #include "gu_byteswap.h" #ifdef __cplusplus extern "C" { #endif #include // for memcpy() /*! GCC complains about 'initializer element is not constant', hence macros */ #define _spooky_numVars 12 #define _spooky_blockSize 96 /* (_spooky_numVars * 8) */ #define _spooky_bufSize 192 /* (_spooky_blockSize * 2) */ static uint64_t const _spooky_const = GU_ULONG_LONG(0xDEADBEEFDEADBEEF); // // This is used if the input is 96 bytes long or longer. 
// // The internal state is fully overwritten every 96 bytes. // Every input bit appears to cause at least 128 bits of entropy // before 96 other bytes are combined, when run forward or backward // For every input bit, // Two inputs differing in just that input bit // Where "differ" means xor or subtraction // And the base value is random // When run forward or backwards one Mix // I tried 3 pairs of each; they all differed by at least 212 bits. // static GU_FORCE_INLINE void _spooky_mix( const uint64_t *data, uint64_t* s0, uint64_t* s1, uint64_t* s2, uint64_t* s3, uint64_t* s4, uint64_t* s5, uint64_t* s6, uint64_t* s7, uint64_t* s8, uint64_t* s9, uint64_t* sA, uint64_t* sB) { *s0 += gu_le64(data[0]); *s2 ^= *sA; *sB ^= *s0; *s0 =GU_ROTL64(*s0,11); *sB += *s1; *s1 += gu_le64(data[1]); *s3 ^= *sB; *s0 ^= *s1; *s1 =GU_ROTL64(*s1,32); *s0 += *s2; *s2 += gu_le64(data[2]); *s4 ^= *s0; *s1 ^= *s2; *s2 =GU_ROTL64(*s2,43); *s1 += *s3; *s3 += gu_le64(data[3]); *s5 ^= *s1; *s2 ^= *s3; *s3 =GU_ROTL64(*s3,31); *s2 += *s4; *s4 += gu_le64(data[4]); *s6 ^= *s2; *s3 ^= *s4; *s4 =GU_ROTL64(*s4,17); *s3 += *s5; *s5 += gu_le64(data[5]); *s7 ^= *s3; *s4 ^= *s5; *s5 =GU_ROTL64(*s5,28); *s4 += *s6; *s6 += gu_le64(data[6]); *s8 ^= *s4; *s5 ^= *s6; *s6 =GU_ROTL64(*s6,39); *s5 += *s7; *s7 += gu_le64(data[7]); *s9 ^= *s5; *s6 ^= *s7; *s7 =GU_ROTL64(*s7,57); *s6 += *s8; *s8 += gu_le64(data[8]); *sA ^= *s6; *s7 ^= *s8; *s8 =GU_ROTL64(*s8,55); *s7 += *s9; *s9 += gu_le64(data[9]); *sB ^= *s7; *s8 ^= *s9; *s9 =GU_ROTL64(*s9,54); *s8 += *sA; *sA += gu_le64(data[10]); *s0 ^= *s8; *s9 ^= *sA; *sA =GU_ROTL64(*sA,22); *s9 += *sB; *sB += gu_le64(data[11]); *s1 ^= *s9; *sA ^= *sB; *sB =GU_ROTL64(*sB,46); *sA += *s0; } // // Mix all 12 inputs together so that h0, h1 are a hash of them all. // // For two inputs differing in just the input bits // Where "differ" means xor or subtraction // And the base value is random, or a counting value starting at that bit // The final result will have each bit of h0, h1 flip // For every input bit, // with probability 50 +- .3% // For every pair of input bits, // with probability 50 +- 3% // // This does not rely on the last Mix() call having already mixed some. // Two iterations was almost good enough for a 64-bit result, but a // 128-bit result is reported, so End() does three iterations. 
// static GU_FORCE_INLINE void _spooky_end_part( uint64_t* h0, uint64_t* h1, uint64_t* h2, uint64_t* h3, uint64_t* h4, uint64_t* h5, uint64_t* h6, uint64_t* h7, uint64_t* h8, uint64_t* h9, uint64_t* h10,uint64_t* h11) { *h11+= *h1; *h2 ^= *h11; *h1 = GU_ROTL64(*h1,44); *h0 += *h2; *h3 ^= *h0; *h2 = GU_ROTL64(*h2,15); *h1 += *h3; *h4 ^= *h1; *h3 = GU_ROTL64(*h3,34); *h2 += *h4; *h5 ^= *h2; *h4 = GU_ROTL64(*h4,21); *h3 += *h5; *h6 ^= *h3; *h5 = GU_ROTL64(*h5,38); *h4 += *h6; *h7 ^= *h4; *h6 = GU_ROTL64(*h6,33); *h5 += *h7; *h8 ^= *h5; *h7 = GU_ROTL64(*h7,10); *h6 += *h8; *h9 ^= *h6; *h8 = GU_ROTL64(*h8,13); *h7 += *h9; *h10^= *h7; *h9 = GU_ROTL64(*h9,38); *h8 += *h10; *h11^= *h8; *h10= GU_ROTL64(*h10,53); *h9 += *h11; *h0 ^= *h9; *h11= GU_ROTL64(*h11,42); *h10+= *h0; *h1 ^= *h10; *h0 = GU_ROTL64(*h0,54); } static GU_FORCE_INLINE void _spooky_end( uint64_t* h0, uint64_t* h1, uint64_t* h2, uint64_t* h3, uint64_t* h4, uint64_t* h5, uint64_t* h6, uint64_t* h7, uint64_t* h8, uint64_t* h9, uint64_t* h10,uint64_t* h11) { #if 0 _spooky_end_part(h0,h1,h2,h3,h4,h5,h6,h7,h8,h9,h10,h11); _spooky_end_part(h0,h1,h2,h3,h4,h5,h6,h7,h8,h9,h10,h11); _spooky_end_part(h0,h1,h2,h3,h4,h5,h6,h7,h8,h9,h10,h11); #endif int i; for (i = 0; i < 3; i++) { _spooky_end_part(h0,h1,h2,h3,h4,h5,h6,h7,h8,h9,h10,h11); } } // // The goal is for each bit of the input to expand into 128 bits of // apparent entropy before it is fully overwritten. // n trials both set and cleared at least m bits of h0 h1 h2 h3 // n: 2 m: 29 // n: 3 m: 46 // n: 4 m: 57 // n: 5 m: 107 // n: 6 m: 146 // n: 7 m: 152 // when run forwards or backwards // for all 1-bit and 2-bit diffs // with diffs defined by either xor or subtraction // with a base of all zeros plus a counter, or plus another bit, or random // static GU_FORCE_INLINE void _spooky_short_mix(uint64_t* h0, uint64_t* h1, uint64_t* h2, uint64_t* h3) { *h2 = GU_ROTL64(*h2,50); *h2 += *h3; *h0 ^= *h2; *h3 = GU_ROTL64(*h3,52); *h3 += *h0; *h1 ^= *h3; *h0 = GU_ROTL64(*h0,30); *h0 += *h1; *h2 ^= *h0; *h1 = GU_ROTL64(*h1,41); *h1 += *h2; *h3 ^= *h1; *h2 = GU_ROTL64(*h2,54); *h2 += *h3; *h0 ^= *h2; *h3 = GU_ROTL64(*h3,48); *h3 += *h0; *h1 ^= *h3; *h0 = GU_ROTL64(*h0,38); *h0 += *h1; *h2 ^= *h0; *h1 = GU_ROTL64(*h1,37); *h1 += *h2; *h3 ^= *h1; *h2 = GU_ROTL64(*h2,62); *h2 += *h3; *h0 ^= *h2; *h3 = GU_ROTL64(*h3,34); *h3 += *h0; *h1 ^= *h3; *h0 = GU_ROTL64(*h0,5); *h0 += *h1; *h2 ^= *h0; *h1 = GU_ROTL64(*h1,36); *h1 += *h2; *h3 ^= *h1; } // // Mix all 4 inputs together so that h0, h1 are a hash of them all. 
// // For two inputs differing in just the input bits // Where "differ" means xor or subtraction // And the base value is random, or a counting value starting at that bit // The final result will have each bit of h0, h1 flip // For every input bit, // with probability 50 +- .3% (it is probably better than that) // For every pair of input bits, // with probability 50 +- .75% (the worst case is approximately that) // static GU_FORCE_INLINE void _spooky_short_end(uint64_t* h0, uint64_t* h1, uint64_t* h2, uint64_t* h3) { *h3 ^= *h2; *h2 = GU_ROTL64(*h2,15); *h3 += *h2; *h0 ^= *h3; *h3 = GU_ROTL64(*h3,52); *h0 += *h3; *h1 ^= *h0; *h0 = GU_ROTL64(*h0,26); *h1 += *h0; *h2 ^= *h1; *h1 = GU_ROTL64(*h1,51); *h2 += *h1; *h3 ^= *h2; *h2 = GU_ROTL64(*h2,28); *h3 += *h2; *h0 ^= *h3; *h3 = GU_ROTL64(*h3,9); *h0 += *h3; *h1 ^= *h0; *h0 = GU_ROTL64(*h0,47); *h1 += *h0; *h2 ^= *h1; *h1 = GU_ROTL64(*h1,54); *h2 += *h1; *h3 ^= *h2; *h2 = GU_ROTL64(*h2,32); *h3 += *h2; *h0 ^= *h3; *h3 = GU_ROTL64(*h3,25); *h0 += *h3; *h1 ^= *h0; *h0 = GU_ROTL64(*h0,63); *h1 += *h0; } // // short hash ... it could be used on any message, // but it's used by Spooky just for short messages. // static GU_INLINE void gu_spooky_short_host( const void* const message, size_t const length, uint64_t* const hash) { union { const uint8_t* p8; uint32_t* p32; uint64_t* p64; #if !GU_ALLOW_UNALIGNED_READS size_t i; #endif /* !GU_ALLOW_UNALIGNED_READS */ } u; u.p8 = (const uint8_t *)message; #if !GU_ALLOW_UNALIGNED_READS if (u.i & 0x7) { uint64_t buf[_spooky_numVars << 1]; memcpy(buf, message, length); u.p64 = buf; } #endif /* !GU_ALLOW_UNALIGNED_READS */ size_t remainder = length & 0x1F; /* length%32 */ /* author version : */ // uint64_t a = gu_le64(*hash[0]); // uint64_t b = gu_le64(*hash[1]); /* consistent seed version: */ uint64_t a = 0; uint64_t b = 0; uint64_t c = _spooky_const; uint64_t d = _spooky_const; if (length > 15) { const uint64_t *end = u.p64 + ((length >> 5) << 2); /* (length/32)*4 */ // handle all complete sets of 32 bytes for (; u.p64 < end; u.p64 += 4) { c += gu_le64(u.p64[0]); d += gu_le64(u.p64[1]); _spooky_short_mix(&a, &b, &c, &d); a += gu_le64(u.p64[2]); b += gu_le64(u.p64[3]); } //Handle the case of 16+ remaining bytes. if (remainder >= 16) { c += gu_le64(u.p64[0]); d += gu_le64(u.p64[1]); _spooky_short_mix(&a, &b, &c, &d); u.p64 += 2; remainder -= 16; } } // Handle the last 0..15 bytes, and its length d = ((uint64_t)length) << 56; switch (remainder) { case 15: d += ((uint64_t)u.p8[14]) << 48; case 14: d += ((uint64_t)u.p8[13]) << 40; case 13: d += ((uint64_t)u.p8[12]) << 32; case 12: d += gu_le32(u.p32[2]); c += gu_le64(u.p64[0]); break; case 11: d += ((uint64_t)u.p8[10]) << 16; case 10: d += ((uint64_t)u.p8[9]) << 8; case 9: d += (uint64_t)u.p8[8]; case 8: c += gu_le64(u.p64[0]); break; case 7: c += ((uint64_t)u.p8[6]) << 48; case 6: c += ((uint64_t)u.p8[5]) << 40; case 5: c += ((uint64_t)u.p8[4]) << 32; case 4: c += gu_le32(u.p32[0]); break; case 3: c += ((uint64_t)u.p8[2]) << 16; case 2: c += ((uint64_t)u.p8[1]) << 8; case 1: c += (uint64_t)u.p8[0]; break; case 0: c += _spooky_const; d += _spooky_const; } _spooky_short_end(&a, &b, &c, &d); // @note - in native-endian order! 
hash[0] = a; hash[1] = b; } static GU_FORCE_INLINE void gu_spooky_short( const void* message, size_t length, void* const hash) { uint64_t* const u64 = (uint64_t*)hash; gu_spooky_short_host(message, length, u64); u64[0] = gu_le64(u64[0]); u64[1] = gu_le64(u64[1]); } // do the whole hash in one call static GU_INLINE void gu_spooky_inline ( const void* const message, size_t const length, uint64_t* const hash) { #ifdef GU_USE_SPOOKY_SHORT if (length < _spooky_bufSize) { gu_spooky_short_base (message, length, hash); return; } #endif /* GU_USE_SPOOKY_SHORT */ uint64_t h0,h1,h2,h3,h4,h5,h6,h7,h8,h9,h10,h11; uint64_t buf[_spooky_numVars]; uint64_t* end; union { const uint8_t* p8; uint64_t* p64; #if !GU_ALLOW_UNALIGNED_READS size_t i; #endif /* !GU_ALLOW_UNALIGNED_READS */ } u; size_t remainder; /* this is how the author wants it: a possibility for different seeds h0=h3=h6=h9 = gu_le64(hash[0]); h1=h4=h7=h10 = gu_le64(hash[1]); * this is how we want it - constant seed */ h0=h3=h6=h9 = 0; h1=h4=h7=h10 = 0; h2=h5=h8=h11 = _spooky_const; u.p8 = (const uint8_t*) message; end = u.p64 + (length/_spooky_blockSize)*_spooky_numVars; // handle all whole _spooky_blockSize blocks of bytes #if !GU_ALLOW_UNALIGNED_READS if ((u.i & 0x7) == 0) { #endif /* !GU_ALLOW_UNALIGNED_READS */ while (u.p64 < end) { _spooky_mix(u.p64, &h0,&h1,&h2,&h3,&h4,&h5,&h6,&h7,&h8,&h9,&h10,&h11); u.p64 += _spooky_numVars; } #if !GU_ALLOW_UNALIGNED_READS } else { while (u.p64 < end) { memcpy(buf, u.p64, _spooky_blockSize); _spooky_mix(buf, &h0,&h1,&h2,&h3,&h4,&h5,&h6,&h7,&h8,&h9,&h10,&h11); u.p64 += _spooky_numVars; } } #endif /* !GU_ALLOW_UNALIGNED_READS */ // handle the last partial block of _spooky_blockSize bytes remainder = (length - ((const uint8_t*)end - (const uint8_t*)message)); memcpy(buf, end, remainder); memset(((uint8_t*)buf) + remainder, 0, _spooky_blockSize - remainder); ((uint8_t*)buf)[_spooky_blockSize - 1] = remainder; _spooky_mix(buf, &h0,&h1,&h2,&h3,&h4,&h5,&h6,&h7,&h8,&h9,&h10,&h11); // do some final mixing _spooky_end(&h0,&h1,&h2,&h3,&h4,&h5,&h6,&h7,&h8,&h9,&h10,&h11); /*! @note: in native order */ hash[0] = h0; hash[1] = h1; } /* As is apparent from the gu_spooky_inline(), Spooky hash is enormous. * Since it has advantage only on long messages, it makes sense to make it * a regular function to avoid code bloat. * WARNING: does not do final endian conversion! 
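 *
 * Illustrative call sequence (an added sketch; the functions are declared
 * and defined just below, the key and its length are arbitrary):
 *
 *   uint64_t h[2];
 *   gu_spooky128_host("key", 3, h);        // 128-bit hash, host byte order
 *   uint64_t h64 = gu_spooky64 ("key", 3); // equals h[0]
 *   uint32_t h32 = gu_spooky32 ("key", 3); // low 32 bits of h[0]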
*/ extern void gu_spooky128_host (const void* const msg, size_t const len, uint64_t* res); /* returns hash in the canonical byte order, as a byte array */ static GU_FORCE_INLINE void gu_spooky128 (const void* const msg, size_t const len, void* const res) { uint64_t* const r = (uint64_t*)res; gu_spooky128_host (msg, len, r); r[0] = gu_le64(r[0]); r[1] = gu_le64(r[1]); } /* returns hash as an integer, in host byte-order */ static GU_FORCE_INLINE uint64_t gu_spooky64 (const void* const msg, size_t const len) { uint64_t res[2]; gu_spooky128_host (msg, len, res); return res[0]; } /* returns hash as an integer, in host byte-order */ static GU_FORCE_INLINE uint32_t gu_spooky32 (const void* const msg, size_t const len) { uint64_t res[2]; gu_spooky128_host (msg, len, res); return (uint32_t)res[0]; } #ifdef __cplusplus } #endif #endif /* _gu_spooky_h_ */ galera-3-25.3.20/galerautils/src/gu_datetime.cpp0000644000015300001660000000550713042054732021243 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy * * $Id$ */ #include "gu_datetime.hpp" #include "gu_logger.hpp" #include "gu_utils.hpp" extern "C" { #include "gu_time.h" } std::ostream& gu::datetime::operator<<(std::ostream& os, const Date& d) { os << d.get_utc(); return os; } std::ostream& gu::datetime::operator<<(std::ostream& os, const Period& p) { os << "P"; int64_t nsecs(p.get_nsecs()); if (nsecs/Year > 0) { os << (nsecs/Year) << "Y"; nsecs %= Year; } if (nsecs/Month > 0) { os << (nsecs/Month) << "M"; nsecs %= Month; } if (nsecs/Day > 0) { os << (nsecs/Day) << "D"; nsecs %= Day; } if (nsecs > 0) { os << "T"; } if (nsecs/Hour > 0) { os << (nsecs/Hour) << "H"; nsecs %= Hour; } if (nsecs/Min > 0) { os << (nsecs/Min) << "M"; nsecs %= Min; } if (double(nsecs)/Sec >= 1.e-9) { os << (double(nsecs)/Sec) << "S"; } return os; } void gu::datetime::Date::parse(const std::string& str) { if (str == "") { return; } gu_throw_fatal << "not implemented"; } const char* const gu::datetime::Period::period_regex = "^(P)(([0-9]+)Y)?(([0-9]+)M)?(([0-9]+)D)?" 
/* 1 23 45 67 */ "((T)?(([0-9]+)H)?(([0-9]+)M)?(([0-9]+)(\\.([0-9]+))?S)?)?"; /* 89 11 13 15 16 */ enum { GU_P = 1, GU_YEAR = 3, GU_MONTH = 5, GU_DAY = 7, GU_HOUR = 10, GU_MIN = 12, GU_SEC = 15, GU_SEC_D = 16, GU_NUM_PARTS = 17 }; gu::RegEx const gu::datetime::Period::regex(period_regex); void gu::datetime::Period::parse(const std::string& str) { std::vector parts = regex.match(str, GU_NUM_PARTS); if (parts[GU_P].is_set() == false) { if (str == "") { return; } else { gu_throw_error (EINVAL) << "Period " << str << " not valid"; } } if (parts[GU_YEAR].is_set()) { nsecs += from_string(parts[GU_YEAR].str())*Year; } if (parts[GU_MONTH].is_set()) { nsecs += from_string(parts[GU_MONTH].str())*Month; } if (parts[GU_DAY].is_set()) { nsecs += from_string(parts[GU_DAY].str())*Day; } if (parts[GU_HOUR].is_set()) { nsecs += from_string(parts[GU_HOUR].str())*Hour; } if (parts[GU_MIN].is_set()) { nsecs += from_string(parts[GU_MIN].str())*Min; } if (parts[GU_SEC].is_set()) { long long s(from_string(parts[GU_SEC].str())); nsecs += s*Sec; } if (parts[GU_SEC_D].is_set()) { double d(from_string(parts[GU_SEC_D].str())); nsecs += static_cast(d*Sec); } } galera-3-25.3.20/galerautils/src/gu_histogram.cpp0000644000015300001660000000420413042054732021435 0ustar jenkinsjenkins/* * Copyright (C) 2014 Codership Oy */ #include "gu_histogram.hpp" #include "gu_logger.hpp" #include "gu_throw.hpp" #include "gu_string_utils.hpp" // strsplit() #include #include #include #include gu::Histogram::Histogram(const std::string& vals) : cnt_() { std::vector varr = gu::strsplit(vals, ','); for (std::vector::const_iterator i = varr.begin(); i != varr.end(); ++i) { double val; std::istringstream is(*i); is >> val; if (is.fail()) { gu_throw_fatal << "Parse error"; } if (cnt_.insert(std::make_pair(val, 0)).second == false) { gu_throw_fatal << "Failed to insert value: " << val; } } } void gu::Histogram::insert(const double val) { if (val < 0.0) { log_warn << "Negative value (" << val << "), discarding"; return; } // Returns element that has key greater to val, // the correct bin is one below that std::map::iterator i(cnt_.upper_bound(val)); if (i == cnt_.end()) { ++cnt_.rbegin()->second; } else if (i == cnt_.begin()) { log_warn << "value " << val << " below histogram range, discarding"; } else { --i; ++i->second; } } void gu::Histogram::clear() { for (std::map::iterator i = cnt_.begin(); i != cnt_.end(); ++i) { i->second = 0; } } std::ostream& gu::operator<<(std::ostream& os, const Histogram& hs) { std::map::const_iterator i, i_next; long long norm = 0; for (i = hs.cnt_.begin(); i != hs.cnt_.end(); ++i) { norm += i->second; } for (i = hs.cnt_.begin(); i != hs.cnt_.end(); i = i_next) { i_next = i; ++i_next; os << i->first << ":" << std::fabs(double(i->second)/double(norm)); if (i_next != hs.cnt_.end()) os << ","; } return os; } std::string gu::Histogram::to_string() const { std::ostringstream os; os << *this; return os.str(); } galera-3-25.3.20/galerautils/src/gu_debug_sync.cpp0000644000015300001660000000237313042054732021567 0ustar jenkinsjenkins// // Copyright (C) 2014 Codership Oy // #ifdef GU_DBUG_ON #include "gu_debug_sync.hpp" #include "gu_lock.hpp" #include namespace { gu::Mutex sync_mutex; typedef std::multimap SyncMap; SyncMap sync_waiters; } void gu_debug_sync_wait(const std::string& sync) { gu::Lock lock(sync_mutex); gu::Cond cond; log_debug << "enter sync wait '" << sync << "'"; SyncMap::iterator i( sync_waiters.insert(std::make_pair(sync, &cond))); lock.wait(cond); sync_waiters.erase(i); log_debug << "leave sync wait '" << 
sync << "'"; } void gu_debug_sync_signal(const std::string& sync) { gu::Lock lock(sync_mutex); std::pair range(sync_waiters.equal_range(sync)); for (SyncMap::iterator i(range.first); i != range.second; ++i) { log_debug << "signalling waiter"; i->second->signal(); } } std::string gu_debug_sync_waiters() { std::string ret; gu::Lock lock(sync_mutex); for (SyncMap::iterator i(sync_waiters.begin()); i != sync_waiters.end();) { ret += i->first; ++i; if (i != sync_waiters.end()) ret += " "; } return ret; } #endif // GU_DBUG_ON galera-3-25.3.20/galerautils/src/gu_log.h0000644000015300001660000000526513042054732017676 0ustar jenkinsjenkins// Copyright (C) 2007-2014 Codership Oy /** * @file Logging API * * $Id$ */ #ifndef _gu_log_h_ #define _gu_log_h_ #include "gu_macros.h" #include /* For NULL */ #if defined(__cplusplus) extern "C" { #endif /** * @typedef * Defines severity classes for log messages: * FATAL - is a fatal malfunction of the library which cannot be recovered * from. Application must close. * error - error condition in the library which prevents further normal * operation but can be recovered from by the application. E.g. EAGAIN. * warn - some abnormal condition which library can recover from by itself. * * info - just an informative log message. * * debug - debugging message. */ typedef enum gu_log_severity { GU_LOG_FATAL, GU_LOG_ERROR, GU_LOG_WARN, GU_LOG_INFO, GU_LOG_DEBUG } gu_log_severity_t; /** * @typedef * Defines a type of callback function that application can provide * to do the logging */ typedef void (*gu_log_cb_t) (int severity, const char* msg); /** Helper for macros defined below. Should not be called directly. */ extern int gu_log (gu_log_severity_t severity, const char* file, const char* function, const int line, ...); /** This variable is made global only for the purpose of using it in * gu_debug() macro and avoid calling gu_log() when debug is off. * Don't use it directly! */ extern gu_log_severity_t gu_log_max_level; #define gu_log_debug (GU_LOG_DEBUG == gu_log_max_level) #if defined(__cplusplus) } #endif #if !defined(__cplusplus) || defined(GALERA_LOG_H_ENABLE_CXX) // NOTE: don't add "\n" here even if you really want to do it #define GU_LOG_C(level, ...)\ gu_log(level, __FILE__, __func__, __LINE__,\ __VA_ARGS__, NULL) /** * @name Logging macros. * Must be implemented as macros to report the location of the code where * they are called. */ /*@{*/ #define gu_fatal(...) GU_LOG_C(GU_LOG_FATAL, __VA_ARGS__, NULL) #define gu_error(...) GU_LOG_C(GU_LOG_ERROR, __VA_ARGS__, NULL) #define gu_warn(...) GU_LOG_C(GU_LOG_WARN, __VA_ARGS__, NULL) #define gu_info(...) GU_LOG_C(GU_LOG_INFO, __VA_ARGS__, NULL) #define gu_debug(...) if (gu_unlikely(gu_log_debug)) \ { GU_LOG_C(GU_LOG_DEBUG, __VA_ARGS__, NULL); } /*@}*/ #endif /* __cplusplus */ #endif /* _gu_log_h_ */ #ifdef __GU_LOGGER__ // C++ logger should use the same stuff, so export it #ifndef _gu_log_extra_ #define _gu_log_extra_ extern "C" { extern bool gu_log_self_tstamp; extern gu_log_cb_t gu_log_cb; extern void gu_log_cb_default (int, const char*); extern const char* gu_log_level_str[]; } #endif /* _gu_log_extra_ */ #endif /* __GU_LOGGER__ */ galera-3-25.3.20/galerautils/src/gu_thread.hpp0000644000015300001660000000503713042054732020721 0ustar jenkinsjenkins// // Copyright (C) 2016 Codership Oy // // // Threading utilities // #ifndef GU_THREAD_HPP #define GU_THREAD_HPP #include #include namespace gu { // // Wrapper class for thread scheduling parameters. 
For details, // about values see sched_setscheduler() and pthread_setschedparams() // documentation. // class ThreadSchedparam { public: // // Default constructor. Initializes to default system // scheduling parameters. // ThreadSchedparam() : policy_(SCHED_OTHER), prio_ (0) { } // // Construct ThreadSchedparam from given policy and priority // integer values. // ThreadSchedparam(int policy, int prio) : policy_(policy), prio_ (prio) { } // // Construct ThreadSchedparam from given string representation // which must have form of // // : // // wehre policy is one of "other", "fifo", "rr" and priority // is an integer. // ThreadSchedparam(const std::string& param); // Return scheduling policy int policy() const { return policy_; } // Return scheduling priority int prio() const { return prio_ ; } // Equal to operator overload bool operator==(const ThreadSchedparam& other) const { return (policy_ == other.policy_ && prio_ == other.prio_); } // Not equal to operator overload bool operator!=(const ThreadSchedparam& other) const { return !(*this == other); } // Default system ThreadSchedparam static ThreadSchedparam system_default; void print(std::ostream& os) const; private: int policy_; // Scheduling policy int prio_; // Scheduling priority }; // // Return current scheduling parameters for given thread // ThreadSchedparam thread_get_schedparam(pthread_t thread); // // Set scheduling parameters for given thread. // // Throws gu::Exception if setting parameters fails. // void thread_set_schedparam(pthread_t thread, const ThreadSchedparam&); // // Insertion operator for ThreadSchedparam // inline std::ostream& operator<<(std::ostream& os, const gu::ThreadSchedparam& sp) { sp.print(os); return os; } } #endif // GU_THREAD_HPP galera-3-25.3.20/galerautils/src/gu_prodcons.cpp0000644000015300001660000000341613042054732021273 0ustar jenkinsjenkins// Copyright (C) 2009 Codership Oy #include "gu_prodcons.hpp" #include #include using namespace std; class gu::prodcons::MessageQueue { public: MessageQueue() : que() { } bool empty() const { return que.empty(); } size_t size() const { return que.size(); } const Message& front() const { return que.front(); } void pop_front() { que.pop_front(); } void push_back(const Message& msg) { que.push_back(msg); } private: std::deque que; }; void gu::prodcons::Producer::send(const Message& msg, Message* ack) { cons.queue_and_wait(msg, ack); } const gu::prodcons::Message* gu::prodcons::Consumer::get_next_msg() { const Message* ret = 0; Lock lock(mutex); if (mque->empty() == false) { ret = &mque->front(); } return ret; } void gu::prodcons::Consumer::queue_and_wait(const Message& msg, Message* ack) { Lock lock(mutex); mque->push_back(msg); if (mque->size() == 1) { notify(); } lock.wait(msg.get_producer().get_cond()); assert(&rque->front().get_producer() == &msg.get_producer()); if (ack) { *ack = rque->front(); } rque->pop_front(); if (rque->empty() == false) { rque->front().get_producer().get_cond().signal(); } } void gu::prodcons::Consumer::return_ack(const Message& ack) { Lock lock(mutex); assert(&ack.get_producer() == &mque->front().get_producer()); rque->push_back(ack); mque->pop_front(); if (rque->size() == 1) { ack.get_producer().get_cond().signal(); } } gu::prodcons::Consumer::Consumer() : mutex(), mque(new MessageQueue), rque(new MessageQueue) { } gu::prodcons::Consumer::~Consumer() { delete mque; delete rque; } galera-3-25.3.20/galerautils/src/gu_limits.c0000644000015300001660000000740713042054732020411 0ustar jenkinsjenkins// Copyright (C) 2013-2016 Codership Oy /** 
* @file system limit macros * * $Id:$ */ #include "gu_limits.h" #include "gu_log.h" #include #include #include #if defined(__APPLE__) #include // doesn't seem to be used directly, but jst in case #include static long darwin_phys_pages (void) { /* Note: singleton pattern would be useful here */ vm_statistics64_data_t vm_stat; unsigned int count = HOST_VM_INFO64_COUNT; kern_return_t ret = host_statistics64 (mach_host_self (), HOST_VM_INFO64, (host_info64_t) &vm_stat, &count); if (ret != KERN_SUCCESS) { gu_error ("host_statistics64 failed with code %d", ret); return 0; } /* This gives a value a little less than physical memory of computer */ return vm_stat.free_count + vm_stat.active_count + vm_stat.inactive_count + vm_stat.wire_count; /* Exact value may be obtain via sysctl ({CTL_HW, HW_MEMSIZE}) */ /* Note: sysctl is 60% slower compared to host_statistics64 */ } static long darwin_avphys_pages (void) { vm_statistics64_data_t vm_stat; unsigned int count = HOST_VM_INFO64_COUNT; kern_return_t ret = host_statistics64 (mach_host_self (), HOST_VM_INFO64, (host_info64_t) &vm_stat, &count); if (ret != KERN_SUCCESS) { gu_error ("host_statistics64 failed with code %d", ret); return 0; } /* Note: * vm_stat.free_count == vm_page_free_count + vm_page_speculative_count */ return vm_stat.free_count - vm_stat.speculative_count; } static inline size_t page_size() { return getpagesize(); } static inline size_t phys_pages() { return darwin_phys_pages(); } static inline size_t avphys_pages() { return darwin_avphys_pages(); } #elif defined(__FreeBSD__) #include // VM_TOTAL #include // struct vmtotal #include static long freebsd_avphys_pages (void) { /* TODO: 1) sysctlnametomib may be called once */ /* 2) vm.stats.vm.v_cache_count is potentially free memory too */ int mib_vm_stats_vm_v_free_count[4]; size_t mib_sz = 4; int rc = sysctlnametomib ("vm.stats.vm.v_free_count", mib_vm_stats_vm_v_free_count, &mib_sz); if (rc != 0) { gu_error ("sysctlnametomib(vm.stats.vm.v_free_count) failed, code %d", rc); return 0; } unsigned int vm_stats_vm_v_free_count; size_t sz = sizeof (vm_stats_vm_v_free_count); rc = sysctl (mib_vm_stats_vm_v_free_count, mib_sz, &vm_stats_vm_v_free_count, &sz, NULL, 0); if (rc != 0) { gu_error ("sysctl(vm.stats.vm.v_free_count) failed with code %d", rc); return 0; } return vm_stats_vm_v_free_count; } static inline size_t page_size() { return sysconf(_SC_PAGESIZE); } static inline size_t phys_pages() { return sysconf(_SC_PHYS_PAGES); } static inline size_t avphys_pages() { return freebsd_avphys_pages(); } #else /* !__APPLE__ && !__FreeBSD__ */ static inline size_t page_size() { return sysconf(_SC_PAGESIZE); } static inline size_t phys_pages() { return sysconf(_SC_PHYS_PAGES); } static inline size_t avphys_pages() { return sysconf(_SC_AVPHYS_PAGES); } #endif /* !__APPLE__ && !__FreeBSD__ */ #define GU_DEFINE_FUNCTION(func) \ size_t gu_##func() \ { \ static size_t ret = 0; \ if (0 == ret) ret = func(); \ return ret; \ } GU_DEFINE_FUNCTION(page_size) GU_DEFINE_FUNCTION(phys_pages) GU_DEFINE_FUNCTION(avphys_pages) galera-3-25.3.20/galerautils/src/gu_mutex.hpp0000644000015300001660000000343013042054732020607 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy * */ #ifndef __GU_MUTEX__ #define __GU_MUTEX__ #include #include #include #include "gu_macros.h" #include "gu_mutex.h" #include "gu_throw.hpp" namespace gu { class Mutex { public: Mutex () : value() { gu_mutex_init (&value, NULL); // always succeeds } ~Mutex () { int err = gu_mutex_destroy (&value); if (gu_unlikely(err != 0)) { 
gu_throw_error (err) << "pthread_mutex_destroy()"; } } void lock() { gu_mutex_lock(&value); } void unlock() { gu_mutex_unlock(&value); } protected: gu_mutex_t mutable value; private: Mutex (const Mutex&); Mutex& operator= (const Mutex&); friend class Lock; }; class RecursiveMutex { public: RecursiveMutex() : mutex_() { pthread_mutexattr_t mattr; pthread_mutexattr_init(&mattr); pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE); pthread_mutex_init(&mutex_, &mattr); pthread_mutexattr_destroy(&mattr); } ~RecursiveMutex() { pthread_mutex_destroy(&mutex_); } void lock() { if (pthread_mutex_lock(&mutex_)) gu_throw_fatal; } void unlock() { if (pthread_mutex_unlock(&mutex_)) gu_throw_fatal; } private: RecursiveMutex(const RecursiveMutex&); void operator=(const RecursiveMutex&); pthread_mutex_t mutex_; }; } #endif /* __GU_MUTEX__ */ galera-3-25.3.20/galerautils/src/gu_limits.h0000644000015300001660000000252613042054732020413 0ustar jenkinsjenkins// Copyright (C) 2008-2016 Codership Oy /** * @file system limit macros * * $Id$ */ #ifndef _gu_limits_h_ #define _gu_limits_h_ #include #ifdef __cplusplus extern "C" { #endif extern size_t gu_page_size(void); extern size_t gu_phys_pages(void); extern size_t gu_avphys_pages(void); #ifdef __cplusplus } // extern "C" #endif #define GU_PAGE_SIZE gu_page_size() /* returns multiple of page size that is no less than page size */ static inline size_t gu_page_size_multiple(size_t const requested_size) { size_t const sys_page_size = GU_PAGE_SIZE; size_t const multiple = requested_size / sys_page_size; return sys_page_size * (0 == multiple ? 1 : multiple); } static inline size_t gu_avphys_bytes() { // to detect overflow on systems with >4G of RAM, see #776 unsigned long long avphys = gu_avphys_pages(); avphys *= gu_page_size(); size_t max = -1; return (avphys < max ? 
avphys : max); } #include #define GU_ULONG_MAX ULONG_MAX #define GU_LONG_MAX LONG_MAX #define GU_LONG_MIN LONG_MIN #ifdef ULLONG_MAX #define GU_ULLONG_MAX ULLONG_MAX #define GU_LLONG_MAX LLONG_MAX #define GU_LLONG_MIN LLONG_MIN #else #define GU_ULLONG_MAX 0xffffffffffffffffULL #define GU_LLONG_MAX 0x7fffffffffffffffLL #define GU_LLONG_MIN (-GU_LONG_LONG_MAX - 1) #endif #endif /* _gu_limits_h_ */ galera-3-25.3.20/galerautils/src/gu_string_utils.cpp0000644000015300001660000000447613042054732022201 0ustar jenkinsjenkins// Copyright (C) 2009-2010 Codership Oy #include "gu_string_utils.hpp" #include "gu_assert.hpp" #include using std::string; using std::vector; vector gu::strsplit(const string& s, char sep) { vector ret; size_t pos, prev_pos = 0; while ((pos = s.find_first_of(sep, prev_pos)) != string::npos) { ret.push_back(s.substr(prev_pos, pos - prev_pos)); prev_pos = pos + 1; } if (s.length() > prev_pos) { ret.push_back(s.substr(prev_pos, s.length() - prev_pos)); } return ret; } vector gu::tokenize(const string& s, const char sep, const char esc, const bool empty) { vector ret; size_t pos, prev_pos, search_pos; prev_pos = search_pos = 0; while ((pos = s.find_first_of(sep, search_pos)) != string::npos) { assert (pos >= prev_pos); if (esc != '\0' && pos > search_pos && esc == s[pos - 1]) { search_pos = pos + 1; continue; } if (pos > prev_pos || empty) { string t = s.substr(prev_pos, pos - prev_pos); // get rid of escapes size_t p, search_p = 0; while ((p = t.find_first_of(esc, search_p)) != string::npos && esc != '\0') { if (p > search_p) { t.erase(p, 1); search_p = p + 1; } } ret.push_back(t); } prev_pos = search_pos = pos + 1; } if (s.length() > prev_pos) { ret.push_back(s.substr(prev_pos, s.length() - prev_pos)); } else if (s.length() == prev_pos && empty) { assert(0 == prev_pos || s[prev_pos - 1] == sep); ret.push_back(""); } return ret; } void gu::trim (string& s) { const ssize_t s_length = s.length(); for (ssize_t begin = 0; begin < s_length; ++begin) { if (!isspace(s[begin])) { for (ssize_t end = s_length - 1; end >= begin; --end) { if (!isspace(s[end])) { s = s.substr(begin, end - begin + 1); return; } } assert(0); } } s.clear(); } galera-3-25.3.20/galerautils/src/gu_lock.hpp0000644000015300001660000000323513042054732020400 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy * */ #ifndef __GU_LOCK__ #define __GU_LOCK__ #include #include #include "gu_exception.hpp" #include "gu_logger.hpp" #include "gu_mutex.hpp" #include "gu_cond.hpp" #include "gu_datetime.hpp" namespace gu { class Lock { pthread_mutex_t* const value; Lock (const Lock&); Lock& operator=(const Lock&); public: Lock (const Mutex& mtx) : value(&mtx.value) { int err = pthread_mutex_lock (value); if (gu_unlikely(err)) { std::string msg = "Mutex lock failed: "; msg = msg + strerror(err); throw Exception(msg.c_str(), err); } } virtual ~Lock () { int err = pthread_mutex_unlock (value); if (gu_unlikely(err)) { log_fatal << "Mutex unlock failed: " << err << " (" << strerror(err) << "), Aborting."; ::abort(); } // log_debug << "Unlocked mutex " << value; } inline void wait (const Cond& cond) { cond.ref_count++; pthread_cond_wait (&(cond.cond), value); cond.ref_count--; } inline void wait (const Cond& cond, const datetime::Date& date) { timespec ts; date._timespec(ts); cond.ref_count++; int ret = pthread_cond_timedwait (&(cond.cond), value, &ts); cond.ref_count--; if (gu_unlikely(ret)) gu_throw_error(ret); } }; } #endif /* __GU_LOCK__ */ 
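// Added illustration, not part of the original archive: a minimal sketch of
// how the gu::Mutex, gu::Cond and gu::Lock primitives defined in the headers
// above are typically combined into a monitor-style wait/signal pattern.
// The queue and the function names are hypothetical; only the gu:: classes
// are taken from gu_mutex.hpp, gu_cond.hpp and gu_lock.hpp shown above.

#include <deque>
#include "gu_mutex.hpp"
#include "gu_cond.hpp"
#include "gu_lock.hpp"

static gu::Mutex       mtx;
static gu::Cond        cond;
static std::deque<int> queue_;

void produce(int v)
{
    gu::Lock lock(mtx);     // locks in constructor, unlocks in destructor
    queue_.push_back(v);
    cond.signal();          // per gu_cond.hpp, a no-op unless somebody waits
}

int consume()
{
    gu::Lock lock(mtx);
    while (queue_.empty()) lock.wait(cond); // releases mtx while blocked
    int v = queue_.front();
    queue_.pop_front();
    return v;
}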
galera-3-25.3.20/galerautils/src/gu_convert.hpp0000644000015300001660000000675313042054732021140 0ustar jenkinsjenkins// Copyright (C) 2009 Codership Oy /** * @file Routines for safe integer conversion * * $Id$ */ #ifndef _gu_convert_hpp_ #define _gu_convert_hpp_ #include "gu_macros.h" #include "gu_throw.hpp" #include namespace gu { /*! * Converts from type FROM to type TO with range checking. * Generic template is for the case sizeof(FROM) > sizeof(TO). * * @param from value to convert * @param to destination (provides type TO for template instantiation) * @return value cast to TO */ template inline TO convert (const FROM& from, const TO& to) { if (gu_unlikely(from > std::numeric_limits::max() || from < std::numeric_limits::min())) { // @todo: figure out how to print type name without RTTI gu_throw_error (ERANGE) << from << " is unrepresentable with " << (std::numeric_limits::is_signed ? "signed" : "unsigned") << " " << sizeof(TO) << " bytes."; } return static_cast(from); } /* Specialized templates are for signed conversion */ template <> inline long long convert (const unsigned long long& from, const long long& to) { if (gu_unlikely(from > static_cast (std::numeric_limits::max()))) { gu_throw_error (ERANGE) << from << " is unrepresentable with 'long long'"; } return static_cast(from); } template <> inline unsigned long long convert (const long long& from, const unsigned long long& to) { if (gu_unlikely(from < 0)) { gu_throw_error (ERANGE) << from << " is unrepresentable with 'unsigned long long'"; } return static_cast(from); } template <> inline long convert (const unsigned long& from, const long& to) { if (gu_unlikely(from > static_cast (std::numeric_limits::max()))) { gu_throw_error (ERANGE) << from << " is unrepresentable with 'long'"; } return static_cast(from); } template <> inline unsigned long convert (const long& from, const unsigned long& to) { if (gu_unlikely(from < 0)) { gu_throw_error (ERANGE) << from << " is unrepresentable with 'unsigned long'"; } return static_cast(from); } template <> inline int convert (const unsigned int& from, const int& to) { if (gu_unlikely(from > static_cast (std::numeric_limits::max()))) { gu_throw_error (ERANGE) << from << " is unrepresentable with 'long'"; } return static_cast(from); } template <> inline unsigned int convert (const int& from, const unsigned int& to) { if (gu_unlikely(from < 0)) { gu_throw_error (ERANGE) << from << " is unrepresentable with 'unsigned long'"; } return static_cast(from); } } #endif /* _gu_convert_hpp_ */ galera-3-25.3.20/galerautils/src/gu_utils.hpp0000644000015300001660000000764113042054732020615 0ustar jenkinsjenkins// Copyright (C) 2009-2010 Codership Oy /** * @file General-purpose functions and templates * * $Id$ */ #ifndef _gu_utils_hpp_ #define _gu_utils_hpp_ #include #include #include #include #include "gu_exception.hpp" namespace gu { /* * String conversion functions for primitive types */ /*! Generic to_string() template function */ template inline std::string to_string(const T& x, std::ios_base& (*f)(std::ios_base&) = std::dec) { std::ostringstream out; out << std::showbase << f << x; return out.str(); } /*! Specialized template: make bool translate into 'true' or 'false' */ template <> inline std::string to_string(const bool& x, std::ios_base& (*f)(std::ios_base&)) { std::ostringstream out; out << std::boolalpha << x; return out.str(); } /*! 
Specialized template: make double to print with full precision */ template <> inline std::string to_string(const double& x, std::ios_base& (*f)(std::ios_base&)) { const int sigdigits = std::numeric_limits::digits10; // or perhaps std::numeric_limits::max_digits10? std::ostringstream out; out << std::setprecision(sigdigits) << x; return out.str(); } /*! Generic from_string() template. Default base is decimal. * @throws NotFound */ template inline T from_string(const std::string& s, std::ios_base& (*f)(std::ios_base&) = std::dec) { std::istringstream iss(s); T ret; try { if ((iss >> f >> ret).fail()) throw NotFound(); } catch (gu::Exception& e) { throw NotFound(); } return ret; } /*! Specialized template for reading strings. This is to avoid throwing * NotFound in case of empty string. */ template <> inline std::string from_string(const std::string& s, std::ios_base& (*f)(std::ios_base&)) { return s; } /*! Specialized template for reading pointers. Default base is hex. * @throws NotFound */ template <> inline void* from_string(const std::string& s, std::ios_base& (*f)(std::ios_base&)) { std::istringstream iss(s); void* ret; if ((iss >> std::hex >> ret).fail()) throw NotFound(); return ret; } extern "C" const char* gu_str2bool (const char* str, bool* bl); /*! Specialized template for reading bool. Tries both 1|0 and true|false * @throws NotFound */ template <> inline bool from_string (const std::string& s, std::ios_base& (*f)(std::ios_base&)) { bool ret; const char* const str(s.c_str()); const char* const endptr(gu_str2bool(str, &ret)); if (endptr == str || endptr == 0 || *endptr != '\0') throw NotFound(); return ret; } /*! * Substitute for the Variable Length Array on the stack from C99. * Provides automatic deallocation when out of scope: * * void foo(size_t n) { VLA bar(n); bar[0] = 5; throw; } */ template class VLA { T* array; VLA (const VLA&); VLA& operator= (const VLA&); public: VLA (size_t n) : array(new T[n]) {} ~VLA () { delete[] array; } T* operator& () { return array; } T& operator[] (size_t i) { return array[i]; } }; /*! * Object deletion operator. Convenient with STL containers containing * pointers. Example: * * @code * void cleanup() * { * for_each(container.begin(), container.end(), DeleteObject()); * container.clear(); * } * * @endcode */ class DeleteObject { public: template void operator()(T* t) { delete t; } }; /*! 
swap method for arrays, which does't seem to be built in all compilers */ template inline void swap_array(T (&a)[N], T (&b)[N]) { for (size_t n(0); n < N; ++n) std::swap(a[n], b[n]); } } // namespace gu #endif /* _gu_utils_hpp_ */ galera-3-25.3.20/galerautils/src/gu_mutex.h0000644000015300001660000000733513042054732020257 0ustar jenkinsjenkins// Copyright (C) 2007 Codership Oy /** * @file Special mutex replacements for debugging/porting * * $Id$ */ #ifndef _gu_mutex_h_ #define _gu_mutex_h_ #include struct gu_mutex { pthread_mutex_t target_mutex; //!< for critical section pthread_mutex_t control_mutex; //!< for mutex operations volatile int lock_waiter_count; //!< # of threads waiting for lock volatile int cond_waiter_count; //!< # of threads waiting for cond volatile int holder_count; //!< must be 0 or 1 volatile pthread_t thread; /* point in source code, where called from */ volatile const char *file; volatile int line; }; /** @name Usual mutex operations storing FILE and LINE information */ /*@{*/ int gu_mutex_init_dbg (struct gu_mutex *mutex, const pthread_mutexattr_t *attr, const char *file, unsigned int line); int gu_mutex_lock_dbg (struct gu_mutex *mutex, const char *file, unsigned int line); int gu_mutex_unlock_dbg (struct gu_mutex *mutex, const char *file, unsigned int line); int gu_mutex_destroy_dbg (struct gu_mutex *mutex, const char *file, unsigned int line); int gu_cond_wait_dbg (pthread_cond_t *cond, struct gu_mutex *mutex, const char *file, unsigned int line); /*@}*/ /** Shorter mutex API for applications. * Depending on compile-time flags application will either use * debug or normal version of the mutex API */ /*@{*/ #ifdef DEBUG_MUTEX typedef struct gu_mutex gu_mutex_t; #define gu_mutex_init(M,A) gu_mutex_init_dbg ((M),(A), __FILE__, __LINE__) #define gu_mutex_lock(M) gu_mutex_lock_dbg ((M), __FILE__, __LINE__) #define gu_mutex_unlock(M) gu_mutex_unlock_dbg ((M), __FILE__, __LINE__) #define gu_mutex_destroy(M) gu_mutex_destroy_dbg((M), __FILE__, __LINE__) #define gu_cond_wait(S,M) gu_cond_wait_dbg ((S),(M), __FILE__, __LINE__) #define GU_MUTEX_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, \ PTHREAD_MUTEX_INITIALIZER, \ 0,0,0,0,0,0 } #else /* DEBUG_MUTEX not defined - use regular pthread functions */ typedef pthread_mutex_t gu_mutex_t; #define gu_mutex_init(M,A) pthread_mutex_init ((M),(A)) #define gu_mutex_lock(M) pthread_mutex_lock ((M)) #define gu_mutex_unlock(M) pthread_mutex_unlock ((M)) #define gu_mutex_destroy(M) pthread_mutex_destroy((M)) #define gu_cond_wait(S,M) pthread_cond_wait ((S),(M)) #define GU_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER #endif /* DEBUG_MUTEX */ /*@}*/ /* The following typedefs and macros don't do anything now, * but may be used later */ typedef pthread_t gu_thread_t; typedef pthread_cond_t gu_cond_t; #define gu_thread_create pthread_create #define gu_thread_join pthread_join #define gu_thread_cancel pthread_cancel #define gu_thread_exit pthread_exit #define gu_cond_init pthread_cond_init #define gu_cond_destroy pthread_cond_destroy #define gu_cond_signal pthread_cond_signal #define gu_cond_broadcast pthread_cond_broadcast #define gu_cond_timedwait pthread_cond_timedwait #if defined(__APPLE__) #ifdef __cplusplus extern "C" { #endif typedef int pthread_barrierattr_t; typedef struct { pthread_mutex_t mutex; pthread_cond_t cond; int count; int tripCount; } pthread_barrier_t; int pthread_barrier_init (pthread_barrier_t *barrier, const pthread_barrierattr_t *attr, unsigned int count); int pthread_barrier_destroy (pthread_barrier_t *barrier); int 
pthread_barrier_wait (pthread_barrier_t *barrier); #ifdef __cplusplus } #endif #endif /* __APPLE__ */ #endif /* _gu_mutex_h_ */ galera-3-25.3.20/galerautils/src/gu_logger.hpp0000644000015300001660000000661313042054732020732 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy * * This code is based on an excellent article at Dr.Dobb's: * http://www.ddj.com/cpp/201804215?pgno=1 * * It looks ugly because it has to integrate with C logger - * in order to produce identical output */ #ifndef __GU_LOGGER__ #define __GU_LOGGER__ #include extern "C" { #include "gu_log.h" #include "gu_conf.h" } namespace gu { // some portability stuff #ifdef _gu_log_h_ enum LogLevel { LOG_FATAL = GU_LOG_FATAL, LOG_ERROR = GU_LOG_ERROR, LOG_WARN = GU_LOG_WARN, LOG_INFO = GU_LOG_INFO, LOG_DEBUG = GU_LOG_DEBUG, LOG_MAX }; typedef gu_log_cb_t LogCallback; #else enum LogLevel { LOG_FATAL, LOG_ERROR, LOG_WARN, LOG_INFO, LOG_DEBUG, LOG_MAX }; typedef void (*LogCallback) (int, const char*); #endif class Logger { private: Logger(const Logger&); Logger& operator =(const Logger&); void prepare_default (); const LogLevel level; #ifndef _gu_log_h_ static LogLevel max_level; static bool do_timestamp; static LogCallback logger; static void default_logger (int, const char*); #else #define max_level gu_log_max_level #define logger gu_log_cb #define default_logger gu_log_cb_default #endif protected: std::ostringstream os; public: Logger(LogLevel _level = LOG_INFO) : level (_level), os () {} virtual ~Logger() { logger (level, os.str().c_str()); } std::ostringstream& get(const char* file, const char* func, int line) { if (default_logger == logger) { prepare_default(); // prefix with timestamp and log level } /* provide file:func():line info only when debug logging is on */ if (static_cast(LOG_DEBUG) == static_cast(max_level)) { os << file << ':' << func << "():" << line << ": "; } return os; } static bool no_log (LogLevel lvl) { return (static_cast(lvl) > static_cast(max_level)); } static void set_debug_filter(const std::string&); static bool no_debug(const std::string&, const std::string&, const int); #ifndef _gu_log_h_ static void enable_tstamp (bool); static void enable_debug (bool); static void set_logger (LogCallback); #endif }; #define GU_LOG_CPP(level) \ if (gu::Logger::no_log(level)) {} \ else gu::Logger(level).get(__FILE__, __FUNCTION__, __LINE__) // USAGE: LOG(level) << item_1 << item_2 << ... << item_n; #define log_fatal GU_LOG_CPP(gu::LOG_FATAL) #define log_error GU_LOG_CPP(gu::LOG_ERROR) #define log_warn GU_LOG_CPP(gu::LOG_WARN) #define log_info GU_LOG_CPP(gu::LOG_INFO) #define log_debug \ if (gu::Logger::no_debug(__FILE__, __FUNCTION__, __LINE__)) {} else \ GU_LOG_CPP(gu::LOG_DEBUG) } #endif // __GU_LOGGER__ galera-3-25.3.20/galerautils/src/gu_serialize.hpp0000644000015300001660000002557013042054732021445 0ustar jenkinsjenkins/* * Copyright (C) 2009-2012 Codership Oy */ /*! * @file Helper templates for serialization/unserialization. * As we are usually working on little endian platforms, integer * storage order is little-endian - in other words we use "Galera" * order, which is by default little-endian. * * What is going on down there? Templates are good. However we do * not serialize the value of size_t variable into sizeof(size_t) * bytes. We serialize it into a globally consistent, fixed number * of bytes, regardless of the local size of size_t variable. * * Hence templating by the source variable size should not be used. * Instead there are functions/templates that serialize to an explicit * number of bytes. 
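 *
 * Illustrative use of the fixed-width helpers defined below (an added
 * sketch; the buffer and both values are arbitrary, and out-of-bounds
 * offsets are reported by the helpers themselves via gu_throw_error()):
 *
 *   gu::byte_t buf[12];
 *   uint32_t   len  (42);
 *   uint64_t   seqno(1001);
 *
 *   size_t off(0);
 *   off = gu::serialize4(len,   buf, sizeof(buf), off);  // bytes 0..3
 *   off = gu::serialize8(seqno, buf, sizeof(buf), off);  // bytes 4..11
 *
 *   uint32_t len2; uint64_t seqno2;
 *   off = gu::unserialize4(buf, sizeof(buf), 0,   len2);
 *   off = gu::unserialize8(buf, sizeof(buf), off, seqno2);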
* * @todo Templates are safe to use with integer types only. Adjust them * to work also with classes that have special serialization * routines. * @todo Make buffer serialization functions Buffer class methods. * @todo Alignment issues. */ #ifndef GU_SERIALIZE_HPP #define GU_SERIALIZE_HPP #include "gu_throw.hpp" #include "gu_byteswap.hpp" #include "gu_buffer.hpp" #include "gu_macros.hpp" #include namespace gu { template inline size_t serial_size(const T& t) { return t.serial_size(); } template <> inline size_t serial_size(const uint8_t& b) { return sizeof(b); } template <> inline size_t serial_size(const uint16_t& b) { return sizeof(b); } template <> inline size_t serial_size(const uint32_t& b) { return sizeof(b); } template <> inline size_t serial_size(const uint64_t& b) { return sizeof(b); } /* Should not be used directly! */ template inline size_t __private_serialize(const FROM& f, void* const buf, size_t const buflen, size_t const offset) { GU_COMPILE_ASSERT(std::numeric_limits::is_integer, not_integer1); GU_COMPILE_ASSERT(std::numeric_limits::is_integer, not_integer2); GU_COMPILE_ASSERT(sizeof(FROM) == sizeof(TO), size_differs); size_t const ret = offset + sizeof(TO); if (gu_unlikely(ret > buflen)) { gu_throw_error(EMSGSIZE) << ret << " > " << buflen; } void* const pos(reinterpret_cast(buf) + offset); *reinterpret_cast(pos) = htog(f); return ret; } /* Should not be used directly! */ template inline size_t __private_unserialize(const void* const buf, size_t const buflen, size_t const offset, TO& t) { GU_COMPILE_ASSERT(std::numeric_limits::is_integer, not_integer1); GU_COMPILE_ASSERT(std::numeric_limits::is_integer, not_integer2); GU_COMPILE_ASSERT(sizeof(FROM) == sizeof(TO), size_differs); size_t const ret = offset + sizeof(t); if (gu_unlikely(ret > buflen)) { gu_throw_error(EMSGSIZE) << ret << " > " << buflen; } const void* const pos(reinterpret_cast(buf) + offset); t = gtoh(*reinterpret_cast(pos)); return ret; } template GU_FORCE_INLINE size_t serialize1(const T& t, void* const buf, size_t const buflen, size_t const offset) { return __private_serialize(t, buf, buflen, offset); } template GU_FORCE_INLINE size_t unserialize1(const void* const buf, size_t const buflen, size_t const offset, T& t) { return __private_unserialize(buf, buflen, offset, t); } template GU_FORCE_INLINE size_t serialize2(const T& t, void* const buf, size_t const buflen, size_t const offset) { return __private_serialize(t, buf, buflen, offset); } template GU_FORCE_INLINE size_t unserialize2(const void* const buf, size_t const buflen, size_t const offset, T& t) { return __private_unserialize(buf, buflen, offset, t); } template GU_FORCE_INLINE size_t serialize4(const T& t, void* const buf, size_t const buflen, size_t const offset) { return __private_serialize(t, buf, buflen, offset); } template GU_FORCE_INLINE size_t unserialize4(const void* const buf, size_t const buflen, size_t const offset, T& t) { return __private_unserialize(buf, buflen, offset, t); } template GU_FORCE_INLINE size_t serialize8(const T& t, void* const buf, size_t const buflen, size_t const offset) { return __private_serialize(t, buf, buflen, offset); } template GU_FORCE_INLINE size_t unserialize8(const void* const buf, size_t const buflen, size_t const offset, T& t) { return __private_unserialize(buf, buflen, offset, t); } template inline size_t __private_serial_size(const Buffer& sb) { GU_COMPILE_ASSERT(std::numeric_limits::is_integer, must_be_integer); if (sb.size() > std::numeric_limits::max()) gu_throw_error(ERANGE) << sb.size() << " 
unrepresentable in " << sizeof(ST) << " bytes."; return sizeof(ST) + sb.size(); } GU_FORCE_INLINE size_t serial_size1(const Buffer& sb) { return __private_serial_size(sb); } GU_FORCE_INLINE size_t serial_size2(const Buffer& sb) { return __private_serial_size(sb); } GU_FORCE_INLINE size_t serial_size4(const Buffer& sb) { return __private_serial_size(sb); } GU_FORCE_INLINE size_t serial_size8(const Buffer& sb) { return __private_serial_size(sb); } template inline size_t __private_serialize(const Buffer& b, void* const buf, size_t const buflen, size_t offset) { size_t const ret = offset + __private_serial_size(b); if (ret > buflen) { gu_throw_error(EMSGSIZE) << ret << " > " << buflen; } offset = __private_serialize(static_cast(b.size()), buf, buflen, offset); copy(b.begin(), b.end(), reinterpret_cast(buf) + offset); return ret; } template inline size_t __private_unserialize(const void* const buf, size_t const buflen, size_t offset, Buffer& b) { GU_COMPILE_ASSERT(std::numeric_limits::is_integer, must_be_integer); ST len(0); size_t ret = offset + sizeof(len); if (ret > buflen) gu_throw_error(EMSGSIZE) << ret << " > " << buflen; offset = __private_unserialize(buf, buflen, offset, len); ret += len; if (ret > buflen) gu_throw_error(EMSGSIZE) << ret << " > " << buflen; b.resize(len); const byte_t* const ptr(reinterpret_cast(buf)); copy(ptr + offset, ptr + ret, b.begin()); return ret; } GU_FORCE_INLINE size_t serialize1(const Buffer& b, void* const buf, size_t const buflen, size_t const offset) { return __private_serialize(b, buf, buflen, offset); } GU_FORCE_INLINE size_t unserialize1(const void* const buf, size_t const buflen, size_t const offset, Buffer& b) { return __private_unserialize(buf, buflen, offset, b); } GU_FORCE_INLINE size_t serialize2(const Buffer& b, void* const buf, size_t const buflen, size_t const offset) { return __private_serialize(b, buf, buflen, offset); } GU_FORCE_INLINE size_t unserialize2(const void* const buf, size_t const buflen, size_t const offset, Buffer& b) { return __private_unserialize(buf, buflen, offset, b); } GU_FORCE_INLINE size_t serialize4(const Buffer& b, void* const buf, size_t const buflen, size_t const offset) { return __private_serialize(b, buf, buflen, offset); } GU_FORCE_INLINE size_t unserialize4(const void* const buf, size_t const buflen, size_t const offset, Buffer& b) { return __private_unserialize(buf, buflen, offset, b); } GU_FORCE_INLINE size_t serialize8(const Buffer& b, void* const buf, size_t const buflen, size_t const offset) { return __private_serialize(b, buf, buflen, offset); } GU_FORCE_INLINE size_t unserialize8(const void* const buf, size_t const buflen, size_t const offset, Buffer& b) { return __private_unserialize(buf, buflen, offset, b); } } // namespace gu #endif // GU_SERIALIZE_HPP galera-3-25.3.20/galerautils/src/gu_dbug.c0000644000015300001660000015150713042054732020032 0ustar jenkinsjenkins/****************************************************************************** * * * N O T I C E * * * * Copyright Abandoned, 1987, Fred Fish * * * * * * This previously copyrighted work has been placed into the public * * domain by the author and may be freely used for any purpose, * * private or commercial. * * * * Because of the number of inquiries I was receiving about the use * * of this product in commercially developed works I have decided to * * simply make it public domain to further its unrestricted use. 
I * * specifically would be most happy to see this material become a * * part of the standard Unix distributions by AT&T and the Berkeley * * Computer Science Research Group, and a standard part of the GNU * * system from the Free Software Foundation. * * * * I would appreciate it, as a courtesy, if this notice is left in * * all copies and derivative works. Thank you. * * * * The author makes no warranty of any kind with respect to this * * product and explicitly disclaims any implied warranties of mer- * * chantability or fitness for any particular purpose. * * * ****************************************************************************** */ /* * FILE * * dbug.c runtime support routines for dbug package * * SCCS * * @(#)dbug.c 1.25 7/25/89 * * DESCRIPTION * * These are the runtime support routines for the dbug package. * The dbug package has two main components; the user include * file containing various macro definitions, and the runtime * support routines which are called from the macro expansions. * * Externally visible functions in the runtime support module * use the naming convention pattern "_db_xx...xx_", thus * they are unlikely to collide with user defined function names. * * AUTHOR(S) * * Fred Fish (base code) * Enhanced Software Technologies, Tempe, AZ * asuvax!mcdphx!estinc!fnf * * Binayak Banerjee (profiling enhancements) * seismo!bpa!sjuvax!bbanerje * * Michael Widenius: * DBUG_DUMP - To dump a pice of memory. * PUSH_FLAG "O" - To be used insted of "o" if we don't * want flushing (for slow systems) * PUSH_FLAG "A" - as 'O', but we will append to the out file instead * of creating a new one. * Check of malloc on entry/exit (option "S") * * Alexey Yurchenko: * - Renamed global symbols for use with galera project to avoid * collisions with other software (notably MySQL) * * Teemu Ollakka: * - Slight cleanups, removed some MySQL dependencies. * - All global variables should now have _gu_db prefix. * - Thread -> state mapping for multithreaded programs. * - Changed initialization so that it is done on the first * call to _gu_db_push(). * * $Id$ */ #include #include #include #include #include #include #ifndef GU_DBUG_ON #define GU_DBUG_ON #endif #include "gu_dbug.h" /* Make a new type: bool_t */ typedef enum { FALSE = (0 != 0), TRUE = (!FALSE) } bool_t; #define _VARARGS(X) X #define FN_LIBCHAR 1024 #define FN_REFLEN 1024 #define NullS "" #include #if defined(MSDOS) || defined(__WIN__) #include #endif #ifdef _GU_DBUG_CONDITION_ #define _GU_DBUG_START_CONDITION_ "d:t" #else #define _GU_DBUG_START_CONDITION_ "" #endif /* * Manifest constants that should not require any changes. */ #define EOS '\000' /* End Of String marker */ /* * Manifest constants which may be "tuned" if desired. */ #define PRINTBUF 1024 /* Print buffer size */ #define INDENT 2 /* Indentation per trace level */ #define MAXDEPTH 200 /* Maximum trace depth default */ /* * The following flags are used to determine which * capabilities the user has enabled with the state * push macro. 
*/ #define TRACE_ON 000001 /* Trace enabled */ #define DEBUG_ON 000002 /* Debug enabled */ #define FILE_ON 000004 /* File name print enabled */ #define LINE_ON 000010 /* Line number print enabled */ #define DEPTH_ON 000020 /* Function nest level print enabled */ #define PROCESS_ON 000040 /* Process name print enabled */ #define NUMBER_ON 000100 /* Number each line of output */ #define PROFILE_ON 000200 /* Print out profiling code */ #define PID_ON 000400 /* Identify each line with process id */ #define SANITY_CHECK_ON 001000 /* Check my_malloc on GU_DBUG_ENTER */ #define FLUSH_ON_WRITE 002000 /* Flush on every write */ #define TRACING (_gu_db_stack -> flags & TRACE_ON) #define DEBUGGING (_gu_db_stack -> flags & DEBUG_ON) #define PROFILING (_gu_db_stack -> flags & PROFILE_ON) #define STREQ(a,b) (strcmp(a,b) == 0) #define min(a,b) ((a) < (b) ? (a) : (b)) #define max(a,b) ((a) > (b) ? (a) : (b)) /* * Typedefs to make things more obvious.??? */ #ifndef __WIN__ typedef int BOOLEAN; #else #define BOOLEAN BOOL #endif /* * Make it easy to change storage classes if necessary. */ #define IMPORT extern /* Names defined externally */ #define EXPORT /* Allocated here, available globally */ #define AUTO auto /* Names to be allocated on stack */ #define REGISTER register /* Names to be placed in registers */ /* * The default file for profiling. Could also add another flag * (G?) which allowed the user to specify this. * * If the automatic variables get allocated on the stack in * reverse order from their declarations, then define AUTOS_REVERSE. * This is used by the code that keeps track of stack usage. For * forward allocation, the difference in the dbug frame pointers * represents stack used by the callee function. For reverse allocation, * the difference represents stack used by the caller function. * */ #define PROF_FILE "dbugmon.out" #define PROF_EFMT "E\t%ld\t%s\n" #define PROF_SFMT "S\t%lx\t%lx\t%s\n" #define PROF_XFMT "X\t%ld\t%s\n" #ifdef M_I386 /* predefined by xenix 386 compiler */ #define AUTOS_REVERSE 1 #endif /* * Variables which are available externally but should only * be accessed via the macro package facilities. */ FILE *_gu_db_fp_ = (FILE*) 0; /* Output stream, default stderr */ char *_gu_db_process_ = (char*) "dbug"; /* Pointer to process name; argv[0] */ FILE *_gu_db_pfp_ = (FILE*) 0; /* Profile stream, 'dbugmon.out' */ BOOLEAN _gu_db_on_ = FALSE; /* TRUE if debugging currently on */ BOOLEAN _gu_db_pon_ = FALSE; /* TRUE if profile currently on */ BOOLEAN _gu_no_db_ = TRUE; /* TRUE if no debugging at all */ /* * Externally supplied functions. */ IMPORT int _sanity(const char *file, uint line); /* * The user may specify a list of functions to trace or * debug. These lists are kept in a linear linked list, * a very simple implementation. */ struct link { char *str; /* Pointer to link's contents */ struct link *next_link; /* Pointer to the next link */ }; /* * Debugging states can be pushed or popped off of a * stack which is implemented as a linked list. Note * that the head of the list is the current state and the * stack is pushed by adding a new state to the head of the * list or popped by removing the first link. 
*/ struct state { int flags; /* Current state flags */ int maxdepth; /* Current maximum trace depth */ uint delay; /* Delay after each output line */ int sub_level; /* Sub this from code_state->level */ FILE* out_file; /* Current output stream */ FILE* prof_file; /* Current profiling stream */ char name[FN_REFLEN]; /* Name of output file */ struct link* functions; /* List of functions */ struct link* p_functions; /* List of profiled functions */ struct link* keywords; /* List of debug keywords */ struct link* processes; /* List of process names */ struct state* next_state; /* Next state in the list */ }; /* * Local variables not seen by user. */ static struct state* _gu_db_stack = 0; typedef struct st_code_state { int lineno; /* Current debugger output line number */ int level; /* Current function nesting level */ const char* func; /* Name of current user function */ const char* file; /* Name of current user file */ char** framep; /* Pointer to current frame */ int jmplevel; /* Remember nesting level at setjmp () */ const char* jmpfunc; /* Remember current function for setjmp */ const char* jmpfile; /* Remember current file for setjmp */ /* * The following variables are used to hold the state information * between the call to _gu_db_pargs_() and _gu_db_doprnt_(), during * expansion of the GU_DBUG_PRINT macro. This is the only macro * that currently uses these variables. * * These variables are currently used only by _gu_db_pargs_() and * _gu_db_doprnt_(). */ uint u_line; /* User source code line number */ const char* u_keyword; /* Keyword for current macro */ int locked; /* If locked with _gu_db_lock_file */ } CODE_STATE; /* Parse a debug command string */ static struct link *ListParse(char *ctlp); /* Make a fresh copy of a string */ static char *StrDup(const char *str); /* Open debug output stream */ static void GU_DBUGOpenFile(const char *name, int append); #ifndef THREAD /* Open profile output stream */ static FILE *OpenProfile(const char *name); /* Profile if asked for it */ static BOOLEAN DoProfile(void); /* Return current user time (ms) */ static unsigned long Clock(void); #endif /* Close debug output stream */ static void CloseFile(FILE * fp); /* Push current debug state */ static void PushState(void); /* Test for tracing enabled */ static BOOLEAN DoTrace(CODE_STATE * state); /* Test to see if file is writable */ #if !(!defined(HAVE_ACCESS) || defined(MSDOS)) static BOOLEAN Writable(char *pathname); /* Change file owner and group */ static void ChangeOwner(char *pathname); /* Allocate memory for runtime support */ #endif static char *DbugMalloc(int size); /* Remove leading pathname components */ static char *BaseName(const char *pathname); static void DoPrefix(uint line); static void FreeList(struct link *linkp); static void Indent(int indent); static BOOLEAN InList(struct link *linkp, const char *cp); static void dbug_flush(CODE_STATE *); static void DbugExit(const char *why); static int DelayArg(int value); /* Supplied in Sys V runtime environ */ /* Break string into tokens */ static char *static_strtok(char *s1, char chr); /* * Miscellaneous printf format strings. */ #define ERR_MISSING_RETURN "%s: missing GU_DBUG_RETURN or GU_DBUG_VOID_RETURN macro in function \"%s\"\n" #define ERR_OPEN "%s: can't open debug output stream \"%s\": " #define ERR_CLOSE "%s: can't close debug file: " #define ERR_ABORT "%s: debugger aborting because %s\n" #define ERR_CHOWN "%s: can't change owner/group of \"%s\": " /* * Macros and defines for testing file accessibility under UNIX and MSDOS. 
*/ #undef EXISTS #if !defined(HAVE_ACCESS) || defined(MSDOS) #define EXISTS(pathname) (FALSE) /* Assume no existance */ #define Writable(name) (TRUE) #else #define EXISTS(pathname) (access (pathname, F_OK) == 0) #define WRITABLE(pathname) (access (pathname, W_OK) == 0) #endif #ifndef MSDOS #define ChangeOwner(name) #endif /* * Translate some calls among different systems. */ #if defined(unix) || defined(xenix) || defined(VMS) || defined(__NetBSD__) # define Delay(A) sleep((uint) A) #elif defined(AMIGA) IMPORT int Delay(); /* Pause for given number of ticks */ #else static int Delay(int ticks); #endif /* ** Macros to allow dbugging with threads */ #ifdef THREAD #include pthread_once_t _gu_db_once = PTHREAD_ONCE_INIT; pthread_mutex_t _gu_db_mutex = PTHREAD_MUTEX_INITIALIZER; struct state_map { pthread_t th; CODE_STATE *state; struct state_map *prev; struct state_map *next; }; #define _GU_DB_STATE_MAP_BUCKETS (1 << 7) static struct state_map *_gu_db_state_map[_GU_DB_STATE_MAP_BUCKETS]; /* * This hash is probably good enough. Golden ratio 2654435761U from * http://www.concentric.net/~Ttwang/tech/inthash.htm * * UPDATE: it is good enough for input with significant variation in * 32 lower bits. */ static inline unsigned long pt_hash(const pthread_t th) { unsigned long k = (unsigned long)th; uint64_t ret = 2654435761U * k; // since we're returning a masked hash key, all considerations // for "reversibility" can be dropped. Instead we can help // higher input bits influence lower output bits. XOR rules. return (ret ^ (ret >> 32)) & (_GU_DB_STATE_MAP_BUCKETS - 1); } static CODE_STATE *state_map_find(const pthread_t th) { unsigned int key = pt_hash(th); struct state_map *sm = _gu_db_state_map[key]; while (sm && sm->th != th) sm = sm->next; return sm ? sm->state : NULL; } void state_map_insert(const pthread_t th, CODE_STATE *state) { unsigned int key; struct state_map *sm; assert(state_map_find(th) == NULL); key = pt_hash(th); sm = malloc(sizeof(struct state_map)); sm->state = state; sm->th = th; pthread_mutex_lock(&_gu_db_mutex); sm->prev = NULL; sm->next = _gu_db_state_map[key]; if (sm->next) sm->next->prev = sm; _gu_db_state_map[key] = sm; pthread_mutex_unlock(&_gu_db_mutex); } void state_map_erase(const pthread_t th) { unsigned int key; struct state_map *sm; key = pt_hash(th); sm = _gu_db_state_map[key]; while (sm && sm->th != th) sm = sm->next; assert(sm); pthread_mutex_lock(&_gu_db_mutex); if (sm->prev) { sm->prev->next = sm->next; } else { assert(_gu_db_state_map[key] == sm); _gu_db_state_map[key] = sm->next; } if (sm->next) sm->next->prev = sm->prev; pthread_mutex_unlock(&_gu_db_mutex); free(sm); } static CODE_STATE * code_state(void) { CODE_STATE *state = 0; if ((state = state_map_find(pthread_self())) == NULL) { state = malloc(sizeof(CODE_STATE)); memset(state, 0, sizeof(CODE_STATE)); state->func = "?func"; state->file = "?file"; state->u_keyword = "?"; state_map_insert(pthread_self(), state); } return state; } static void code_state_cleanup(CODE_STATE *state) { if (state->level == 0) { state_map_erase(pthread_self()); free(state); } } static void _gu_db_init() { if (!_gu_db_fp_) _gu_db_fp_ = stderr; /* Output stream, default stderr */ memset(_gu_db_state_map, 0, sizeof(_gu_db_state_map)); } #else /* !THREAD */ #define _gu_db_init() #define code_state() (&static_code_state) #define code_state_cleanup(A) do {} while (0) #define pthread_mutex_lock(A) {} #define pthread_mutex_unlock(A) {} static CODE_STATE static_code_state = { 0, 0, "?func", "?file", NULL, 0, NULL, NULL, 0, "?", 0 }; #endif 
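/*
 * Illustrative note, not part of the original source: in threaded builds
 * each thread owns one CODE_STATE, kept in the _gu_db_state_map hash above.
 * pt_hash() multiplies pthread_self() by 2654435761U, folds the high word
 * into the low word and masks the result down to one of the
 * _GU_DB_STATE_MAP_BUCKETS (128) chains; insertions and removals happen
 * under _gu_db_mutex.  The typical access pattern from the trace hooks is:
 *
 *     CODE_STATE* cs = code_state();   // find or create this thread's state
 *     ...                              // use cs->level, cs->func, cs->file
 *     code_state_cleanup(cs);          // frees the entry once level == 0
 */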
/* * FUNCTION * * _gu_db_push_ push current debugger state and set up new one * * SYNOPSIS * * VOID _gu_db_push_ (control) * char *control; * * DESCRIPTION * * Given pointer to a debug control string in "control", pushes * the current debug state, parses the control string, and sets * up a new debug state. * * The only attribute of the new state inherited from the previous * state is the current function nesting level. This can be * overridden by using the "r" flag in the control string. * * The debug control string is a sequence of colon separated fields * as follows: * * ::...: * * Each field consists of a mandatory flag character followed by * an optional "," and comma separated list of modifiers: * * flag[,modifier,modifier,...,modifier] * * The currently recognized flag characters are: * * d Enable output from GU_DBUG_ macros for * for the current state. May be followed * by a list of keywords which selects output * only for the GU_DBUG macros with that keyword. * A null list of keywords implies output for * all macros. * * D Delay after each debugger output line. * The argument is the number of tenths of seconds * to delay, subject to machine capabilities. * I.E. -#D,20 is delay two seconds. * * f Limit debugging and/or tracing, and profiling to the * list of named functions. Note that a null list will * disable all functions. The appropriate "d" or "t" * flags must still be given, this flag only limits their * actions if they are enabled. * * F Identify the source file name for each * line of debug or trace output. * * i Identify the process with the pid for each line of * debug or trace output. * * g Enable profiling. Create a file called 'dbugmon.out' * containing information that can be used to profile * the program. May be followed by a list of keywords * that select profiling only for the functions in that * list. A null list implies that all functions are * considered. * * L Identify the source file line number for * each line of debug or trace output. * * n Print the current function nesting depth for * each line of debug or trace output. * * N Number each line of dbug output. * * o Redirect the debugger output stream to the * specified file. The default output is stderr. * * O As O but the file is really flushed between each * write. When neaded the file is closed and reopened * between each write. * * p Limit debugger actions to specified processes. * A process must be identified with the * GU_DBUG_PROCESS macro and match one in the list * for debugger actions to occur. * * P Print the current process name for each * line of debug or trace output. * * r When pushing a new state, do not inherit * the previous state's function nesting level. * Useful when the output is to start at the * left margin. * * S Do function _sanity(_file_,_line_) at each * debugged function until _sanity() returns * something that differs from 0. * (Moustly used with my_malloc) * * t Enable function call/exit trace lines. * May be followed by a list (containing only * one modifier) giving a numeric maximum * trace level, beyond which no output will * occur for either debugging or tracing * macros. The default is a compile time * option. * * Some examples of debug control strings which might appear * on a shell command line (the "-#" is typically used to * introduce a control string to an application program) are: * * -#d:t * -#d:f,main,subr1:F:L:t,20 * -#d,input,output,files:n * * For convenience, any leading "-#" is stripped off. 
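 *
 *      As a concrete sketch (illustrative only; it assumes the usual
 *      GU_DBUG_PUSH/GU_DBUG_POP wrapper macros from gu_dbug.h and a
 *      made-up output file name):
 *
 *              GU_DBUG_PUSH ("d:t:o,/tmp/galera.dbug");
 *              ...                     /* code traced with 'd' and 't' on */
 *              GU_DBUG_POP ();
 *
 *      enables debug output and call tracing, redirected to
 *      /tmp/galera.dbug, for the enclosed section only.
 *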
* */ void _gu_db_push_(const char *control) { register char *scan; register struct link *temp; CODE_STATE *state; char *new_str; pthread_once(&_gu_db_once, &_gu_db_init); if (control && *control == '-') { if (*++control == '#') control++; } if (*control) _gu_no_db_ = FALSE; /* We are using dbug after all */ else return; new_str = StrDup(control); PushState(); state = code_state(); scan = static_strtok(new_str, ':'); for (; scan != NULL; scan = static_strtok((char *) NULL, ':')) { switch (*scan++) { case 'd': _gu_db_on_ = TRUE; _gu_db_stack->flags |= DEBUG_ON; if (*scan++ == ',') { _gu_db_stack->keywords = ListParse(scan); } break; case 'D': _gu_db_stack->delay = 0; if (*scan++ == ',') { temp = ListParse(scan); _gu_db_stack->delay = DelayArg(atoi(temp->str)); FreeList(temp); } break; case 'f': if (*scan++ == ',') { _gu_db_stack->functions = ListParse(scan); } break; case 'F': _gu_db_stack->flags |= FILE_ON; break; case 'i': _gu_db_stack->flags |= PID_ON; break; #ifndef THREAD case 'g': _gu_db_pon_ = TRUE; if (OpenProfile(PROF_FILE)) { _gu_db_stack->flags |= PROFILE_ON; if (*scan++ == ',') _gu_db_stack->p_functions = ListParse(scan); } break; #endif case 'L': _gu_db_stack->flags |= LINE_ON; break; case 'n': _gu_db_stack->flags |= DEPTH_ON; break; case 'N': _gu_db_stack->flags |= NUMBER_ON; break; case 'A': case 'O': _gu_db_stack->flags |= FLUSH_ON_WRITE; case 'a': case 'o': if (*scan++ == ',') { temp = ListParse(scan); GU_DBUGOpenFile(temp->str, (int) (scan[-2] == 'A' || scan[-2] == 'a')); FreeList(temp); } else { GU_DBUGOpenFile("-", 0); } break; case 'p': if (*scan++ == ',') { _gu_db_stack->processes = ListParse(scan); } break; case 'P': _gu_db_stack->flags |= PROCESS_ON; break; case 'r': _gu_db_stack->sub_level = state->level; break; case 't': _gu_db_stack->flags |= TRACE_ON; if (*scan++ == ',') { temp = ListParse(scan); _gu_db_stack->maxdepth = atoi(temp->str); FreeList(temp); } break; case 'S': _gu_db_stack->flags |= SANITY_CHECK_ON; break; } } free(new_str); } /* * FUNCTION * * _gu_db_pop_ pop the debug stack * * DESCRIPTION * * Pops the debug stack, returning the debug state to its * condition prior to the most recent _gu_db_push_ invocation. * Note that the pop will fail if it would remove the last * valid state from the stack. This prevents user errors * in the push/pop sequence from screwing up the debugger. * Maybe there should be some kind of warning printed if the * user tries to pop too many states. 
* */ void _gu_db_pop_() { register struct state *discard; discard = _gu_db_stack; if (discard != NULL && discard->next_state != NULL) { _gu_db_stack = discard->next_state; _gu_db_fp_ = _gu_db_stack->out_file; _gu_db_pfp_ = _gu_db_stack->prof_file; if (discard->keywords != NULL) { FreeList(discard->keywords); } if (discard->functions != NULL) { FreeList(discard->functions); } if (discard->processes != NULL) { FreeList(discard->processes); } if (discard->p_functions != NULL) { FreeList(discard->p_functions); } CloseFile(discard->out_file); if (discard->prof_file) CloseFile(discard->prof_file); free((char *) discard); if (!(_gu_db_stack->flags & DEBUG_ON)) _gu_db_on_ = 0; } else { if (_gu_db_stack) _gu_db_stack->flags &= ~DEBUG_ON; _gu_db_on_ = 0; } } /* * FUNCTION * * _gu_db_enter_ process entry point to user function * * SYNOPSIS * * VOID _gu_db_enter_ (_func_, _file_, _line_, * _sfunc_, _sfile_, _slevel_, _sframep_) * char *_func_; points to current function name * char *_file_; points to current file name * int _line_; called from source line number * char **_sfunc_; save previous _func_ * char **_sfile_; save previous _file_ * int *_slevel_; save previous nesting level * char ***_sframep_; save previous frame pointer * * DESCRIPTION * * Called at the beginning of each user function to tell * the debugger that a new function has been entered. * Note that the pointers to the previous user function * name and previous user file name are stored on the * caller's stack (this is why the ENTER macro must be * the first "executable" code in a function, since it * allocates these storage locations). The previous nesting * level is also stored on the callers stack for internal * self consistency checks. * * Also prints a trace line if tracing is enabled and * increments the current function nesting depth. * * Note that this mechanism allows the debugger to know * what the current user function is at all times, without * maintaining an internal stack for the function names. * */ void _gu_db_enter_(const char *_func_, const char *_file_, uint _line_, const char **_sfunc_, const char **_sfile_, uint * _slevel_, char ***_sframep_ __attribute__ ((unused))) { register CODE_STATE *state; if (!_gu_no_db_) { int save_errno = errno; state = code_state(); *_sfunc_ = state->func; *_sfile_ = state->file; state->func = (char *) _func_; state->file = (char *) _file_; /* BaseName takes time !! */ *_slevel_ = ++state->level; #ifndef THREAD *_sframep_ = state->framep; state->framep = (char **) _sframep_; if (DoProfile()) { long stackused; if (*state->framep == NULL) { stackused = 0; } else { stackused = ((long) (*state->framep)) - ((long) (state->framep)); stackused = stackused > 0 ? 
stackused : -stackused; } (void) fprintf(_gu_db_pfp_, PROF_EFMT, Clock(), state->func); #ifdef AUTOS_REVERSE (void) fprintf(_gu_db_pfp_, PROF_SFMT, state->framep, stackused, *_sfunc_); #else (void) fprintf(_gu_db_pfp_, PROF_SFMT, (ulong) state->framep, stackused, state->func); #endif (void) fflush(_gu_db_pfp_); } #endif if (DoTrace(state)) { if (!state->locked) pthread_mutex_lock(&_gu_db_mutex); DoPrefix(_line_); Indent(state->level); (void) fprintf(_gu_db_fp_, ">%s\n", state->func); dbug_flush(state); /* This does a unlock */ } #ifdef SAFEMALLOC if (_gu_db_stack->flags & SANITY_CHECK_ON) if (_sanity(_file_, _line_)) /* Check of my_malloc */ _gu_db_stack->flags &= ~SANITY_CHECK_ON; #endif errno = save_errno; } } /* * FUNCTION * * _gu_db_return_ process exit from user function * * SYNOPSIS * * VOID _gu_db_return_ (_line_, _sfunc_, _sfile_, _slevel_) * int _line_; current source line number * char **_sfunc_; where previous _func_ is to be retrieved * char **_sfile_; where previous _file_ is to be retrieved * int *_slevel_; where previous level was stashed * * DESCRIPTION * * Called just before user function executes an explicit or implicit * return. Prints a trace line if trace is enabled, decrements * the current nesting level, and restores the current function and * file names from the defunct function's stack. * */ void _gu_db_return_(uint _line_, const char **_sfunc_, const char **_sfile_, uint * _slevel_) { CODE_STATE *state; if (!_gu_no_db_) { int save_errno = errno; if (!(state = code_state())) return; /* Only happens at end of program */ if (_gu_db_stack->flags & (TRACE_ON | DEBUG_ON | PROFILE_ON)) { if (!state->locked) pthread_mutex_lock(&_gu_db_mutex); if (state->level != (int) *_slevel_) (void) fprintf(_gu_db_fp_, ERR_MISSING_RETURN, _gu_db_process_, state->func); else { #ifdef SAFEMALLOC if (_gu_db_stack->flags & SANITY_CHECK_ON) if (_sanity(*_sfile_, _line_)) _gu_db_stack->flags &= ~SANITY_CHECK_ON; #endif #ifndef THREAD if (DoProfile()) (void) fprintf(_gu_db_pfp_, PROF_XFMT, Clock(), state->func); #endif if (DoTrace(state)) { DoPrefix(_line_); Indent(state->level); (void) fprintf(_gu_db_fp_, "<%s\n", state->func); } } dbug_flush(state); } state->level = *_slevel_ - 1; state->func = *_sfunc_; state->file = *_sfile_; #ifndef THREAD if (state->framep != NULL) state->framep = (char **) *state->framep; #endif errno = save_errno; code_state_cleanup(state); } } /* * FUNCTION * * _gu_db_pargs_ log arguments for subsequent use by _gu_db_doprnt_() * * SYNOPSIS * * VOID _gu_db_pargs_ (_line_, keyword) * int _line_; * char *keyword; * * DESCRIPTION * * The new universal printing macro GU_DBUG_PRINT, which replaces * all forms of the GU_DBUG_N macros, needs two calls to runtime * support routines. The first, this function, remembers arguments * that are used by the subsequent call to _gu_db_doprnt_(). * */ void _gu_db_pargs_(uint _line_, const char *keyword) { CODE_STATE *state = code_state(); state->u_line = _line_; state->u_keyword = (char *) keyword; } /* * FUNCTION * * _gu_db_doprnt_ handle print of debug lines * * SYNOPSIS * * VOID _gu_db_doprnt_ (format, va_alist) * char *format; * va_dcl; * * DESCRIPTION * * When invoked via one of the GU_DBUG macros, tests the current keyword * set by calling _gu_db_pargs_() to see if that macro has been selected * for processing via the debugger control string, and if so, handles * printing of the arguments via the format string. The line number * of the GU_DBUG macro in the source is found in u_line. 
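 *
 *      A typical call site looks like the following sketch (illustrative;
 *      the "info" keyword and the arguments are made up).  The macro
 *      expands into the _gu_db_pargs_() / _gu_db_doprnt_() pair described
 *      above:
 *
 *              GU_DBUG_PRINT ("info", ("seqno = %lld, len = %d", seqno, len));
 *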
* * Note that the format string SHOULD NOT include a terminating * newline, this is supplied automatically. * */ #include void _gu_db_doprnt_(const char *format, ...) { va_list args; CODE_STATE *state; state = code_state(); va_start(args, format); if (_gu_db_keyword_(state->u_keyword)) { int save_errno = errno; if (!state->locked) pthread_mutex_lock(&_gu_db_mutex); DoPrefix(state->u_line); if (TRACING) { Indent(state->level + 1); } else { (void) fprintf(_gu_db_fp_, "%s: ", state->func); } (void) fprintf(_gu_db_fp_, "%s: ", state->u_keyword); (void) vfprintf(_gu_db_fp_, format, args); va_end(args); (void) fputc('\n', _gu_db_fp_); dbug_flush(state); errno = save_errno; } va_end(args); code_state_cleanup(state); } /* * FUNCTION * * _gu_db_dump_ dump a string until '\0' is found * * SYNOPSIS * * void _gu_db_dump_ (_line_,keyword,memory,length) * int _line_; current source line number * char *keyword; * char *memory; Memory to print * int length; Bytes to print * * DESCRIPTION * Dump N characters in a binary array. * Is used to examine corrputed memory or arrays. */ void _gu_db_dump_(uint _line_, const char *keyword, const char *memory, uint length) { int pos; char dbuff[90]; CODE_STATE *state; state = code_state(); if (_gu_db_keyword_((char *) keyword)) { if (!state->locked) pthread_mutex_lock(&_gu_db_mutex); DoPrefix(_line_); if (TRACING) { Indent(state->level + 1); pos = min(max(state->level - _gu_db_stack->sub_level, 0) * INDENT, 80); } else { fprintf(_gu_db_fp_, "%s: ", state->func); } sprintf(dbuff, "%s: Memory: %lx Bytes: (%d)\n", keyword, (ulong) memory, length); (void) fputs(dbuff, _gu_db_fp_); pos = 0; while (length-- > 0) { uint tmp = *((unsigned char *) memory++); if ((pos += 3) >= 80) { fputc('\n', _gu_db_fp_); pos = 3; } fputc(_gu_dig_vec[((tmp >> 4) & 15)], _gu_db_fp_); fputc(_gu_dig_vec[tmp & 15], _gu_db_fp_); fputc(' ', _gu_db_fp_); } (void) fputc('\n', _gu_db_fp_); dbug_flush(state); } code_state_cleanup(state); } /* * FUNCTION * * ListParse parse list of modifiers in debug control string * * SYNOPSIS * * static struct link *ListParse (ctlp) * char *ctlp; * * DESCRIPTION * * Given pointer to a comma separated list of strings in "cltp", * parses the list, building a list and returning a pointer to it. * The original comma separated list is destroyed in the process of * building the linked list, thus it had better be a duplicate * if it is important. * * Note that since each link is added at the head of the list, * the final list will be in "reverse order", which is not * significant for our usage here. * */ static struct link * ListParse(char *ctlp) { REGISTER char *start; REGISTER struct link *new_malloc; REGISTER struct link *head; head = NULL; while (*ctlp != EOS) { start = ctlp; while (*ctlp != EOS && *ctlp != ',') { ctlp++; } if (*ctlp == ',') { *ctlp++ = EOS; } new_malloc = (struct link *) DbugMalloc(sizeof(struct link)); new_malloc->str = StrDup(start); new_malloc->next_link = head; head = new_malloc; } return (head); } /* * FUNCTION * * InList test a given string for member of a given list * * SYNOPSIS * * static BOOLEAN InList (linkp, cp) * struct link *linkp; * char *cp; * * DESCRIPTION * * Tests the string pointed to by "cp" to determine if it is in * the list pointed to by "linkp". Linkp points to the first * link in the list. If linkp is NULL then the string is treated * as if it is in the list (I.E all strings are in the null list). * This may seem rather strange at first but leads to the desired * operation if no list is given. 
The net effect is that all * strings will be accepted when there is no list, and when there * is a list, only those strings in the list will be accepted. * */ static BOOLEAN InList(struct link *linkp, const char *cp) { REGISTER struct link *scan; REGISTER BOOLEAN result; if (linkp == NULL) { result = TRUE; } else { result = FALSE; for (scan = linkp; scan != NULL; scan = scan->next_link) { if (STREQ(scan->str, cp)) { result = TRUE; break; } } } return (result); } /* * FUNCTION * * PushState push current state onto stack and set up new one * * SYNOPSIS * * static VOID PushState () * * DESCRIPTION * * Pushes the current state on the state stack, and inits * a new state. The only parameter inherited from the previous * state is the function nesting level. This action can be * inhibited if desired, via the "r" flag. * * The state stack is a linked list of states, with the new * state added at the head. This allows the stack to grow * to the limits of memory if necessary. * */ static void PushState() { REGISTER struct state *new_malloc; new_malloc = (struct state *) DbugMalloc(sizeof(struct state)); new_malloc->flags = 0; new_malloc->delay = 0; new_malloc->maxdepth = MAXDEPTH; new_malloc->sub_level = 0; new_malloc->out_file = stderr; new_malloc->prof_file = (FILE *) 0; new_malloc->functions = NULL; new_malloc->p_functions = NULL; new_malloc->keywords = NULL; new_malloc->processes = NULL; new_malloc->next_state = _gu_db_stack; _gu_db_stack = new_malloc; } /* * FUNCTION * * DoTrace check to see if tracing is current enabled * * SYNOPSIS * * static BOOLEAN DoTrace (stack) * * DESCRIPTION * * Checks to see if tracing is enabled based on whether the * user has specified tracing, the maximum trace depth has * not yet been reached, the current function is selected, * and the current process is selected. Returns TRUE if * tracing is enabled, FALSE otherwise. * */ static BOOLEAN DoTrace(CODE_STATE * state) { register BOOLEAN trace = FALSE; if (TRACING && state->level <= _gu_db_stack->maxdepth && InList(_gu_db_stack->functions, state->func) && InList(_gu_db_stack->processes, _gu_db_process_)) trace = TRUE; return (trace); } /* * FUNCTION * * DoProfile check to see if profiling is current enabled * * SYNOPSIS * * static BOOLEAN DoProfile () * * DESCRIPTION * * Checks to see if profiling is enabled based on whether the * user has specified profiling, the maximum trace depth has * not yet been reached, the current function is selected, * and the current process is selected. Returns TRUE if * profiling is enabled, FALSE otherwise. * */ #ifndef THREAD static BOOLEAN DoProfile() { REGISTER BOOLEAN profile; CODE_STATE *state; state = code_state(); profile = FALSE; if (PROFILING && state->level <= _gu_db_stack->maxdepth && InList(_gu_db_stack->p_functions, state->func) && InList(_gu_db_stack->processes, _gu_db_process_)) profile = TRUE; return (profile); } #endif /* * FUNCTION * * _gu_db_keyword_ test keyword for member of keyword list * * SYNOPSIS * * BOOLEAN _gu_db_keyword_ (keyword) * char *keyword; * * DESCRIPTION * * Test a keyword to determine if it is in the currently active * keyword list. As with the function list, a keyword is accepted * if the list is null, otherwise it must match one of the list * members. When debugging is not on, no keywords are accepted. * After the maximum trace level is exceeded, no keywords are * accepted (this behavior subject to change). Additionally, * the current function and process must be accepted based on * their respective lists. 
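 *
 *      For example (illustrative), under a control string of
 *      "d,input,output" only macros tagged with one of those keywords
 *      produce output:
 *
 *              GU_DBUG_PRINT ("input", ("read %d bytes", n));   accepted
 *              GU_DBUG_PRINT ("cache", ("hit ratio %f", r));    suppressed
 *
 *      while a bare "d" accepts every keyword.
 *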
* * Returns TRUE if keyword accepted, FALSE otherwise. * */ BOOLEAN _gu_db_keyword_(const char *keyword) { REGISTER BOOLEAN result; CODE_STATE *state; state = code_state(); result = FALSE; if (DEBUGGING && state->level <= _gu_db_stack->maxdepth && InList(_gu_db_stack->functions, state->func) && InList(_gu_db_stack->keywords, keyword) && InList(_gu_db_stack->processes, _gu_db_process_)) result = TRUE; return (result); } /* * FUNCTION * * Indent indent a line to the given indentation level * * SYNOPSIS * * static VOID Indent (indent) * int indent; * * DESCRIPTION * * Indent a line to the given level. Note that this is * a simple minded but portable implementation. * There are better ways. * * Also, the indent must be scaled by the compile time option * of character positions per nesting level. * */ static void Indent(int indent) { REGISTER int count; indent = max(indent - 1 - _gu_db_stack->sub_level, 0) * INDENT; for (count = 0; count < indent; count++) { if ((count % INDENT) == 0) fputc('|', _gu_db_fp_); else fputc(' ', _gu_db_fp_); } } /* * FUNCTION * * FreeList free all memory associated with a linked list * * SYNOPSIS * * static VOID FreeList (linkp) * struct link *linkp; * * DESCRIPTION * * Given pointer to the head of a linked list, frees all * memory held by the list and the members of the list. * */ static void FreeList(struct link *linkp) { REGISTER struct link *old; while (linkp != NULL) { old = linkp; linkp = linkp->next_link; if (old->str != NULL) { free(old->str); } free((char *) old); } } /* * FUNCTION * * StrDup make a duplicate of a string in new memory * * SYNOPSIS * * static char *StrDup (my_string) * char *string; * * DESCRIPTION * * Given pointer to a string, allocates sufficient memory to make * a duplicate copy, and copies the string to the newly allocated * memory. Failure to allocated sufficient memory is immediately * fatal. * */ static char * StrDup(const char *str) { register char *new_malloc; new_malloc = DbugMalloc((int) strlen(str) + 1); (void) strcpy(new_malloc, str); return (new_malloc); } /* * FUNCTION * * DoPrefix print debugger line prefix prior to indentation * * SYNOPSIS * * static VOID DoPrefix (_line_) * int _line_; * * DESCRIPTION * * Print prefix common to all debugger output lines, prior to * doing indentation if necessary. Print such information as * current process name, current source file name and line number, * and current function nesting depth. * */ static void DoPrefix(uint _line_) { CODE_STATE *state; state = code_state(); state->lineno++; if (_gu_db_stack->flags & PID_ON) { #ifdef THREAD (void) fprintf(_gu_db_fp_, "%5d:(thread %lu):", (int)getpid(), (unsigned long)pthread_self()); #else (void) fprintf(_gu_db_fp_, "%5d: ", (int) getpid()); #endif /* THREAD */ } if (_gu_db_stack->flags & NUMBER_ON) { (void) fprintf(_gu_db_fp_, "%5d: ", state->lineno); } if (_gu_db_stack->flags & PROCESS_ON) { (void) fprintf(_gu_db_fp_, "%s: ", _gu_db_process_); } if (_gu_db_stack->flags & FILE_ON) { (void) fprintf(_gu_db_fp_, "%14s: ", BaseName(state->file)); } if (_gu_db_stack->flags & LINE_ON) { (void) fprintf(_gu_db_fp_, "%5d: ", _line_); } if (_gu_db_stack->flags & DEPTH_ON) { (void) fprintf(_gu_db_fp_, "%4d: ", state->level); } } /* * FUNCTION * * GU_DBUGOpenFile open new output stream for debugger output * * SYNOPSIS * * static VOID GU_DBUGOpenFile (name) * char *name; * * DESCRIPTION * * Given name of a new file (or "-" for stdout) opens the file * and sets the output stream to the new file. 
* */ static void GU_DBUGOpenFile(const char *name, int append) { REGISTER FILE *fp; REGISTER BOOLEAN newfile; if (name != NULL) { strcpy(_gu_db_stack->name, name); if (strlen(name) == 1 && name[0] == '-') { _gu_db_fp_ = stdout; _gu_db_stack->out_file = _gu_db_fp_; _gu_db_stack->flags |= FLUSH_ON_WRITE; } else { if (!Writable((char *) name)) { (void) fprintf(stderr, ERR_OPEN, _gu_db_process_, name); perror(""); fflush(stderr); } else { newfile = !EXISTS(name); if (!(fp = fopen(name, append ? "a+" : "w"))) { (void) fprintf(stderr, ERR_OPEN, _gu_db_process_, name); perror(""); fflush(stderr); } else { _gu_db_fp_ = fp; _gu_db_stack->out_file = fp; if (newfile) { ChangeOwner(name); } } } } } } /* * FUNCTION * * OpenProfile open new output stream for profiler output * * SYNOPSIS * * static FILE *OpenProfile (name) * char *name; * * DESCRIPTION * * Given name of a new file, opens the file * and sets the profiler output stream to the new file. * * It is currently unclear whether the prefered behavior is * to truncate any existing file, or simply append to it. * The latter behavior would be desirable for collecting * accumulated runtime history over a number of separate * runs. It might take some changes to the analyzer program * though, and the notes that Binayak sent with the profiling * diffs indicated that append was the normal mode, but this * does not appear to agree with the actual code. I haven't * investigated at this time [fnf; 24-Jul-87]. */ #ifndef THREAD static FILE * OpenProfile(const char *name) { REGISTER FILE *fp; REGISTER BOOLEAN newfile; fp = 0; if (!Writable(name)) { (void) fprintf(_gu_db_fp_, ERR_OPEN, _gu_db_process_, name); perror(""); dbug_flush(0); (void) Delay(_gu_db_stack->delay); } else { newfile = !EXISTS(name); if (!(fp = fopen(name, "w"))) { (void) fprintf(_gu_db_fp_, ERR_OPEN, _gu_db_process_, name); perror(""); dbug_flush(0); } else { _gu_db_pfp_ = fp; _gu_db_stack->prof_file = fp; if (newfile) { ChangeOwner(name); } } } return fp; } #endif /* * FUNCTION * * CloseFile close the debug output stream * * SYNOPSIS * * static VOID CloseFile (fp) * FILE *fp; * * DESCRIPTION * * Closes the debug output stream unless it is standard output * or standard error. * */ static void CloseFile(FILE * fp) { if (fp != stderr && fp != stdout) { if (fclose(fp) == EOF) { pthread_mutex_lock(&_gu_db_mutex); (void) fprintf(_gu_db_fp_, ERR_CLOSE, _gu_db_process_); perror(""); dbug_flush(0); } } } /* * FUNCTION * * DbugExit print error message and exit * * SYNOPSIS * * static VOID DbugExit (why) * char *why; * * DESCRIPTION * * Prints error message using current process name, the reason for * aborting (typically out of memory), and exits with status 1. * This should probably be changed to use a status code * defined in the user's debugger include file. * */ static void DbugExit(const char *why) { (void) fprintf(stderr, ERR_ABORT, _gu_db_process_, why); (void) fflush(stderr); exit(1); } /* * FUNCTION * * DbugMalloc allocate memory for debugger runtime support * * SYNOPSIS * * static long *DbugMalloc (size) * int size; * * DESCRIPTION * * Allocate more memory for debugger runtime support functions. * Failure to to allocate the requested number of bytes is * immediately fatal to the current process. This may be * rather unfriendly behavior. It might be better to simply * print a warning message, freeze the current debugger state, * and continue execution. 
* */ static char * DbugMalloc(int size) { register char *new_malloc; if (!(new_malloc = (char *) malloc((unsigned int) size))) DbugExit("out of memory"); return (new_malloc); } /* * As strtok but two separators in a row are changed to one * separator (to allow directory-paths in dos). */ static char * static_strtok(char *s1, char separator) { static char *end = NULL; register char *rtnval, *cpy; rtnval = NULL; if (s1 != NULL) end = s1; if (end != NULL && *end != EOS) { rtnval = cpy = end; do { if ((*cpy++ = *end++) == separator) { if (*end != separator) { cpy--; /* Point at separator */ break; } end++; /* Two separators in a row, skipp one */ } } while (*end != EOS); *cpy = EOS; /* Replace last separator */ } return (rtnval); } /* * FUNCTION * * BaseName strip leading pathname components from name * * SYNOPSIS * * static char *BaseName (pathname) * char *pathname; * * DESCRIPTION * * Given pointer to a complete pathname, locates the base file * name at the end of the pathname and returns a pointer to * it. * */ static char * BaseName(const char *pathname) { register const char *base; base = strrchr(pathname, FN_LIBCHAR); // if (base++ == NullS) - this doesn't make sense if (NULL == base || '\0' == base[1]) base = pathname; return ((char *) base); } /* * FUNCTION * * Writable test to see if a pathname is writable/creatable * * SYNOPSIS * * static BOOLEAN Writable (pathname) * char *pathname; * * DESCRIPTION * * Because the debugger might be linked in with a program that * runs with the set-uid-bit (suid) set, we have to be careful * about opening a user named file for debug output. This consists * of checking the file for write access with the real user id, * or checking the directory where the file will be created. * * Returns TRUE if the user would normally be allowed write or * create access to the named file. Returns FALSE otherwise. * */ #ifndef Writable static BOOLEAN Writable(char *pathname) { REGISTER BOOLEAN granted; REGISTER char *lastslash; granted = FALSE; if (EXISTS(pathname)) { if (WRITABLE(pathname)) { granted = TRUE; } } else { lastslash = strrchr(pathname, '/'); if (lastslash != NULL) { *lastslash = EOS; } else { pathname = "."; } if (WRITABLE(pathname)) { granted = TRUE; } if (lastslash != NULL) { *lastslash = '/'; } } return (granted); } #endif /* * FUNCTION * * ChangeOwner change owner to real user for suid programs * * SYNOPSIS * * static VOID ChangeOwner (pathname) * * DESCRIPTION * * For unix systems, change the owner of the newly created debug * file to the real owner. This is strictly for the benefit of * programs that are running with the set-user-id bit set. * * Note that at this point, the fact that pathname represents * a newly created file has already been established. If the * program that the debugger is linked to is not running with * the suid bit set, then this operation is redundant (but * harmless). * */ #ifndef ChangeOwner static void ChangeOwner(char *pathname) { if (chown(pathname, getuid(), getgid()) == -1) { (void) fprintf(stderr, ERR_CHOWN, _gu_db_process_, pathname); perror(""); (void) fflush(stderr); } } #endif /* * FUNCTION * * _gu_db_setjmp_ save debugger environment * * SYNOPSIS * * VOID _gu_db_setjmp_ () * * DESCRIPTION * * Invoked as part of the user's GU_DBUG_SETJMP macro to save * the debugger environment in parallel with saving the user's * environment. 
* */ #ifdef HAVE_LONGJMP void _gu_db_setjmp_() { CODE_STATE *state; state = code_state(); state->jmplevel = state->level; state->jmpfunc = state->func; state->jmpfile = state->file; } /* * FUNCTION * * _gu_db_longjmp_ restore previously saved debugger environment * * SYNOPSIS * * VOID _gu_db_longjmp_ () * * DESCRIPTION * * Invoked as part of the user's GU_DBUG_LONGJMP macro to restore * the debugger environment in parallel with restoring the user's * previously saved environment. * */ void _gu_db_longjmp_() { CODE_STATE *state; state = code_state(); state->level = state->jmplevel; if (state->jmpfunc) { state->func = state->jmpfunc; } if (state->jmpfile) { state->file = state->jmpfile; } } #endif /* * FUNCTION * * DelayArg convert D flag argument to appropriate value * * SYNOPSIS * * static int DelayArg (value) * int value; * * DESCRIPTION * * Converts delay argument, given in tenths of a second, to the * appropriate numerical argument used by the system to delay * that that many tenths of a second. For example, on the * amiga, there is a system call "Delay()" which takes an * argument in ticks (50 per second). On unix, the sleep * command takes seconds. Thus a value of "10", for one * second of delay, gets converted to 50 on the amiga, and 1 * on unix. Other systems will need to use a timing loop. * */ #ifdef AMIGA #define HZ (50) /* Probably in some header somewhere */ #endif static int DelayArg(int value) { uint delayarg = 0; #if (unix || xenix) delayarg = value / 10; /* Delay is in seconds for sleep () */ #endif #ifdef AMIGA delayarg = (HZ * value) / 10; /* Delay in ticks for Delay () */ #endif return (delayarg); } /* * A dummy delay stub for systems that do not support delays. * With a little work, this can be turned into a timing loop. */ #if ! defined(Delay) && ! defined(AMIGA) static int Delay(int ticks) { return ticks; } #endif /* * FUNCTION * * perror perror simulation for systems that don't have it * * SYNOPSIS * * static VOID perror (s) * char *s; * * DESCRIPTION * * Perror produces a message on the standard error stream which * provides more information about the library or system error * just encountered. The argument string s is printed, followed * by a ':', a blank, and then a message and a newline. * * An undocumented feature of the unix perror is that if the string * 's' is a null string (NOT a NULL pointer!), then the ':' and * blank are not printed. * * This version just complains about an "unknown system error". * */ /* flush dbug-stream, free mutex lock & wait delay */ /* This is because some systems (MSDOS!!) dosn't flush fileheader */ /* and dbug-file isn't readable after a system crash !! 
*/ static void dbug_flush(CODE_STATE * state) { #ifndef THREAD if (_gu_db_stack->flags & FLUSH_ON_WRITE) #endif { #if defined(MSDOS) || defined(__WIN__) if (_gu_db_fp_ != stdout && _gu_db_fp_ != stderr) { if (!(freopen(_gu_db_stack->name, "a", _gu_db_fp_))) { (void) fprintf(stderr, ERR_OPEN, _gu_db_process_,_gu_db_stack->name); fflush(stderr); _gu_db_fp_ = stdout; _gu_db_stack->out_file = _gu_db_fp_; _gu_db_stack->flags |= FLUSH_ON_WRITE; } } else #endif { (void) fflush(_gu_db_fp_); if (_gu_db_stack->delay) (void) Delay(_gu_db_stack->delay); } } if (!state || !state->locked) pthread_mutex_unlock(&_gu_db_mutex); } /* dbug_flush */ void _gu_db_lock_file() { CODE_STATE *state; state = code_state(); pthread_mutex_lock(&_gu_db_mutex); state->locked = 1; } void _gu_db_unlock_file() { CODE_STATE *state; state = code_state(); state->locked = 0; pthread_mutex_unlock(&_gu_db_mutex); } /* * Here we need the definitions of the clock routine. Add your * own for whatever system that you have. */ #ifndef THREAD #if defined(HAVE_GETRUSAGE) #include #include /* extern int getrusage(int, struct rusage *); */ /* * Returns the user time in milliseconds used by this process so * far. */ static unsigned long Clock() { struct rusage ru; (void) getrusage(RUSAGE_SELF, &ru); return ((ru.ru_utime.tv_sec * 1000) + (ru.ru_utime.tv_usec / 1000)); } #elif defined(MSDOS) || defined(__WIN__) || defined(OS2) static ulong Clock() { return clock() * (1000 / Cmy_pthread_mutex_lockS_PER_SEC); } #elif defined (amiga) struct DateStamp { /* Yes, this is a hack, but doing it right */ long ds_Days; /* is incredibly ugly without splitting this */ long ds_Minute; /* off into a separate file */ long ds_Tick; }; static int first_clock = TRUE; static struct DateStamp begin; static struct DateStamp elapsed; static unsigned long Clock() { register struct DateStamp *now; register unsigned long millisec = 0; extern VOID *AllocMem(); now = (struct DateStamp *) AllocMem((long) sizeof(struct DateStamp), 0L); if (now != NULL) { if (first_clock == TRUE) { first_clock = FALSE; (void) DateStamp(now); begin = *now; } (void) DateStamp(now); millisec = 24 * 3600 * (1000 / HZ) * (now->ds_Days - begin.ds_Days); millisec += 60 * (1000 / HZ) * (now->ds_Minute - begin.ds_Minute); millisec += (1000 / HZ) * (now->ds_Tick - begin.ds_Tick); (void) FreeMem(now, (long) sizeof(struct DateStamp)); } return (millisec); } #else static unsigned long Clock() { return (0); } #endif /* RUSAGE */ #endif /* THREADS */ #ifdef NO_VARARGS /* * Fake vfprintf for systems that don't support it. If this * doesn't work, you are probably SOL... 
*/ static int vfprintf(stream, format, ap) FILE *stream; char *format; va_list ap; { int rtnval; ARGS_DCL; ARG0 = va_arg(ap, ARGS_TYPE); ARG1 = va_arg(ap, ARGS_TYPE); ARG2 = va_arg(ap, ARGS_TYPE); ARG3 = va_arg(ap, ARGS_TYPE); ARG4 = va_arg(ap, ARGS_TYPE); ARG5 = va_arg(ap, ARGS_TYPE); ARG6 = va_arg(ap, ARGS_TYPE); ARG7 = va_arg(ap, ARGS_TYPE); ARG8 = va_arg(ap, ARGS_TYPE); ARG9 = va_arg(ap, ARGS_TYPE); rtnval = fprintf(stream, format, ARGS_LIST); return (rtnval); } #endif /* NO_VARARGS */ char _gu_dig_vec[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; galera-3-25.3.20/galerautils/src/gu_fdesc.cpp0000644000015300001660000001526213042054732020532 0ustar jenkinsjenkins/* * Copyright (C) 2009-2016 Codership Oy * * $Id$ */ #include "gu_fdesc.hpp" #include "gu_logger.hpp" #include "gu_throw.hpp" extern "C" { #include "gu_limits.h" } #if !defined(_XOPEN_SOURCE) && !defined(__APPLE__) #define _XOPEN_SOURCE 600 #endif #include #include #include #include #include #include #ifndef O_CLOEXEC // CentOS < 6.0 does not have it #define O_CLOEXEC 0 #endif #ifndef O_NOATIME #define O_NOATIME 0 #endif namespace gu { static int const OPEN_FLAGS = O_RDWR | O_NOATIME | O_CLOEXEC; static int const CREATE_FLAGS = OPEN_FLAGS | O_CREAT /*| O_TRUNC*/; FileDescriptor::FileDescriptor (const std::string& fname, bool const sync) : name_(fname), fd_ (open (name_.c_str(), OPEN_FLAGS, S_IRUSR | S_IWUSR)), size_(lseek (fd_, 0, SEEK_END)), sync_(sync) { constructor_common(); } static unsigned long long available_storage(const std::string& name, size_t size) { static size_t const reserve(1 << 20); // reserve 1M free space struct statvfs stat; int const err(statvfs(name.c_str(), &stat)); if (0 == err) { unsigned long long const free_size(stat.f_bavail * stat.f_bsize); if (reserve < free_size) { return free_size - reserve; } else { return 0; } } else { int const errn(errno); log_warn << "statvfs() failed on '" << name << "' partition: " << errn << " (" << strerror(errn) <<"). 
Proceeding anyway."; return std::numeric_limits::max(); } } FileDescriptor::FileDescriptor (const std::string& fname, size_t const size, bool const allocate, bool const sync) : name_(fname), fd_ (open (fname.c_str(), CREATE_FLAGS, S_IRUSR | S_IWUSR)), size_(size), sync_(sync) { constructor_common(); off_t const current_size(lseek (fd_, 0, SEEK_END)); if (current_size < size_) { unsigned long long const available(available_storage(name_, size_)); if (size_t(size_) > available) { ::close(fd_); ::unlink(name_.c_str()); gu_throw_error(ENOSPC) << "Requested size " << size_ << " for '" << name_ << "' exceeds available storage space " << available; } if (allocate) { // reserve space that hasn't been reserved prealloc (current_size); } else { // reserve size or bus error follows mmap() write_byte (size_ - 1); } } else if (current_size > size_) { log_debug << "Truncating '" << name_<< "' to " << size_<< " bytes."; if (ftruncate(fd_, size_)) { gu_throw_error(errno) << "Failed to truncate '" << name_ << "' to " << size_ << " bytes."; } } else { log_debug << "Reusing existing '" << name_ << "'."; } } void FileDescriptor::constructor_common() { if (fd_ < 0) { gu_throw_error(errno) << "Failed to open file '" + name_ + '\''; } #if !defined(__APPLE__) /* Darwin does not have posix_fadvise */ /* benefits are questionable int err(posix_fadvise (value, 0, size, POSIX_FADV_SEQUENTIAL)); if (err != 0) { log_warn << "Failed to set POSIX_FADV_SEQUENTIAL on " << name << ": " << err << " (" << strerror(err) << ")"; } */ #endif log_debug << "Opened file '" << name_ << "', size: " << size_; log_debug << "File descriptor: " << fd_; } FileDescriptor::~FileDescriptor () { if (sync_) { try { sync(); } catch (Exception& e) { log_error << e.what(); } } if (close(fd_) != 0) { int const err(errno); log_error << "Failed to close file '" << name_ << "': " << err << " (" << strerror(err) << '\''; } else { log_debug << "Closed file '" << name_ << "'"; } } void FileDescriptor::sync () const { log_debug << "Flushing file '" << name_ << "'"; if (fsync (fd_) < 0) { gu_throw_error(errno) << "fsync() failed on '" + name_ + '\''; } log_debug << "Flushed file '" << name_ << "'"; } bool FileDescriptor::write_byte (off_t offset) { byte_t const byte (0); if (lseek (fd_, offset, SEEK_SET) != offset) gu_throw_error(errno) << "lseek() failed on '" << name_ << '\''; if (write (fd_, &byte, sizeof(byte)) != sizeof(byte)) gu_throw_error(errno) << "write() failed on '" << name_ << '\''; return true; } /*! 
prealloc() fallback */ void FileDescriptor::write_file (off_t const start) { // last byte of the start page off_t offset = (start / GU_PAGE_SIZE + 1) * GU_PAGE_SIZE - 1; log_info << "Preallocating " << (size_ - start) << '/' << size_ << " bytes in '" << name_ << "'..."; while (offset < size_ && write_byte (offset)) { offset += GU_PAGE_SIZE; } if (offset >= size_ && write_byte (size_ - 1)) { sync(); return; } gu_throw_error (errno) << "File preallocation failed"; } void FileDescriptor::prealloc(off_t const start) { off_t const diff (size_ - start); log_debug << "Preallocating " << diff << '/' << size_ << " bytes in '" << name_ << "'..."; #if defined(__APPLE__) if (0 != fcntl (fd_, F_SETSIZE, size_) && 0 != ftruncate (fd_, size_)) #else if (0 != posix_fallocate (fd_, start, diff)) #endif { if ((EINVAL == errno || ENOSYS == errno) && start >= 0 && diff > 0) { // FS does not support the operation, try physical write write_file (start); } else { gu_throw_error (errno) << "File preallocation failed"; } } } } galera-3-25.3.20/galerautils/src/gu_resolver.hpp0000644000015300001660000001644713042054732021322 0ustar jenkinsjenkins/* * Copyright (C) 2008-2012 Codership Oy * * $Id$ */ /*! * @file gu_resolver.hpp Simple resolver utility */ #ifndef __GU_RESOLVER_HPP__ #define __GU_RESOLVER_HPP__ #include "gu_throw.hpp" #include #include #include #include // Forward declarations namespace gu { class URI; } // namespace gu // Declarations namespace gu { namespace net { /*! * @class Sockaddr * * @brief Class encapsulating struct sockaddr. * * Class encapsulating struct sockaddr and providing * simple interface to access sockaddr fields. */ class Sockaddr; /*! * @class IMReq * * @brief Class encapsulating imreq structs. */ class MReq; /*! * @class Addrinfo * * @brief Class encapsulating struct addrinfo. * * Class encapsulating struct addrinfo and providing interface * to access addrinfo fields. */ class Addrinfo; /*! * Resolve address given in @uri * * @return Addrinfo object representing address * * @throw gu::Exception in case of failure */ Addrinfo resolve(const gu::URI& uri); } // namespace net } // namespace gu class gu::net::Sockaddr { public: /*! * Default constuctor. * * @param sa Pointer to sockaddr struct * @param sa_len Length of sockaddr struct */ Sockaddr(const sockaddr* sa, socklen_t sa_len); /*! * Copy constructor. * * @param sa Reference to Sockaddr */ Sockaddr(const Sockaddr& sa); /*! * Destructor */ ~Sockaddr(); /*! * Get address family. * * @return Address family */ sa_family_t get_family() const { return sa_->sa_family; } /*! * Get port in network byte order. This is applicable only * for AF_INET, AF_INET6. * * @return Port in nework byte order */ unsigned short get_port() const { switch(sa_->sa_family) { case AF_INET: return reinterpret_cast(sa_)->sin_port; case AF_INET6: return reinterpret_cast(sa_)->sin6_port; default: gu_throw_fatal; } } /*! * Get pointer to address. Return value is pointer to void, * user must do casting by himself. * * @todo: Figure out how this could be done in type safe way. * * @return Void pointer to address element. 
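         *
         * A minimal usage sketch (illustrative; assumes <arpa/inet.h> and a
         * caller-provided Sockaddr named sa):
         *
         *     char buf[INET6_ADDRSTRLEN];
         *     inet_ntop(sa.get_family(), sa.get_addr(), buf, sizeof(buf));
         *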
*/ const void* get_addr() const { switch(sa_->sa_family) { case AF_INET: return &reinterpret_cast(sa_)->sin_addr; case AF_INET6: return &reinterpret_cast(sa_)->sin6_addr; default: gu_throw_fatal << "invalid address family: " << sa_->sa_family; } } socklen_t get_addr_len() const { switch(sa_->sa_family) { case AF_INET: return sizeof(reinterpret_cast(sa_)->sin_addr); case AF_INET6: return sizeof(reinterpret_cast(sa_)->sin6_addr); default: gu_throw_fatal; } } /*! * Get non-const reference to sockaddr struct. * * @return Non-const reference to sockaddr struct. */ sockaddr& get_sockaddr() { return *sa_; } /*! * Get const reference to sockaddr struct. * * @return Const reference to sockaddr struct. */ const sockaddr& get_sockaddr() const { return *sa_; } /*! * Get length of sockaddr struct. * * @return Length of sockaddr struct */ socklen_t get_sockaddr_len() const { return sa_len_; } bool is_multicast() const; bool is_broadcast() const; bool is_anyaddr() const; static Sockaddr get_anyaddr(const Sockaddr& sa) { Sockaddr ret(sa); switch(ret.sa_->sa_family) { case AF_INET: reinterpret_cast(ret.sa_)->sin_addr.s_addr = 0; break; case AF_INET6: memset(&reinterpret_cast(ret.sa_)->sin6_addr, 0, sizeof(struct in6_addr)); break; default: gu_throw_fatal << "invalid address family: " << ret.sa_->sa_family; } return ret; } Sockaddr& operator=(const Sockaddr& sa) { memcpy(sa_, sa.sa_, sa_len_); return *this; } private: sockaddr* sa_; socklen_t sa_len_; }; class gu::net::MReq { public: MReq(const Sockaddr& mcast_addr, const Sockaddr& if_addr); ~MReq(); const void* get_mreq() const { return mreq_; } socklen_t get_mreq_len() const { return mreq_len_; } int get_ipproto() const { return ipproto_; } int get_add_membership_opt() const { return add_membership_opt_; } int get_drop_membership_opt() const { return drop_membership_opt_; } int get_multicast_if_opt() const { return multicast_if_opt_; } int get_multicast_loop_opt() const { return multicast_loop_opt_; } int get_multicast_ttl_opt() const { return multicast_ttl_opt_; } const void* get_multicast_if_value() const; int get_multicast_if_value_size() const; private: MReq(const MReq&); void operator=(const MReq&); void* mreq_; socklen_t mreq_len_; int ipproto_; int add_membership_opt_; int drop_membership_opt_; int multicast_if_opt_; int multicast_loop_opt_; int multicast_ttl_opt_; }; class gu::net::Addrinfo { public: /*! * Default constructor. * * @param ai Const reference to addrinfo struct */ Addrinfo(const addrinfo& ai); /*! * Copy costructor. * * @param ai Const reference to Addrinfo object to copy */ Addrinfo(const Addrinfo& ai); /*! * Copy constructor that replaces @ai sockaddr struct. * * @param ai Const reference to Addrinfo object to copy * @param sa Const reference to Sockaddr struct that replaces * @ai sockaddr data */ Addrinfo(const Addrinfo& ai, const Sockaddr& sa); /*! * Destructor. */ ~Addrinfo(); /*! * Get address family, AF_INET, AF_INET6 etc. * * @return Address family */ int get_family() const { return ai_.ai_family; } /*! * Get socket type, SOCK_STREAM, SOCK_DGRAM etc * * @return Socket type */ int get_socktype() const { return ai_.ai_socktype; } /*! * Get protocol. * * @return Protocol */ int get_protocol() const { return ai_.ai_protocol; } /*! * Get length of associated sockaddr struct * * @return Length of associated sockaddr struct */ socklen_t get_addrlen() const { return ai_.ai_addrlen; } /*! * Get associated Sockaddr object. 
* * @return Associated Sockaddr object */ Sockaddr get_addr() const { return Sockaddr(ai_.ai_addr, ai_.ai_addrlen); } /*! * Get string representation of the addrinfo. * * @return String representation of the addrinfo */ std::string to_string() const; private: addrinfo ai_; }; #endif /* __GU_RESOLVER_HPP__ */ galera-3-25.3.20/galerautils/src/gu_utils.c0000644000015300001660000000406013042054732020240 0ustar jenkinsjenkins// Copyright (C) 2010 Codership Oy /** * @file Miscellaneous utility functions * * $Id$ */ #include "gu_utils.h" #include #include #include #include #include #include const char* gu_str2ll (const char* str, long long* ll) { char* ret; int shift = 0; long long llret = strtoll (str, &ret, 0); switch (ret[0]) { case 't': case 'T': shift += 10; case 'g': case 'G': shift += 10; case 'm': case 'M': shift += 10; case 'k': case 'K': shift += 10; ret++; if (llret == ((llret << (shift + 1)) >> (shift + 1))) { llret <<= shift; } else { /* ERANGE */ if (llret > 0) llret = LLONG_MAX; else llret = LLONG_MIN; errno = ERANGE; } default: *ll = llret; } return ret; } const char* gu_str2dbl (const char* str, double* dbl) { char* ret; *dbl = strtod (str, &ret); return ret; } const char* gu_str2bool (const char* str, bool* b) { size_t len = strlen(str); int res = -1; /* no conversion */ switch (len) { case 1: switch (str[0]) { case '0': case 'N': case 'n': res = 0; break; case '1': case 'Y': case 'y': res = 1; break; } break; case 2: if (!strcasecmp(str, "on")) res = 1; if (!strcasecmp(str, "no")) res = 0; break; case 3: if (!strcasecmp(str, "off")) res = 0; if (!strcasecmp(str, "yes")) res = 1; break; case 4: if (!strcasecmp(str, "true")) res = 1; if (!strcasecmp(str, "sure")) res = 1; if (!strcasecmp(str, "nope")) res = 0; break; case 5: if (!strcasecmp(str, "false")) res = 0; break; } *b = (res > 0); return (res >= 0) ? 
(str + len) : str; } const char* gu_str2ptr (const char* str, void** ptr) { char* ret; *ptr = (void*) (intptr_t)strtoll (str, &ret, 16); return ret; } galera-3-25.3.20/galerautils/src/gu_config.hpp0000644000015300001660000001622513042054732020720 0ustar jenkinsjenkins// Copyright (C) 2010-2014 Codership Oy /** * @file * Configuration management class * * $Id$ */ #ifndef _gu_config_hpp_ #define _gu_config_hpp_ #include "gu_string_utils.hpp" #include "gu_exception.hpp" #include "gu_utils.hpp" #include "gu_throw.hpp" #include "gu_logger.hpp" #include #include namespace gu { class Config; } extern "C" const char* gu_str2ll (const char* str, long long* ll); class gu::Config { public: static const char PARAM_SEP; // parameter separator static const char KEY_VALUE_SEP; // key-value separator static const char ESCAPE; // escape symbol Config (); bool has (const std::string& key) const { return (params_.find(key) != params_.end()); } bool is_set (const std::string& key) const { param_map_t::const_iterator const i(params_.find(key)); if (i != params_.end()) { return i->second.is_set(); } else { throw NotFound(); } } /* adds parameter to the known parameter list */ void add (const std::string& key) { if (!has(key)) { params_[key] = Parameter(); } } /* adds parameter to the known parameter list and sets its value */ void add (const std::string& key, const std::string& value) { if (!has(key)) { params_[key] = Parameter(value); } } /* sets a known parameter to some value, otherwise throws NotFound */ void set (const std::string& key, const std::string& value) { param_map_t::iterator const i(params_.find(key)); if (i != params_.end()) { i->second.set(value); } else { #ifndef NDEBUG log_debug << "Key '" << key << "' not recognized."; #endif throw NotFound(); } } void set (const std::string& key, const char* value) { set(key, std::string(value)); } /* Parse a string of semicolumn separated key=value pairs into a vector. * Throws Exception in case of parsing error. */ static void parse (std::vector >& params_vector, const std::string& params_string); /* Parse a string of semicolumn separated key=value pairs and * set the values. * Throws NotFound if key was not explicitly added before. */ void parse (const std::string& params_string); /* General template for integer types */ template void set (const std::string& key, T val) { set_longlong (key, val); } /*! @throws NotSet, NotFound */ const std::string& get (const std::string& key) const { param_map_t::const_iterator const i(params_.find(key)); if (i == params_.end()) throw NotFound(); if (i->second.is_set()) return i->second.value(); log_debug << key << " not set."; throw NotSet(); } const std::string& get (const std::string& key, const std::string& def) const { try { return get(key); } catch (NotSet&) { return def ; } } /*! @throws NotFound */ template inline T get (const std::string& key) const { return from_config (get(key)); } template inline T get(const std::string& key, const T& def) const { try { return get(key); } catch (NotSet&) { return def; } } void print (std::ostream& os, bool include_not_set = false) const; /*! Convert string configuration values to other types. * General template for integers, specialized templates follow below. 
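     * (Illustrative example, not part of the original comment: the
     * gu_str2ll-based parsing below honours size suffixes, so e.g.
     * from_config<long long>("64K") yields 65536.)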
* @throw gu::Exception in case conversion failed */ template static inline T from_config (const std::string& value) { const char* str = value.c_str(); long long ret; errno = 0; // this is needed to detect overflow const char* endptr = gu_str2ll (str, &ret); check_conversion (str, endptr, "integer", ERANGE == errno); switch (sizeof(T)) { case 1: return overflow_char (ret); case 2: return overflow_short (ret); case 4: return overflow_int (ret); default: return ret; } } /* iterator stuff */ class Parameter { public: explicit Parameter(const std::string& value) : value_(value), set_(true) {} Parameter() : value_(), set_(false) {} const std::string& value() const { return value_; } bool is_set() const { return set_ ; } void set(const std::string& value) { value_ = value; set_ = true; } private: std::string value_; bool set_; }; typedef std::map param_map_t; typedef param_map_t::const_iterator const_iterator; const_iterator begin() const { return params_.begin(); } const_iterator end() const { return params_.end(); } private: static void check_conversion (const char* ptr, const char* endptr, const char* type, bool range_error = false); static char overflow_char(long long ret); static short overflow_short(long long ret); static int overflow_int(long long ret); void set_longlong (const std::string& key, long long value); param_map_t params_; }; extern "C" const char* gu_str2dbl (const char* str, double* dbl); extern "C" const char* gu_str2bool (const char* str, bool* bl); extern "C" const char* gu_str2ptr (const char* str, void** ptr); namespace gu { std::ostream& operator<<(std::ostream&, const gu::Config&); /*! Specialized templates for "funny" types */ template <> inline double Config::from_config (const std::string& value) { const char* str = value.c_str(); double ret; errno = 0; // this is needed to detect over/underflow const char* endptr = gu_str2dbl (str, &ret); check_conversion (str, endptr, "double", ERANGE == errno); return ret; } template <> inline bool Config::from_config (const std::string& value) { const char* str = value.c_str(); bool ret; const char* endptr = gu_str2bool (str, &ret); check_conversion (str, endptr, "boolean"); return ret; } template <> inline void* Config::from_config (const std::string& value) { const char* str = value.c_str(); void* ret; const char* endptr = gu_str2ptr (str, &ret); check_conversion (str, endptr, "pointer"); return ret; } template <> inline void Config::set (const std::string& key, const void* value) { set (key, to_string(value)); } template <> inline void Config::set (const std::string& key, double val) { set (key, to_string(val)); } template <> inline void Config::set (const std::string& key, bool val) { const char* val_str(val ? 
"YES" : "NO"); // YES/NO is most generic set (key, val_str); } } #endif /* _gu_config_hpp_ */ galera-3-25.3.20/galerautils/src/SConscript0000644000015300001660000000701413042054732020255 0ustar jenkinsjenkinsImport('env', 'x86', 'sysname') libgalerautils_env = env.Clone() # Include paths libgalerautils_env.Append(CPPPATH = Split(''' # ''')) # C part libgalerautils_sources = [ 'gu_abort.c', 'gu_dbug.c', 'gu_fifo.c', 'gu_lock_step.c', 'gu_log.c', 'gu_mem.c', 'gu_mmh3.c', 'gu_spooky.c', 'gu_crc32c.c', 'gu_rand.c', 'gu_mutex.c', 'gu_hexdump.c', 'gu_to.c', 'gu_utils.c', 'gu_uuid.c', 'gu_backtrace.c', 'gu_limits.c', 'gu_time.c', 'gu_init.c' ] libgalerautils_objs = libgalerautils_env.SharedObject(libgalerautils_sources) crc32c_sources = [ '#/www.evanjones.ca/crc32c.c' ] crc32c_env = env.Clone() crc32c_env.Append(CPPPATH = [ '#' ]) crc32c_env.Append(CPPFLAGS = ' -DWITH_GALERA') crc32c_sources = [ '#/www.evanjones.ca/crc32c.c' ] crc32c_objs = crc32c_env.SharedObject(crc32c_sources) if x86: crc32c_env.Append(CFLAGS = ' -msse4.2') if sysname == 'sunos': # Ideally we want to simply strip SSE4.2 flag from the resulting # crc32.pic.o # (see http://ffmpeg.org/pipermail/ffmpeg-user/2013-March/013977.html) # but that requires some serious scons-fu, so we just don't # compile hardware support in if host CPU does not have it. from subprocess import check_call try: check_call("isainfo -v | grep sse4.2 >/dev/null 2>&1", shell=True); except: libgalerautils_env.Append(CPPFLAGS = ' -DCRC32C_NO_HARDWARE') crc32c_env.Append(CPPFLAGS = ' -DCRC32C_NO_HARDWARE') libgalerautils_env.StaticLibrary('galerautils', libgalerautils_objs + crc32c_objs) env.Append(LIBGALERA_OBJS = libgalerautils_objs + crc32c_objs) #env.Append(LIBGALERA_OBJS = libgalerautils_env.SharedObject( # libgalerautils_sources)) #env.Append(LIBGALERA_OBJS = crc32c_objs) libgalerautilsxx_env = env.Clone() # Include paths libgalerautilsxx_env.Append(CPPPATH = Split(''' # #/common ''')) # disable old style cast warnings libgalerautilsxx_env.Append(CXXFLAGS = ' -Wno-old-style-cast') # C++ part libgalerautilsxx_sources = [ 'gu_vlq.cpp', 'gu_datetime.cpp', 'gu_exception.cpp', 'gu_logger.cpp', 'gu_prodcons.cpp', 'gu_regex.cpp', 'gu_string_utils.cpp', 'gu_uri.cpp', 'gu_buffer.cpp', 'gu_utils++.cpp', 'gu_config.cpp', 'gu_fdesc.cpp', 'gu_mmap.cpp', 'gu_alloc.cpp', 'gu_rset.cpp', 'gu_resolver.cpp', 'gu_histogram.cpp', 'gu_stats.cpp', 'gu_asio.cpp', 'gu_debug_sync.cpp', 'gu_thread.cpp' ] #libgalerautilsxx_objs = libgalerautilsxx_env.Object( # libgalerautilsxx_sources) libgalerautilsxx_sobjs = libgalerautilsxx_env.SharedObject( libgalerautilsxx_sources) #hexdump_obj = libgalerautilsxx_env.Object('gu_hexdump++','gu_hexdump.cpp') hexdump_sobj = libgalerautilsxx_env.SharedObject('gu_hexdump++','gu_hexdump.cpp') #libgalerautilsxx_objs += hexdump_obj libgalerautilsxx_sobjs += hexdump_sobj #if '-DGALERA_USE_GU_NETWORK' in libgalerautils_env['CPPFLAGS']: # libgalerautilsxx_sources = [libgalerautilsxx_sources, # 'gu_resolver.cpp'] libgalerautilsxx_env.StaticLibrary('galerautils++', libgalerautilsxx_sobjs) env.Append(LIBGALERA_OBJS = libgalerautilsxx_sobjs) galera-3-25.3.20/galerautils/src/gu_dbug.h0000644000015300001660000001453213042054732020033 0ustar jenkinsjenkins/****************************************************************************** * * * N O T I C E * * * * Copyright Abandoned, 1987, Fred Fish * * * * * * This previously copyrighted work has been placed into the public * * domain by the author and may be freely used for any purpose, * * private or commercial. 
* * * * Because of the number of inquiries I was receiving about the use * * of this product in commercially developed works I have decided to * * simply make it public domain to further its unrestricted use. I * * specifically would be most happy to see this material become a * * part of the standard Unix distributions by AT&T and the Berkeley * * Computer Science Research Group, and a standard part of the GNU * * system from the Free Software Foundation. * * * * I would appreciate it, as a courtesy, if this notice is left in * * all copies and derivative works. Thank you. * * * * The author makes no warranty of any kind with respect to this * * product and explicitly disclaims any implied warranties of mer- * * chantability or fitness for any particular purpose. * * * ****************************************************************************** */ /* * FILE * * dbug.c runtime support routines for dbug package * * SCCS * * @(#)dbug.c 1.25 7/25/89 * * DESCRIPTION * * These are the runtime support routines for the dbug package. * The dbug package has two main components; the user include * file containing various macro definitions, and the runtime * support routines which are called from the macro expansions. * * Externally visible functions in the runtime support module * use the naming convention pattern "_db_xx...xx_", thus * they are unlikely to collide with user defined function names. * * AUTHOR(S) * * Fred Fish (base code) * Enhanced Software Technologies, Tempe, AZ * asuvax!mcdphx!estinc!fnf * * Binayak Banerjee (profiling enhancements) * seismo!bpa!sjuvax!bbanerje * * Michael Widenius: * DBUG_DUMP - To dump a pice of memory. * PUSH_FLAG "O" - To be used instead of "o" if we don't * want flushing (for slow systems) * PUSH_FLAG "A" - as 'O', but we will append to the out file instead * of creating a new one. 
* Check of malloc on entry/exit (option "S") * * Alexey Yurchenko: * Renamed global symbols for use with galera project to avoid * collisions with other software (notably MySQL) * * $Id$ */ #ifndef _dbug_h #define _dbug_h #include #include typedef unsigned int uint; typedef unsigned long ulong; #define THREAD 1 #ifdef __cplusplus extern "C" { #endif extern char _gu_dig_vec[]; extern FILE* _gu_db_fp_; #define GU_DBUG_FILE _gu_db_fp_ #if defined(GU_DBUG_ON) && !defined(_lint) extern int _gu_db_on_; extern int _gu_no_db_; extern char* _gu_db_process_; extern int _gu_db_keyword_(const char* keyword); extern void _gu_db_setjmp_ (void); extern void _gu_db_longjmp_(void); extern void _gu_db_push_ (const char* control); extern void _gu_db_pop_ (void); extern void _gu_db_enter_ (const char* _func_, const char* _file_, uint _line_, const char** _sfunc_, const char** _sfile_, uint* _slevel_, char***); extern void _gu_db_return_ (uint _line_, const char** _sfunc_, const char** _sfile_, uint* _slevel_); extern void _gu_db_pargs_ (uint _line_, const char* keyword); extern void _gu_db_doprnt_ (const char* format, ...); extern void _gu_db_dump_ (uint _line_, const char *keyword, const char *memory, uint length); extern void _gu_db_lock_file (void); extern void _gu_db_unlock_file(void); #define GU_DBUG_ENTER(a) \ const char *_gu_db_func_, *_gu_db_file_; \ uint _gu_db_level_; \ char **_gu_db_framep_; \ _gu_db_enter_ (a, __FILE__, __LINE__, &_gu_db_func_, &_gu_db_file_, \ &_gu_db_level_, &_gu_db_framep_) #define GU_DBUG_LEAVE \ (_gu_db_return_ (__LINE__, &_gu_db_func_, &_gu_db_file_, \ &_gu_db_level_)) #define GU_DBUG_RETURN(a1) {GU_DBUG_LEAVE; return(a1);} #define GU_DBUG_VOID_RETURN {GU_DBUG_LEAVE; return; } #define GU_DBUG_EXECUTE(keyword,a1) \ {if (_gu_db_on_) {if (_gu_db_keyword_ (keyword)) { a1 }}} #define GU_DBUG_PRINT(keyword,arglist) \ {if (_gu_db_on_) {_gu_db_pargs_(__LINE__,keyword); \ _gu_db_doprnt_ arglist;}} #define GU_DBUG_PUSH(a1) _gu_db_push_ (a1) #define GU_DBUG_POP() _gu_db_pop_ () #define GU_DBUG_PROCESS(a1) (_gu_db_process_ = a1) #define GU_DBUG_SETJMP(a1) (_gu_db_setjmp_ (), setjmp (a1)) #define GU_DBUG_LONGJMP(a1,a2) (_gu_db_longjmp_ (), longjmp (a1, a2)) #define GU_DBUG_DUMP(keyword,a1,a2)\ {if (_gu_db_on_) {_gu_db_dump_(__LINE__,keyword,a1,a2);}} #define GU_DBUG_IN_USE (_gu_db_fp_ && _gu_db_fp_ != stderr) #define GU_DEBUGGER_OFF _no_gu_db_=1;_gu_db_on_=0; #define GU_DEBUGGER_ON _no_gu_db_=0 #define GU_DBUG_my_pthread_mutex_lock_FILE { _gu_db_lock_file(); } #define GU_DBUG_my_pthread_mutex_unlock_FILE { _gu_db_unlock_file(); } #define GU_DBUG_ASSERT(A) assert(A) #else /* No debugger */ #define GU_DBUG_ENTER(a1) #define GU_DBUG_RETURN(a1) return(a1) #define GU_DBUG_VOID_RETURN return #define GU_DBUG_EXECUTE(keyword,a1) {} #define GU_DBUG_PRINT(keyword,arglist) {} #define GU_DBUG_PUSH(a1) {} #define GU_DBUG_POP() {} #define GU_DBUG_PROCESS(a1) {} #define GU_DBUG_SETJMP setjmp #define GU_DBUG_LONGJMP longjmp #define GU_DBUG_DUMP(keyword,a1,a2) {} #define GU_DBUG_IN_USE 0 #define GU_DEBUGGER_OFF #define GU_DEBUGGER_ON #define GU_DBUG_my_pthread_mutex_lock_FILE #define GU_DBUG_my_pthread_mutex_unlock_FILE #define GU_DBUG_ASSERT(A) {} #endif #ifdef __cplusplus } #endif #endif galera-3-25.3.20/galerautils/src/gu_utils.h0000644000015300001660000000157313042054732020253 0ustar jenkinsjenkins// Copyright (C) 2010 Codership Oy /** * @file Miscellaneous utility functions * * $Id$ */ #ifndef _gu_utils_h_ #define _gu_utils_h_ #include #ifdef __cplusplus extern "C" { #endif /* * The string 
conversion functions below are slighly customized * versions of standard libc functions designed to understand 'on'/'off' and * K/M/G size modifiers and the like. * * They return pointer to the next character after conversion: * - if (ret == str) no conversion was made * - if (ret[0] == '\0') whole string was converted */ extern const char* gu_str2ll (const char* str, long long* ll); extern const char* gu_str2dbl (const char* str, double* dbl); extern const char* gu_str2bool (const char* str, bool* b); extern const char* gu_str2ptr (const char* str, void** ptr); #ifdef __cplusplus } #endif #endif /* _gu_utils_h_ */ galera-3-25.3.20/galerautils/src/gu_exception.hpp0000644000015300001660000000231213042054732021441 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy * */ #ifndef __GU_EXCEPTION__ #define __GU_EXCEPTION__ #include #include #include "gu_errno.h" namespace gu { /*! Some utility exceptions to indicate special conditions. */ class NotSet {}; class NotFound {}; class Exception: public std::exception { public: Exception (const std::string& msg_, int err_) : msg (msg_), err (err_) {} virtual ~Exception () throw() {} const char* what () const throw() { return msg.c_str(); } int get_errno () const { return err; } void trace (const char* file, const char* func, int line); private: std::string msg; const int err; }; } /* to mark a place where exception was caught */ #define GU_TRACE(_exception_) _exception_.trace(__FILE__, __FUNCTION__, __LINE__) #ifndef NDEBUG /* enabled together with assert() */ #define gu_trace(_expr_) \ try { _expr_; } catch (gu::Exception& e) { GU_TRACE(e); throw; } #else #define gu_trace(_expr_) _expr_ #endif // NDEBUG #endif // __GU_EXCEPTION__ galera-3-25.3.20/galerautils/src/gu_init.c0000644000015300001660000000077113042054732020050 0ustar jenkinsjenkins/* * Copyright (C) 2013-2016 Codership Oy * * $Id$ */ #include "gu_conf.h" #include "gu_limits.h" #include "gu_abort.h" #include "gu_crc32c.h" void gu_init (gu_log_cb_t log_cb) { gu_conf_set_log_callback (log_cb); /* this is needed in gu::MMap::sync() */ size_t const page_size = GU_PAGE_SIZE; if (page_size & (page_size - 1)) { gu_fatal("GU_PAGE_SIZE(%z) is not a power of 2", GU_PAGE_SIZE); gu_abort(); } gu_crc32c_configure(); } galera-3-25.3.20/galerautils/src/gu_backtrace.c0000644000015300001660000000115113042054732021015 0ustar jenkinsjenkins// Copyright (C) 2012 Codership Oy #include "gu_backtrace.h" #include "gu_log.h" #if defined(HAVE_EXECINFO_H) && defined(__GNUC__) #include #include char** gu_backtrace(int* size) { char** strings; void** array = malloc(*size * sizeof(void*)); if (!array) { gu_error("could not allocate memory for %d pointers\n", *size); return NULL; } *size = backtrace(array, *size); strings = backtrace_symbols(array, *size); free(array); return strings; } #else char **gu_backtrace(int* size) { return NULL; } #endif /* */ galera-3-25.3.20/galerautils/src/gu_fnv_bench.c0000644000015300001660000001403113042054732021027 0ustar jenkinsjenkins// Copyright (C) 2012 Codership Oy /*! * @file Benchmark for different hash implementations: * fnv32, fnv64, fnv128, mmh3, md5 from libssl and md5 from crypto++ * * To compile on Ubuntu: g++ -DHAVE_ENDIAN_H -DHAVE_BYTESWAP_H -DGALERA_LOG_H_ENABLE_CXX \ -O3 -march=native -msse4 -Wall -Werror -I../.. 
gu_fnv_bench.c gu_crc32c.c \ gu_mmh3.c gu_spooky.c gu_log.c ../../www.evanjones.ca/crc32c.c \ -lssl -lcrypto -lcrypto++ -o gu_fnv_bench * * on CentOS some play with -lcrypto++ may be needed (also see includes below) * * To run: * gu_fnv_bench */ #include "gu_crc32c.h" #include "gu_fnv.h" #include "gu_mmh3.h" #include "gu_spooky.h" #include "gu_hash.h" #include #include #include #include #include #define CRYPTOPP_ENABLE_NAMESPACE_WEAK 1 #include //#include enum algs { CRC32sw, CRC32hw, FNV32, FNV64, FNV128, MMH32, MMH128, SPOOKYS, SPOOKY, MD5SSL, MD5CPP, FAST128, TABLE }; static int timer (const void* const buf, ssize_t const len, long long const loops, enum algs const type) { double begin, end; struct timeval tv; const char* alg = "undefined"; size_t volatile h; // this variable serves to prevent compiler from // optimizing out the calls gettimeofday (&tv, NULL); begin = (double)tv.tv_sec + 1.e-6 * tv.tv_usec; long long i; #ifdef EXTERNAL_LOOP #define EXTERNAL_LOOP_BEGIN for (i = 0; i < loops; i++) { #define EXTERNAL_LOOP_END } #define INTERNAL_LOOP_BEGIN #define INTERNAL_LOOP_END #else #define EXTERNAL_LOOP_BEGIN #define EXTERNAL_LOOP_END #define INTERNAL_LOOP_BEGIN for (i = 0; i < loops; i++) { #define INTERNAL_LOOP_END } #endif EXTERNAL_LOOP_BEGIN switch (type) { case CRC32sw: case CRC32hw: { if (CRC32sw == type) alg = "crc32sw"; else alg = "crc32hw"; INTERNAL_LOOP_BEGIN // gu_crc32c_t crc = GU_CRC32C_INIT; h = gu_crc32c (buf, len); // h = hash; INTERNAL_LOOP_END break; } case FNV32: { alg = "fnv32a"; INTERNAL_LOOP_BEGIN uint32_t hash = GU_FNV32_SEED; gu_fnv32a_internal (buf, len, &hash); h = hash; INTERNAL_LOOP_END break; } case FNV64: { alg = "fnv64a"; INTERNAL_LOOP_BEGIN uint64_t hash = GU_FNV64_SEED;; gu_fnv64a_internal (buf, len, &hash); h = hash; INTERNAL_LOOP_END break; } case FNV128: { alg = "fnv128"; INTERNAL_LOOP_BEGIN gu_uint128_t hash = GU_FNV128_SEED; gu_fnv128a_internal (buf, len, &hash); #if defined(__SIZEOF_INT128__) h = hash; #else h = hash.u32[GU_32LO]; #endif INTERNAL_LOOP_END break; } case MMH32: { alg = "mmh32"; INTERNAL_LOOP_BEGIN h = gu_mmh32 (buf, len); INTERNAL_LOOP_END break; } case MMH128: { alg = "mmh128"; INTERNAL_LOOP_BEGIN gu_uint128_t hash; gu_mmh128 (buf, len, &hash); #if defined(__SIZEOF_INT128__) h = hash; #else h = hash.u32[GU_32LO]; #endif INTERNAL_LOOP_END break; } case SPOOKYS: { alg = "SpookyS"; INTERNAL_LOOP_BEGIN uint64_t hash[2]; gu_spooky_short (buf, len, hash); h = hash[0]; INTERNAL_LOOP_END break; } case SPOOKY: { alg = "Spooky"; INTERNAL_LOOP_BEGIN uint64_t hash[2]; gu_spooky_inline (buf, len, hash); h = hash[0]; INTERNAL_LOOP_END break; } case MD5SSL: { alg = "md5ssl"; INTERNAL_LOOP_BEGIN unsigned char md[MD5_DIGEST_LENGTH]; MD5 ((const unsigned char*)buf, len, md); INTERNAL_LOOP_END break; } case MD5CPP: { alg = "md5cpp"; INTERNAL_LOOP_BEGIN unsigned char md[16]; CryptoPP::Weak::MD5().CalculateDigest(md, (const byte*)buf, len); INTERNAL_LOOP_END break; } case FAST128: { alg = "fast128"; INTERNAL_LOOP_BEGIN uint64_t hash[2]; gu_fast_hash128 (buf, len, hash); h = hash[0]; INTERNAL_LOOP_END break; } case TABLE: { alg = "table"; INTERNAL_LOOP_BEGIN h = gu_table_hash (buf, len); INTERNAL_LOOP_END break; } } EXTERNAL_LOOP_END gettimeofday (&tv, NULL); end = (double)tv.tv_sec + 1.e-6 * tv.tv_usec; end -= begin; return printf ("%s: %lld loops, %6.3f seconds, %8.3f Mb/sec%s\n", alg, loops, end, (double)(loops * len)/end/1024/1024, h ? 
"" : " "); } int main (int argc, char* argv[]) { ssize_t buf_size = (1<<20); // 1Mb long long loops = 10000; if (argc > 1) buf_size = strtoll (argv[1], NULL, 10); if (argc > 2) loops = strtoll (argv[2], NULL, 10); /* initialization of data buffer */ ssize_t buf_size_int = buf_size / sizeof(int) + 1; int* buf = (int*) malloc (buf_size_int * sizeof(int)); if (!buf) return ENOMEM; while (buf_size_int) buf[--buf_size_int] = rand(); timer (buf, buf_size, loops, CRC32sw); CRC32CFunctionPtr const old = gu_crc32c_func; gu_crc32c_configure(); if (old != gu_crc32c_func) timer(buf, buf_size, loops, CRC32hw); timer (buf, buf_size, loops, FNV32); timer (buf, buf_size, loops, FNV64); timer (buf, buf_size, loops, FNV128); timer (buf, buf_size, loops, MMH32); timer (buf, buf_size, loops, MMH128); // timer (buf, buf_size, loops, SPOOKYS); timer (buf, buf_size, loops, SPOOKY); // timer (buf, buf_size, loops, MD5SSL); // timer (buf, buf_size, loops, MD5CPP); timer (buf, buf_size, loops, FAST128); timer (buf, buf_size, loops, TABLE); return 0; } galera-3-25.3.20/galerautils/src/gu_asio.hpp0000644000015300001660000001302713042054732020403 0ustar jenkinsjenkins// // Copyright (C) 2014 Codership Oy // // // Common ASIO methods and configuration options for Galera // #ifndef GU_ASIO_HPP #define GU_ASIO_HPP #include "gu_macros.h" // gu_likely() #include "common.h" // #ifndef HAVE_SYSTEM_ASIO // Make GCC to treat this as the system header to suppress compiler // warnings from embedded asio.hpp #pragma GCC system_header // Using embedded copy of ASIO requires turning off some // compiler warnings. #if defined(__GNUG__) # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic push # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) // Ignore possibly unknown warning flags # pragma GCC diagnostic ignored "-Wpragmas" # pragma GCC diagnostic ignored "-Weffc++" # pragma GCC diagnostic ignored "-Wold-style-cast" # pragma GCC diagnostic ignored "-Wunused-local-typedef" # pragma GCC diagnostic ignored "-Wunused-variable" #endif // __GNUG__ #endif // ! 
HAVE_SYSTEM_ASIO #include "asio.hpp" #include "asio/ssl.hpp" #include #include namespace gu { // URI schemes for networking namespace scheme { const std::string tcp("tcp"); /// TCP scheme const std::string udp("udp"); /// UDP scheme const std::string ssl("ssl"); /// SSL scheme const std::string def("tcp"); /// default scheme (TCP) } // // SSL // // Configuration options for sockets namespace conf { /// Enable SSL explicitly const std::string use_ssl("socket.ssl"); /// SSL cipher list const std::string ssl_cipher("socket.ssl_cipher"); /// SSL compression algorithm const std::string ssl_compression("socket.ssl_compression"); /// SSL private key file const std::string ssl_key("socket.ssl_key"); /// SSL certificate file const std::string ssl_cert("socket.ssl_cert"); /// SSL CA file const std::string ssl_ca("socket.ssl_ca"); /// SSL password file const std::string ssl_password_file("socket.ssl_password_file"); } // Return the cipher in use template static const char* cipher(asio::ssl::stream& socket) { return SSL_get_cipher_name(socket.impl()->ssl); } // Return the compression algorithm in use template static const char* compression(asio::ssl::stream& socket) { return SSL_COMP_get_name( SSL_get_current_compression(socket.impl()->ssl)); } // register ssl parameters to config void ssl_register_params(gu::Config&); // initialize defaults, verify set options void ssl_init_options(gu::Config&); // prepare asio::ssl::context using parameters from config void ssl_prepare_context(const gu::Config&, asio::ssl::context&, bool verify_peer_cert = true); // // Address manipulation helpers // // Return any address string. static inline std::string any_addr(const asio::ip::address& addr) { if (gu_likely(addr.is_v4() == true)) { return addr.to_v4().any().to_string(); } else { return addr.to_v6().any().to_string(); } } // Escape address string. Surrounds IPv6 address with []. // IPv4 addresses not affected. static inline std::string escape_addr(const asio::ip::address& addr) { if (gu_likely(addr.is_v4() == true)) { return addr.to_v4().to_string(); } else { return "[" + addr.to_v6().to_string() + "]"; } } // Unescape address string. Remove [] from around the address if found. static inline std::string unescape_addr(const std::string& addr) { std::string ret(addr); size_t pos(ret.find('[')); if (pos != std::string::npos) ret.erase(pos, 1); pos = ret.find(']'); if (pos != std::string::npos) ret.erase(pos, 1); return ret; } // // Error handling // // Exclude some errors which are generated by the SSL library. static inline bool exclude_ssl_error(const asio::error_code& ec) { switch (ERR_GET_REASON(ec.value())) { #ifdef SSL_R_SHORT_READ case SSL_R_SHORT_READ: // Short read error seems to be generated quite frequently // by SSL library, probably because broken connections. return true; #endif /* SSL_R_SHORT_READ */ default: return false; } } // Return low level error info for asio::error_code if available. 
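    // (Note derived from the implementation below: only errors from the
    // asio SSL error category carry extra information; for any other error
    // category an empty string is returned.)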
static inline const std::string extra_error_info(const asio::error_code& ec) { std::ostringstream os; if (ec.category() == asio::error::get_ssl_category()) { char errstr[120] = {0, }; ERR_error_string_n(ec.value(), errstr, sizeof(errstr)); os << ec.value() << ": '" << errstr << "'"; } return os.str(); } // // Misc utilities // // Set common socket options template void set_fd_options(S& socket) { long flags(FD_CLOEXEC); if (fcntl(socket.native(), F_SETFD, flags) == -1) { gu_throw_error(errno) << "failed to set FD_CLOEXEC"; } } } #ifndef HAVE_SYSTEM_ASIO #if defined(__GNUG__) # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic pop # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) #endif #endif // ! HAVE_SYSTEM_ASIO #endif // GU_ASIO_HPP galera-3-25.3.20/galerautils/src/gu_utils++.cpp0000644000015300001660000000203213042054732020723 0ustar jenkinsjenkins// Copyright (C) 2009-2011 Codership Oy /** * @file General-purpose functions and templates * * $Id$ */ #include "gu_utils.hpp" #include "gu_string_utils.hpp" #include namespace gu { bool _to_bool (const std::string& s) { std::istringstream iss(s); bool ret; if ((iss >> ret).fail()) { /* if 1|0 didn't work, try true|false */ iss.clear(); iss.seekg(0); if ((iss >> std::boolalpha >> ret).fail()) { /* try on/off and yes/no */ std::string tmp(s); gu::trim(tmp); if (tmp.length() >=2 && tmp.length() <= 3) { std::transform (tmp.begin(), tmp.end(), tmp.begin(), static_cast(std::tolower)); if (tmp == "yes" || tmp == "on") return true; if (tmp == "off" || tmp == "no") return false; } throw NotFound(); } } return ret; } } // namespace gu galera-3-25.3.20/galerautils/src/gu_mmh3.c0000644000015300001660000000707613042054732017756 0ustar jenkinsjenkins// Copyright (C) 2012 Codership Oy /** * @file MurmurHash3 implementation * (slightly rewritten from the refrence C++ impl.) 
* * $Id$ */ #include "gu_mmh3.h" void gu_mmh3_32 (const void* const key, int const len, uint32_t const seed, void* const out) { uint32_t const res = _mmh32_seed (key, len, seed); *((uint32_t*)out) = gu_le32(res); } //----------------------------------------------------------------------------- #if 0 /* x86 variant is faulty and unsuitable for short keys, ignore */ void gu_mmh3_x86_128 (const void* key, const int len, const uint32_t seed, void* out) { const uint8_t* data = (const uint8_t*)key; const int nblocks = len >> 4; uint32_t h1 = seed; uint32_t h2 = seed; uint32_t h3 = seed; uint32_t h4 = seed; const uint32_t c1 = 0x239b961b; const uint32_t c2 = 0xab0e9789; const uint32_t c3 = 0x38b34ae5; const uint32_t c4 = 0xa1e38b93; //---------- // body const uint32_t* blocks = (const uint32_t*)(data + (nblocks << 4)); int i; for(i = -nblocks; i; i++) { uint32_t k1 = gu_le32(blocks[(i << 2) + 0]); uint32_t k2 = gu_le32(blocks[(i << 2) + 1]); uint32_t k3 = gu_le32(blocks[(i << 2) + 2]); uint32_t k4 = gu_le32(blocks[(i << 2) + 3]); k1 *= c1; k1 = GU_ROTL32(k1,15); k1 *= c2; h1 ^= k1; h1 = GU_ROTL32(h1,19); h1 += h2; h1 = h1*5+0x561ccd1b; k2 *= c2; k2 = GU_ROTL32(k2,16); k2 *= c3; h2 ^= k2; h2 = GU_ROTL32(h2,17); h2 += h3; h2 = h2*5+0x0bcaa747; k3 *= c3; k3 = GU_ROTL32(k3,17); k3 *= c4; h3 ^= k3; h3 = GU_ROTL32(h3,15); h3 += h4; h3 = h3*5+0x96cd1c35; k4 *= c4; k4 = GU_ROTL32(k4,18); k4 *= c1; h4 ^= k4; h4 = GU_ROTL32(h4,13); h4 += h1; h4 = h4*5+0x32ac3b17; } //---------- // tail const uint8_t * tail = (const uint8_t*)(blocks); uint32_t k1 = 0; uint32_t k2 = 0; uint32_t k3 = 0; uint32_t k4 = 0; switch(len & 15) { case 15: k4 ^= tail[14] << 16; case 14: k4 ^= tail[13] << 8; case 13: k4 ^= tail[12] << 0; k4 *= c4; k4 = GU_ROTL32(k4,18); k4 *= c1; h4 ^= k4; case 12: k3 ^= tail[11] << 24; case 11: k3 ^= tail[10] << 16; case 10: k3 ^= tail[ 9] << 8; case 9: k3 ^= tail[ 8] << 0; k3 *= c3; k3 = GU_ROTL32(k3,17); k3 *= c4; h3 ^= k3; case 8: k2 ^= tail[ 7] << 24; case 7: k2 ^= tail[ 6] << 16; case 6: k2 ^= tail[ 5] << 8; case 5: k2 ^= tail[ 4] << 0; k2 *= c2; k2 = GU_ROTL32(k2,16); k2 *= c3; h2 ^= k2; case 4: k1 ^= tail[ 3] << 24; case 3: k1 ^= tail[ 2] << 16; case 2: k1 ^= tail[ 1] << 8; case 1: k1 ^= tail[ 0] << 0; k1 *= c1; k1 = GU_ROTL32(k1,15); k1 *= c2; h1 ^= k1; }; //---------- // finalization h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len; h1 += h2; h1 += h3; h1 += h4; h2 += h1; h3 += h1; h4 += h1; h1 = _mmh3_fmix32(h1); h2 = _mmh3_fmix32(h2); h3 = _mmh3_fmix32(h3); h4 = _mmh3_fmix32(h4); h1 += h2; h1 += h3; h1 += h4; h2 += h1; h3 += h1; h4 += h1; ((uint32_t*)out)[0] = gu_le32(h1); ((uint32_t*)out)[1] = gu_le32(h2); ((uint32_t*)out)[2] = gu_le32(h3); ((uint32_t*)out)[3] = gu_le32(h4); } #endif /* 0 */ //----------------------------------------------------------------------------- void gu_mmh3_x64_128 (const void* key, int len, uint32_t const seed, void* const out) { uint64_t* const res = (uint64_t*)out; _mmh3_128_seed (key, len, seed, seed, res); res[0] = gu_le64(res[0]); res[1] = gu_le64(res[1]); } galera-3-25.3.20/galerautils/src/gu_rset.hpp0000644000015300001660000002622313042054732020427 0ustar jenkinsjenkins/* Copyright (C) 2013 Codership Oy */ /*! * @file common RecordSet interface * * Record set is a collection of serialized records of the same type. * * It stores them in an iovec-like collection of buffers before sending * and restores from a single buffer when receiving. 
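 *
 * (Usage sketch added for illustration only, assuming a record type R that
 *  provides the serial_size()/serialize_to()/ptr() interface expected below,
 *  and hypothetical buffer/name variables:
 *
 *      gu::RecordSetOut<R> out (reserved, sizeof(reserved), base_name,
 *                               gu::RecordSet::CHECK_MMH128);
 *      out.append (rec);                    // serialize/reference records
 *      gu::RecordSet::GatherVector bufs;
 *      out.gather (bufs);                   // buffers ready to be sent
 *
 *      gu::RecordSetIn<R> in (recv_buf, recv_size);
 *      R r (in.next());                     // restore records one by one
 *  )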
* * $Id$ */ #ifndef _GU_RSET_HPP_ #define _GU_RSET_HPP_ #include "gu_vector.hpp" #include "gu_alloc.hpp" #include "gu_digest.hpp" #ifdef GU_RSET_CHECK_SIZE # include "gu_throw.hpp" #endif #include namespace gu { class RecordSet { public: enum Version { EMPTY = 0, VER1 }; static Version const MAX_VERSION = VER1; enum CheckType { CHECK_NONE = 0, CHECK_MMH32, CHECK_MMH64, CHECK_MMH128 }; /*! return total size of a RecordSet */ size_t size() const { return size_; } /*! return number of records in the record set */ int count() const { return count_; } typedef gu::Vector GatherVector; protected: ssize_t size_; int count_; Version version_; CheckType check_type_; /* ctor for RecordSetOut */ RecordSet (Version const version, CheckType const ct); /* ctor for RecordSetIn */ RecordSet () : size_(0), count_(0), version_(EMPTY), check_type_(CHECK_NONE) {} void init (const byte_t* buf, ssize_t size); ~RecordSet() {} }; #if defined(__GNUG__) # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic push # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic ignored "-Weffc++" #endif /*! class to store records in buffer(s) to send out */ class RecordSetOutBase : public RecordSet { public: typedef Allocator::BaseName BaseName; /*! return number of disjoint pages in the record set */ ssize_t page_count() const { return bufs_->size(); } /*! return vector of RecordSet fragments in adjusent order */ ssize_t gather (GatherVector& out); protected: RecordSetOutBase() : RecordSet() {} RecordSetOutBase (byte_t* reserved, size_t reserved_size, const BaseName& base_name, /* basename for on-disk * allocator */ CheckType ct, Version version = MAX_VERSION #ifdef GU_RSET_CHECK_SIZE ,ssize_t max_size = 0x7fffffff #endif ); /* this is to emulate partial specialization of function template through * overloading by parameter */ template struct HasPtr{}; /* variant for classes that don't provide ptr() method and need to be * explicitly serialized to internal storage */ template void process (const R& record, const byte_t*& ptr, bool& new_page, size_t const size, bool, HasPtr) { byte_t* const dst(alloc_.alloc (size, new_page)); new_page = (new_page || !prev_stored_); ptr = dst; #ifdef NDEBUG record.serialize_to (dst, size); #else size_t const ssize (record.serialize_to (dst, size)); assert (ssize == size); #endif } /* variant for classes that have ptr() method and can be either serialized * or referenced */ template void process (const R& record, const byte_t*& ptr, bool& new_page, size_t const size, bool const store, HasPtr) { if (store) { process (record, ptr, new_page, size, true, HasPtr()); } else { ptr = record.ptr(); new_page = true; } } template std::pair append_base (const R& record, bool const store = true, bool const new_record = true) { ssize_t const size (record.serial_size()); #ifdef GU_RSET_CHECK_SIZE if (gu_unlikely(size > max_size_ - size_)) gu_throw_error(EMSGSIZE); #endif bool new_page; const byte_t* ptr; process (record, ptr, new_page, size, store, HasPtr()); prev_stored_ = store; // make sure there is at least one record count_ += new_record || (0 == count_); post_append (new_page, ptr, size); return std::pair(ptr, size); } private: #ifdef GU_RSET_CHECK_SIZE ssize_t const max_size_; #endif Allocator alloc_; Hash check_; Vector bufs_; bool prev_stored_; void post_alloc (bool const new_page, const byte_t* const ptr, ssize_t const size); void post_append (bool const new_page, const byte_t* const ptr, ssize_t const size); int header_size () const; 
int header_size_max () const; /* Writes the header to the end of provided buffer, returns header * offset from ptr */ ssize_t write_header (byte_t* ptr, ssize_t size); }; /*! This is a small wrapper template for RecordSetOutBase to avoid templating * the whole thing instead of just the two append methods. */ template class RecordSetOut : public RecordSetOutBase { public: typedef RecordSetOutBase::BaseName BaseName; RecordSetOut() : RecordSetOutBase() {} RecordSetOut (byte_t* reserved, size_t reserved_size, const BaseName& base_name, CheckType ct, Version version = MAX_VERSION #ifdef GU_RSET_CHECK_SIZE ,ssize_t max_size = 0x7fffffff #endif ) : RecordSetOutBase (reserved, reserved_size, base_name, ct, version #ifdef GU_RSET_CHECK_SIZE ,max_size #endif ) {} std::pair append (const R& r) { return append_base (r); // return append_base (r); old append_base() method } std::pair append (const void* const src, ssize_t const size, bool const store = true, bool const new_record = true) { assert (src); assert (size); BufWrap bw (src, size); return append_base (bw, store, new_record); // return append_base (src, size, store); - old append_base() method } private: /*! a wrapper class to represent ptr and size as a serializable object: * simply defines serial_size(), ptr() and serialize_to() methods */ class BufWrap { const byte_t* const ptr_; size_t const size_; public: BufWrap (const void* const ptr, size_t const size) : ptr_(reinterpret_cast(ptr)), size_(size) {} size_t serial_size() const { return size_; } const byte_t* ptr() const { return ptr_; } size_t serialize_to (byte_t* const dst, size_t) const { ::memcpy (dst, ptr_, size_); return size_; } }; RecordSetOut (const RecordSetOut&); RecordSetOut& operator = (const RecordSetOut&); }; /*! class to recover records from a buffer */ class RecordSetInBase : public RecordSet { public: RecordSetInBase (const byte_t* buf,/* pointer to the beginning of buffer */ size_t size, /* total size of buffer */ bool check_now = true); /* checksum now */ /* this is a "delayed constructor", for the object created empty */ void init (const byte_t* buf, /* pointer to the beginning of buffer */ size_t size, /* total size of buffer */ bool check_now = true); /* checksum now */ void rewind() const { next_ = begin_; } void checksum() const; // throws if checksum fails uint64_t get_checksum() const; gu::Buf buf() const { gu::Buf ret = { head_, size_ }; return ret; } protected: template void next_base (Buf& n) const { if (gu_likely (next_ < size_)) { size_t const next_size(R::serial_size(head_ + next_, size_ -next_)); /* sanity check */ if (gu_likely (next_ + next_size <= size_t(size_))) { n.ptr = head_ + next_; n.size = next_size; next_ += next_size; return; } throw_error (E_FAULT); } assert (next_ == size_); throw_error (E_PERM); } template R next_base () const { if (gu_likely (next_ < size_)) { R const rec(head_ + next_, size_ - next_); size_t const tmp_size(rec.serial_size()); /* sanity check */ if (gu_likely (next_ + tmp_size <= size_t(size_))) { next_ += tmp_size; return rec; } throw_error (E_FAULT); } assert (next_ == size_); throw_error (E_PERM); } private: const byte_t* head_; /* pointer to header */ ssize_t mutable next_; /* offset to next record */ short begin_; /* offset to first record */ /* size_ from parent class is offset past all records */ /* takes total size of the supplied buffer */ void parse_header_v1 (size_t size); enum Error { E_PERM, E_FAULT }; GU_NORETURN void throw_error (Error code) const; /* shallow copies here - we're not allocating anything */ 
RecordSetInBase (const RecordSetInBase& r) : RecordSet (r), head_ (r.head_), next_ (r.next_), begin_ (r.begin_) {} RecordSetInBase& operator= (const RecordSetInBase r); #if 0 { std::swap(head_, r.head_); std::swap(next_, r.next_); std::swap(begin, r.begin_); } #endif }; /* class RecordSetInBase */ /*! This is a small wrapper template for RecordSetInBase to avoid templating * the whole thing instead of just the two next methods. */ template class RecordSetIn : public RecordSetInBase { public: RecordSetIn (const void* buf,/* pointer to the beginning of buffer */ size_t size, /* total size of buffer */ bool check_first = true) /* checksum now */ : RecordSetInBase (reinterpret_cast(buf), size, check_first) {} RecordSetIn () : RecordSetInBase (NULL, 0, false) {} void next (Buf& n) const { next_base (n); } R next () const { return next_base (); } }; /* class RecordSetIn */ #if defined(__GNUG__) # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic pop # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) #endif } /* namespace gu */ #endif /* _GU_RSET_HPP_ */ galera-3-25.3.20/galerautils/src/gu_uri.cpp0000644000015300001660000001734713042054732020253 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy * * $Id$ */ /*! @todo: scheme and host parts should be normalized to lower case except for * %-encodings, which should go upper-case */ #include #include #include #include "gu_assert.h" #include "gu_throw.hpp" #include "gu_logger.hpp" #include "gu_string_utils.hpp" // strsplit() #include "gu_uri.hpp" using std::string; using std::vector; using std::multimap; static void parse_authority (const string& auth, gu::RegEx::Match& user, gu::RegEx::Match& host, gu::RegEx::Match& port) { size_t pos1, pos2; pos1 = auth.find_first_of ('@'); if (pos1 != string::npos) { user = gu::RegEx::Match (auth.substr(0, pos1)); pos1 += 1; // pos1 now points past the first occurence of @, // which may be past the end of the string. } else { pos1 = 0; } pos2 = auth.find_last_of (':'); if (pos2 != string::npos && pos2 >= pos1) { host = gu::RegEx::Match (auth.substr (pos1, pos2 - pos1)); // according to RFC 3986 empty port (nothing after :) should be treated // as unspecified, so make sure that it is not 0-length. if ((pos2 + 1) < auth.length()) { port = gu::RegEx::Match (auth.substr (pos2 + 1)); if ((port.str().find_first_not_of ("0123456789") != string::npos) || // @todo: possible port range is not determined in RFC 3986 (65535 < gu::from_string (port.str()))) { log_debug << "\n\tauth: '" << auth << "'" << "\n\thost: '" << host.str() << "'" << "\n\tport: '" << port.str() << "'" << "\n\tpos1: " << pos1 << ", pos2: " << pos2; gu_throw_error (EINVAL) << "Can't parse port number from '" << port.str() << "'"; } } } else { host = gu::RegEx::Match (auth.substr (pos1)); } } static gu::URIQueryList extract_query_list(const string& s, const string& query) { gu::URIQueryList ret; // scan all key=value pairs vector qlist = gu::strsplit(query, '&'); for (vector::const_iterator i = qlist.begin(); i != qlist.end(); ++i) { vector kvlist = gu::strsplit(*i, '='); if (kvlist.size() != 2) { gu_throw_error (EINVAL) << "Invalid URI query part: '" << *i << "'"; } ret.insert(make_pair(kvlist[0], kvlist[1])); } return ret; } gu::URI::URI(const string& uri_str, bool const strict) : modified_ (true), // recompose to normalize on the first call to_string() str_ (uri_str), scheme_ (), authority_ (), path_ (), fragment_ (), query_list_() { parse(uri_str, strict); } /*! 
regexp suggested by RFC 3986 to parse URI into 5 canonical parts */ const char* const gu::URI::uri_regex_ = "^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\\?([^#]*))?(#(.*))?"; /* 12 3 4 5 6 7 8 9 */ /*! positions of URI components as matched by the above pattern */ enum { SCHEME = 2, AUTHORITY = 4, PATH = 5, QUERY = 7, FRAGMENT = 9, NUM_PARTS }; gu::RegEx const gu::URI::regex_(uri_regex_); static string const UNSET_SCHEME("unset://"); void gu::URI::parse (const string& uri_str, bool const strict) { log_debug << "URI: " << uri_str; vector parts; if (!strict && uri_str.find("://") == std::string::npos) { string tmp = UNSET_SCHEME + uri_str; parts = regex_.match (tmp, NUM_PARTS); } else { parts = regex_.match (uri_str, NUM_PARTS); scheme_ = parts[SCHEME]; //set scheme only if it was explicitly provided } if (strict && (!scheme_.is_set() || !scheme_.str().length())) { gu_throw_error (EINVAL) << "URI '" << uri_str << "' has empty scheme"; } try { std::vector auth_list( strsplit(parts[AUTHORITY].str(), ',')); for (size_t i(0); i < auth_list.size(); ++i) { Authority auth; parse_authority (auth_list[i], auth.user_, auth.host_, auth.port_); authority_.push_back(auth); } } catch (NotSet&) { authority_.push_back(Authority()); } path_ = parts[PATH]; if (!parts[AUTHORITY].is_set() && !path_.is_set()) { gu_throw_error (EINVAL) << "URI '" << uri_str << "' has no hierarchical part"; } try { query_list_ = extract_query_list(str_, parts[QUERY].str()); } catch (NotSet&) {} fragment_ = parts[FRAGMENT]; #if 0 try { log_debug << "Base URI: " << scheme.str() << "://" << get_authority(); } catch (NotSet&) {} #endif } std::string gu::URI::get_authority(const gu::URI::Authority& authority) const { const RegEx::Match& user(authority.user_); const RegEx::Match& host(authority.host_); const RegEx::Match& port(authority.port_); if (!user.is_set() && !host.is_set()) throw NotSet(); size_t auth_len = 0; if (user.is_set()) auth_len += user.str().length() + 1; if (host.is_set()) { auth_len += host.str().length(); if (port.is_set()) auth_len += port.str().length() + 1; } string auth; auth.reserve (auth_len); if (user.is_set()) { auth += user.str(); auth += '@'; } if (host.is_set()) { auth += host.str(); if (port.is_set()) { auth += ':'; auth += port.str(); } } return auth; } string gu::URI::get_authority() const { if (authority_.empty()) return ""; return get_authority(authority_.front()); } void gu::URI::recompose() const { size_t l = str_.length(); str_.clear (); str_.reserve (l); // resulting string length will be close to this if (scheme_.is_set()) { str_ += scheme_.str(); str_ += ':'; } str_ += "//"; for (AuthorityList::const_iterator i(authority_.begin()); i != authority_.end(); ++i) { AuthorityList::const_iterator i_next(i); ++i_next; try { string auth = get_authority(*i); str_ += auth; } catch (NotSet&) {} if (i_next != authority_.end()) str_ += ","; } if (path_.is_set()) str_ += path_.str(); if (query_list_.size() > 0) { str_ += '?'; } URIQueryList::const_iterator i = query_list_.begin(); while (i != query_list_.end()) { str_ += i->first + '=' + i->second; URIQueryList::const_iterator i_next = i; ++i_next; if (i_next != query_list_.end()) { str_ += '&'; } i = i_next; } if (fragment_.is_set()) { str_ += '#'; str_ += fragment_.str(); } } void gu::URI::set_query_param(const string& key, const string& val, bool override) { if (override == false) { query_list_.insert(make_pair(key, val)); } else { URIQueryList::iterator i(query_list_.find(key)); if (i == query_list_.end()) { query_list_.insert(make_pair(key, val)); } else { 
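            // key already present and override requested: replace the value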
i->second = val; } } modified_ = true; } const std::string& gu::URI::get_option (const std::string& name) const { gu::URIQueryList::const_iterator i = query_list_.find(name); if (i == query_list_.end()) throw NotFound(); return i->second; } galera-3-25.3.20/galerautils/src/gu_exception.cpp0000644000015300001660000000071413042054732021440 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy * */ #include #include "gu_utils.hpp" #include "gu_exception.hpp" namespace gu { void Exception::trace (const char* file, const char* func, int line) { msg.reserve (msg.length() + ::strlen(file) + ::strlen(func) + 15); msg += "\n\t at "; msg += file; msg += ':'; msg += func; msg += "():"; msg += to_string(line); } } galera-3-25.3.20/galerautils/src/gu_backtrace.h0000644000015300001660000000141113042054732021021 0ustar jenkinsjenkins// Copyright (C) 2012 Codership Oy #ifndef GU_BACKTRACE_H #define GU_BACKTRACE_H #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ /*! * Get current backtrace. Return buffer will contain backtrace symbols if * available. NULL pointer is returned if getting backtrace is not supported * on current platform. Maximum number of frames in backtrace is passed * in size parameter, number of frames in returned backtrace is assigned * in size parameter on return. * * @param size Pointer to integer containing maximum number of frames * in backtrace * * @return Allocated array of strings containing backtrace symbols */ char** gu_backtrace(int* size); #ifdef __cplusplus } #endif /* __cplusplus */ #endif /* GU_BACKTRACE_H */ galera-3-25.3.20/galerautils/src/gu_hexdump.hpp0000644000015300001660000000201713042054732021117 0ustar jenkinsjenkins// Copyright (C) 2013 Codership Oy /** * @file operator << for hexdumps. * * Usage: std::cout << gu::Hexdump(ptr, size) * * $Id$ */ #ifndef _GU_HEXDUMP_HPP_ #define _GU_HEXDUMP_HPP_ #include "gu_types.hpp" #include namespace gu { class Hexdump { public: Hexdump (const void* const buf, size_t const size, bool const alpha = false) : buf_ (reinterpret_cast(buf)), size_ (size), alpha_(alpha) {} std::ostream& to_stream (std::ostream& os) const; // according to clang C++98 wants copy ctor to be public for temporaries Hexdump (const Hexdump& h) : buf_(h.buf_), size_(h.size_), alpha_(h.alpha_) {} private: const byte_t* const buf_; size_t const size_; bool const alpha_; Hexdump& operator = (const Hexdump&); }; inline std::ostream& operator << (std::ostream& os, const Hexdump& h) { return h.to_stream(os); } } #endif /* _GU_HEXDUMP_HPP_ */ galera-3-25.3.20/galerautils/src/gu_log.c0000644000015300001660000000775213042054732017674 0ustar jenkinsjenkins// Copyright (C) 2007-2014 Codership Oy /** * @file Logging functions definitions * * $Id$ */ #include #include #include #include #include #include #include #include "gu_log.h" #include "gu_macros.h" /* Global configurable variables */ static FILE* gu_log_file = NULL; bool gu_log_self_tstamp = false; gu_log_severity_t gu_log_max_level = GU_LOG_INFO; int gu_conf_set_log_file (FILE *file) { gu_debug ("Log file changed by application"); if (file) { gu_log_file = file; } else { gu_log_file = stderr; } return 0; } int gu_conf_self_tstamp_on () { gu_debug ("Turning self timestamping on"); gu_log_self_tstamp = true; return 0; } int gu_conf_self_tstamp_off () { gu_debug ("Turning self timestamping off"); gu_log_self_tstamp = false; return 0; } int gu_conf_debug_on () { gu_log_max_level = GU_LOG_DEBUG; gu_debug ("Turning debug logging on"); return 0; } int gu_conf_debug_off () { gu_debug ("Turning debug logging 
off"); gu_log_max_level = GU_LOG_INFO; return 0; } /** Returns current timestamp in the provided buffer */ static inline int log_tstamp (char* tstamp, size_t const len) { int ret = 0; struct tm date; struct timeval time; gettimeofday (&time, NULL); localtime_r (&time.tv_sec, &date); /* 23 symbols */ ret = snprintf (tstamp, len, "%04d-%02d-%02d %02d:%02d:%02d.%03d ", date.tm_year + 1900, date.tm_mon + 1, date.tm_mday, date.tm_hour, date.tm_min, date.tm_sec, (int)time.tv_usec / 1000); return ret; } const char* gu_log_level_str[GU_LOG_DEBUG + 2] = { "FATAL: ", "ERROR: ", " WARN: ", " INFO: ", "DEBUG: ", "XXXXX: " }; /** * @function * Default logging function: simply writes to stderr or gu_log_file if set. */ void gu_log_cb_default (int severity, const char* msg) { FILE* log_file = gu_log_file ? gu_log_file : stderr; fputs (msg, log_file); fputc ('\n', log_file); fflush (log_file); } /** * Log function handle. * Can be changed by application through gu_conf_set_log_callback() */ gu_log_cb_t gu_log_cb = gu_log_cb_default; int gu_conf_set_log_callback (gu_log_cb_t callback) { if (callback) { gu_debug ("Logging function changed by application"); gu_log_cb = callback; } else { gu_debug ("Logging function restored to default"); gu_log_cb = gu_log_cb_default; } return 0; } int gu_log (gu_log_severity_t severity, const char* file, const char* function, const int line, ...) { va_list ap; int max_string = 2048; char string[max_string]; /** @note: this can cause stack overflow * in kernel mode (both Linux and Windows). */ char* str = string; int len; if (gu_log_self_tstamp) { len = log_tstamp (str, max_string); str += len; max_string -= len; } if (gu_likely(max_string > 0)) { const char* log_level_str = gu_log_cb_default == gu_log_cb ? gu_log_level_str[severity] : ""; /* provide file:func():line info only if debug logging is on */ if (gu_likely(!gu_log_debug && severity > GU_LOG_ERROR)) { len = snprintf (str, max_string, "%s", log_level_str); } else { len = snprintf (str, max_string, "%s%s:%s():%d: ", log_level_str, file, function, line); } str += len; max_string -= len; va_start (ap, line); { const char* format = va_arg (ap, const char*); if (gu_likely(max_string > 0 && NULL != format)) { vsnprintf (str, max_string, format, ap); } } va_end (ap); } /* actual logging */ gu_log_cb (severity, string); return 0; } galera-3-25.3.20/galerautils/src/gu_serializable.hpp0000644000015300001660000000603113042054732022113 0ustar jenkinsjenkins// Copyright (C) 2013 Codership Oy /** * @file Declaration of serializeble interface that all serializable classes * should inherit. * * $Id$ */ #ifndef GU_SERIALIZABLE_HPP #define GU_SERIALIZABLE_HPP #include "gu_types.hpp" #include "gu_throw.hpp" #include "gu_assert.hpp" #include #include // for std::length_error namespace gu { class Serializable { public: /*! returns the size of a buffer required to serialize the object */ ssize_t serial_size () const { return my_serial_size(); } /*! * serializes this object into buf and returns serialized size * * @param buf pointer to buffer * @param size size of buffer * @return serialized size * * may throw exceptions */ ssize_t serialize_to (void* const buf, ssize_t const size) const { return my_serialize_to (buf, size); } /*! 
* serializes this object into byte vector v, reallocating it if needed * returns the size of serialized object */ ssize_t serialize_to (std::vector& v) const { size_t const old_size (v.size()); size_t const new_size (serial_size() + old_size); try { v.resize (new_size, 0); } catch (std::length_error& l) { gu_throw_error(EMSGSIZE) << "length_error: " << l.what(); } catch (...) { gu_throw_error(ENOMEM) << "could not resize to " << new_size << " bytes"; } try { return serialize_to (&v[old_size], new_size - old_size); } catch (...) { v.resize (old_size); throw; } } protected: ~Serializable() {} private: virtual ssize_t my_serial_size () const = 0; virtual ssize_t my_serialize_to (void* buf, ssize_t size) const = 0; }; static inline std::vector& operator << (std::vector& out, const Serializable& s) { s.serialize_to (out); return out; } #if 0 // seems to be a pointless idea class DeSerializable { public: /* serial size of an object stored at ptr, may be not implemented */ template static ssize_t serial_size (const byte_t* const buf, ssize_t const size) { assert (size > 0); return DS::my_serial_size (buf, size); } /* serial size of an object stored at ptr, may be not implemented */ ssize_t deserialize_from (const byte_t* const buf, ssize_t const size) { assert (size > 0); return my_deserialize_from (buf, size); } ssize_t deserialize_from (const std::vector& in,size_t const offset) { return deserialize_from (&in[offset], in.size() - offset); } protected: ~DeSerializable() {} private: /* serial size of an object stored at ptr, may be not implemented */ virtual ssize_t my_deserialize_from (const byte_t* buf, ssize_t size) = 0; }; #endif // 0 } /* namespace gu */ #endif /* GU_SERIALIZABLE_HPP */ galera-3-25.3.20/galerautils/src/gu_crc32c.c0000644000015300001660000000133313042054732020157 0ustar jenkinsjenkins/* * Copyright (C) 2013 Codership Oy * * $Id$ */ #include "gu_crc32c.h" #include "gu_log.h" #include CRC32CFunctionPtr gu_crc32c_func = crc32cSlicingBy8; // some sensible default void gu_crc32c_configure() { gu_crc32c_func = detectBestCRC32C(); #if !defined(CRC32C_NO_HARDWARE) if (gu_crc32c_func == crc32cHardware64 || gu_crc32c_func == crc32cHardware32) { gu_info ("CRC-32C: using hardware acceleration."); } else #endif /* !CRC32C_NO_HARDWARE */ if (gu_crc32c_func == crc32cSlicingBy8) { gu_info ("CRC-32C: using \"slicing-by-8\" algorithm."); } else { gu_fatal ("unexpected CRC-32C implementation."); abort(); } } galera-3-25.3.20/galerautils/src/gu_lock_step.h0000644000015300001660000000164213042054732021073 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy * * $Id$ */ // This is a small class to facilitate lock-stepping in multithreaded unit tests #ifndef _gu_lock_step_h_ #define _gu_lock_step_h_ #include #include "gu_mutex.h" typedef struct gu_lock_step { gu_mutex_t mtx; gu_cond_t cond; long wait; long cont; bool enabled; } gu_lock_step_t; extern void gu_lock_step_init (gu_lock_step_t* ls); /* enable or disable lock-stepping */ extern void gu_lock_step_enable (gu_lock_step_t* ls, bool enable); extern void gu_lock_step_wait (gu_lock_step_t* ls); /* returns how many waiters there were, * waits for timeout_ms milliseconds if no waiters, if timeout_ms < 0 waits forever, * if 0 - no wait at all */ extern long gu_lock_step_cont (gu_lock_step_t* ls, long timeout_ms); extern void gu_lock_step_destroy (gu_lock_step_t* ls); #endif /* _gu_lock_step_h_ */ galera-3-25.3.20/galerautils/src/gu_backtrace.hpp0000644000015300001660000000256513042054732021374 0ustar jenkinsjenkins// Copyright (C) 2012 
Codership Oy #ifndef GU_BACKTRACE_HPP #define GU_BACKTRACE_HPP #include "gu_backtrace.h" #include #include namespace gu { /*! * Utility class to print backtraces. */ class Backtrace { public: /*! * Construct backtrace object. * * @param Maximum number of backtrace symbols resolved (default 50). */ Backtrace(int size = 50) : symbols_size_(size), symbols_(gu_backtrace(&symbols_size_)) { } ~Backtrace() { free(symbols_); } /*! * Print backtrace into ostream. * * @param os Ostream to print backtrace into. * @param delim Delimiter separating backtrace symbols. */ void print(std::ostream& os, char delim = '\n') { if (symbols_ != 0) { for (int i(0); i < symbols_size_; ++i) { os << symbols_[i] << delim; } } else { os << "no backtrace available"; } } private: Backtrace(const Backtrace&); void operator=(const Backtrace&); int symbols_size_; char** symbols_; }; } #endif // GU_BACKTRACE_HPP galera-3-25.3.20/galerautils/src/gu_types.hpp0000644000015300001660000000042413042054732020611 0ustar jenkinsjenkins// Copyright (C) 2013 Codership Oy /** * @file Location of some "standard" types definitions * * $Id$ */ #ifndef _GU_TYPES_HPP_ #define _GU_TYPES_HPP_ #include "gu_types.h" namespace gu { typedef gu_byte_t byte_t; } #endif /* _GU_TYPES_HPP_ */ galera-3-25.3.20/galerautils/src/gu_alloc.cpp0000644000015300001660000001074713042054732020543 0ustar jenkinsjenkins/* Copyright (C) 2013-2016 Codership Oy */ /*! * @file allocator main functions * * $Id$ */ #include "gu_alloc.hpp" #include "gu_throw.hpp" #include "gu_assert.hpp" #include "gu_limits.h" #include #include // for std::setfill() and std::setw() gu::Allocator::HeapPage::HeapPage (page_size_type const size) : Page (static_cast(::malloc(size)), size) { if (0 == base_ptr_) gu_throw_error (ENOMEM); } gu::Allocator::Page* gu::Allocator::HeapStore::my_new_page (page_size_type const size) { if (gu_likely(size <= left_)) { /* to avoid too frequent allocation, make it (at least) 64K */ static page_size_type const PAGE_SIZE(gu_page_size_multiple(1 << 16)); page_size_type const page_size (std::min(std::max(size, PAGE_SIZE), left_)); Page* ret = new HeapPage (page_size); assert (ret != 0); left_ -= page_size; return ret; } gu_throw_error (ENOMEM) << "out of memory in RAM pool"; } gu::Allocator::FilePage::FilePage (const std::string& name, page_size_type const size) : Page (0, 0), fd_ (name, size, false, false), mmap_(fd_, true) { base_ptr_ = reinterpret_cast(mmap_.ptr); ptr_ = base_ptr_; left_ = mmap_.size; } gu::Allocator::Page* gu::Allocator::FileStore::my_new_page (page_size_type const size) { Page* ret = 0; try { std::ostringstream fname; fname << base_name_ << '.' 
<< std::dec << std::setfill('0') << std::setw(6) << n_; ret = new FilePage(fname.str(), std::max(size, page_size_)); assert (ret != 0); ++n_; } catch (std::exception& e) { gu_throw_error(ENOMEM) << e.what(); } return ret; } #ifdef GU_ALLOCATOR_DEBUG void gu::Allocator::add_current_to_bufs() { page_size_type const current_size (current_page_->size()); if (current_size) { if (bufs_->empty() || bufs_->back().ptr != current_page_->base()) { Buf b = { current_page_->base(), current_size }; bufs_->push_back (b); } else { bufs_->back().size = current_size; } } } size_t gu::Allocator::gather (std::vector& out) const { if (bufs_().size()) out.insert (out.end(), bufs_().begin(), bufs_().end()); Buf b = { current_page_->base(), current_page_->size() }; out.push_back (b); return size_; } #endif /* GU_ALLOCATOR_DEBUG */ gu::byte_t* gu::Allocator::alloc (page_size_type const size, bool& new_page) { new_page = false; if (gu_unlikely(0 == size)) return 0; byte_t* ret = current_page_->alloc (size); if (gu_unlikely(0 == ret)) { Page* np = 0; try { np = current_store_->new_page(size); } catch (Exception& e) { if (current_store_ != &heap_store_) throw; /* no fallbacks left */ /* fallback to disk store */ current_store_ = &file_store_; np = current_store_->new_page(size); } assert (np != 0); // it should have thrown above pages_().push_back (np); #ifdef GU_ALLOCATOR_DEBUG add_current_to_bufs(); #endif /* GU_ALLOCATOR_DEBUG */ current_page_ = np; new_page = true; ret = np->alloc (size); assert (ret != 0); // the page should be sufficiently big } size_ += size; return ret; } gu::Allocator::BaseNameDefault const gu::Allocator::BASE_NAME_DEFAULT; gu::Allocator::Allocator (const BaseName& base_name, byte_t* reserved, page_size_type reserved_size, heap_size_type max_ram, page_size_type disk_page_size) : first_page_ (reserved, reserved_size), current_page_ (&first_page_), heap_store_ (max_ram), file_store_ (base_name, disk_page_size), current_store_(&heap_store_), pages_ (), #ifdef GU_ALLOCATOR_DEBUG bufs_ (), #endif /* GU_ALLOCATOR_DEBUG */ size_ (0) { assert (NULL != reserved || 0 == reserved_size); assert (current_page_ != 0); pages_->push_back (current_page_); } gu::Allocator::~Allocator () { for (int i(pages_->size() - 1); i > 0 /* don't delete first_page_ - we didn't allocate it */; --i) { delete (pages_[i]); } } galera-3-25.3.20/galerautils/src/gu_stats.cpp0000644000015300001660000000321113042054732020573 0ustar jenkinsjenkins/* * Copyright (C) 2014 Codership Oy */ #include #include #include #include "gu_macros.h" #include "gu_stats.hpp" // http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance // http://www.johndcook.com/standard_deviation.html void gu::Stats::insert(const double val) { n_++; if (gu_unlikely(n_ == 1)) { old_m_ = new_m_ = val; old_s_ = new_s_ = 0.0; min_ = val; max_ = val; } else { new_m_ = old_m_ + (val - old_m_) / n_; new_s_ = old_s_ + (val - old_m_) * (val - new_m_); old_m_ = new_m_; old_s_ = new_s_; min_ = std::min(min_, val); max_ = std::max(max_, val); } } // I guess it's okay to assign 0.0 if no data. double gu::Stats::min() const { return gu_likely(n_ > 0) ? min_ : 0.0; } double gu::Stats::max() const { return gu_likely(n_ > 0) ? max_ : 0.0; } double gu::Stats::mean() const { return gu_likely(n_ > 0) ? new_m_ : 0.0; } double gu::Stats::variance() const { // n_ > 1 ? new_s_ / (n_ - 1) : 0.0; // is to compute unbiased sample variance // not population variance. return gu_likely(n_ > 0) ? 
new_s_ / n_ : 0.0; } double gu::Stats::std_dev() const { return sqrt(variance()); } std::string gu::Stats::to_string() const { std::ostringstream os; os << *this; return os.str(); } std::ostream& gu::operator<<(std::ostream& os, const gu::Stats& stats) { return (os << stats.min() << "/" << stats.mean() << "/" << stats.max() << "/" << stats.std_dev() << "/" << stats.times()); } galera-3-25.3.20/galerautils/src/gu_fnv.h0000644000015300001660000001311613042054732017700 0ustar jenkinsjenkins// Copyright (C) 2012 Codership Oy /*! * @file * * This header file defines FNV hash functions for 3 hash sizes: * 4, 8 and 16 bytes. * * Be wary of bitshift multiplication "optimization" (FNV_BITSHIFT_OPTIMIZATION): * FNV authors used to claim marginal speedup when using it, however on core2 * CPU it has shown no speedup for fnv32a and more than 2x slowdown for fnv64a * and fnv128a. Disabled by default. * * FNV vs. FNVa: FNVa has a better distribution: multiplication happens after * XOR and hence propagates XOR effect to all bytes of the hash. * Hence by default functions perform FNVa. GU_FNV_NORMAL macro * is needed for unit tests. * * gu_fnv*_internal() functions are endian-unsafe, their output should be * converted to little-endian format if it is to be exported to other machines. */ #ifndef _gu_fnv_h_ #define _gu_fnv_h_ #include "gu_int128.h" #include #include #include #include // ssize_t #include #define GU_FNV32_PRIME 16777619UL #define GU_FNV32_SEED 2166136261UL #if !defined(GU_FNVBITSHIFT_OPTIMIZATION) # define GU_FNV32_MUL(_x) _x *= GU_FNV32_PRIME #else /* GU_FNVBITSHIFT_OPTIMIZATION */ # define GU_FNV32_MUL(_x) \ _x += (_x << 1) + (_x << 4) + (_x << 7) + (_x << 8) + (_x << 24) #endif /* GU_FNVBITSHIFT_OPTIMIZATION */ #if !defined(GU_FNV_NORMAL) # define GU_FNV32_ITERATION(_s,_b) _s ^= _b; GU_FNV32_MUL(_s); #else # define GU_FNV32_ITERATION(_s,_b) GU_FNV32_MUL(_s); _s ^= _b; #endif static GU_FORCE_INLINE void gu_fnv32a_internal (const void* buf, ssize_t const len, uint32_t* seed) { const uint8_t* bp = (const uint8_t*)buf; const uint8_t* const be = bp + len; while (bp + 2 <= be) { GU_FNV32_ITERATION(*seed,*bp++); GU_FNV32_ITERATION(*seed,*bp++); } if (bp < be) { GU_FNV32_ITERATION(*seed,*bp++); } assert(be == bp); } #define GU_FNV64_PRIME 1099511628211ULL #define GU_FNV64_SEED 14695981039346656037ULL #if !defined(GU_FNVBITSHIFT_OPTIMIZATION) # define GU_FNV64_MUL(_x) _x *= GU_FNV64_PRIME #else /* GU_FNVBITSHIFT_OPTIMIZATION */ # define GU_FNV64_MUL(_x) \ _x +=(_x << 1) + (_x << 4) + (_x << 5) + (_x << 7) + (_x << 8) + (_x << 40); #endif /* GU_FNVBITSHIFT_OPTIMIZATION */ #if !defined(GU_FNV_NORMAL) # define GU_FNV64_ITERATION(_s,_b) _s ^= _b; GU_FNV64_MUL(_s); #else # define GU_FNV64_ITERATION(_s,_b) GU_FNV64_MUL(_s); _s ^= _b; #endif static GU_FORCE_INLINE void gu_fnv64a_internal (const void* buf, ssize_t const len, uint64_t* seed) { const uint8_t* bp = (const uint8_t*)buf; const uint8_t* const be = bp + len; while (bp + 2 <= be) { GU_FNV64_ITERATION(*seed,*bp++); GU_FNV64_ITERATION(*seed,*bp++); } if (bp < be) { GU_FNV64_ITERATION(*seed,*bp++); } assert(be == bp); } static gu_uint128_t const GU_SET128(GU_FNV128_PRIME, 0x0000000001000000ULL, 0x000000000000013BULL); static gu_uint128_t const GU_SET128(GU_FNV128_SEED, 0x6C62272E07BB0142ULL, 0x62B821756295C58DULL); #if defined(__SIZEOF_INT128__) #define GU_FNV128_XOR(_s,_b) _s ^= _b #if !defined(GU_FNVBITSHIFT_OPTIMIZATION) # define GU_FNV128_MUL(_x) _x *= GU_FNV128_PRIME #else /* GU_FNVBITSHIFT_OPTIMIZATION */ # define GU_FNV128_MUL(_x) \ _x +=(_x << 
1) + (_x << 3) + (_x << 4) + (_x << 5) + (_x << 8) + (_x << 88); #endif /* GU_FNVBITSHIFT_OPTIMIZATION */ #else /* ! __SIZEOF_INT128__ */ #define GU_FNV128_XOR(_s,_b) (_s).u32[GU_32LO] ^= _b #if defined(GU_FNV128_FULL_MULTIPLICATION) # define GU_FNV128_MUL(_x) GU_MUL128_INPLACE(_x, GU_FNV128_PRIME) #else /* no FULL_MULTIPLICATION */ # define GU_FNV128_MUL(_x) { \ uint32_t carry = \ (((_x).u64[GU_64LO] & 0x00000000ffffffffULL) * 0x013b) >> 32; \ carry = (((_x).u64[GU_64LO] >> 32) * 0x013b + carry) >> 32; \ (_x).u64[GU_64HI] *= 0x013b; \ (_x).u64[GU_64HI] += ((_x).u64[GU_64LO] << 24) + carry; \ (_x).u64[GU_64LO] *= 0x013b; \ } #endif /* FULL_MULTIPLICATION */ #endif /* ! __SIZEOF_INT128__ */ #if !defined(GU_FNV_NORMAL) # define GU_FNV128_ITERATION(_s,_b) GU_FNV128_XOR(_s,_b); GU_FNV128_MUL(_s); #else # define GU_FNV128_ITERATION(_s,_b) GU_FNV128_MUL(_s); GU_FNV128_XOR(_s,_b); #endif inline static void gu_fnv128a_internal (const void* buf, ssize_t const len, gu_uint128_t* seed) { const uint8_t* bp = (const uint8_t*)buf; const uint8_t* const be = bp + len; /* this manual loop unrolling seems to be essential */ while (bp + 8 <= be) { GU_FNV128_ITERATION(*seed, *bp++); GU_FNV128_ITERATION(*seed, *bp++); GU_FNV128_ITERATION(*seed, *bp++); GU_FNV128_ITERATION(*seed, *bp++); GU_FNV128_ITERATION(*seed, *bp++); GU_FNV128_ITERATION(*seed, *bp++); GU_FNV128_ITERATION(*seed, *bp++); GU_FNV128_ITERATION(*seed, *bp++); } if (bp + 4 <= be) { GU_FNV128_ITERATION(*seed, *bp++); GU_FNV128_ITERATION(*seed, *bp++); GU_FNV128_ITERATION(*seed, *bp++); GU_FNV128_ITERATION(*seed, *bp++); } if (bp + 2 <= be) { GU_FNV128_ITERATION(*seed, *bp++); GU_FNV128_ITERATION(*seed, *bp++); } if (bp < be) { GU_FNV128_ITERATION(*seed, *bp++); } assert(be == bp); } #endif /* _gu_fnv_h_ */ galera-3-25.3.20/galerautils/src/gu_vlq.hpp0000644000015300001660000001124513042054732020252 0ustar jenkinsjenkins// // Copyright (C) 2011-2013 Codership Oy // //! // @file Variable-length quantity encoding for integers // // Unsigned integers: Implementation uses using unsigned LEB128, // see for example http://en.wikipedia.org/wiki/LEB128. // // Signed integers: TODO // #ifndef GU_VLQ_HPP #define GU_VLQ_HPP #include "gu_buffer.hpp" #include "gu_throw.hpp" #include "gu_macros.h" #define GU_VLQ_CHECKS #define GU_VLQ_ALEX namespace gu { //! // @brief Retun number of bytes required to represent given value in ULEB128 // representation. // // @param value Unsigned value // // @return Number of bytes required for value representation // template inline size_t uleb128_size(UI value) { size_t i(1); value >>= 7; for (; value != 0; value >>= 7, ++i) {} return i; } //! 
// @brief Encode unsigned type to ULEB128 representation // // @param value // @param buf // @param buflen // @param offset // // @return Offset // template inline size_t uleb128_encode(UI value, byte_t* buf, size_t buflen, size_t offset) { #ifdef GU_VLQ_ALEX assert (offset < buflen); buf[offset] = value & 0x7f; while (value >>= 7) { buf[offset] |= 0x80; ++offset; #ifdef GU_VLQ_CHECKS if (gu_unlikely(offset >= buflen)) gu_throw_fatal; #else assert(offset < buflen); #endif /* GU_VLQ_CHECKS */ buf[offset] = value & 0x7f; } return offset + 1; #else /* GU_VLQ_ALEX */ do { #ifdef GU_VLQ_CHECKS if (gu_unlikely(offset >= buflen)) gu_throw_fatal; #else assert(offset < buflen); #endif /* GU_VLQ_CHECKS */ buf[offset] = value & 0x7f; value >>= 7; if (gu_unlikely(value != 0)) { buf[offset] |= 0x80; } ++offset; } while (value != 0); return offset; #endif /* GU_VLQ_ALEX */ } template inline size_t uleb128_encode(UI value, byte_t* buf, size_t buflen) { return uleb128_encode(value, buf, buflen, 0); } /* checks helper for the uleb128_decode() below */ extern void uleb128_decode_checks (const byte_t* buf, size_t buflen, size_t offset, size_t avail_bits); //! // @brief Decode unsigned type from ULEB128 representation // // @param buf // @param buflen // @param offset // @param value // // @return Offset // template inline size_t uleb128_decode(const byte_t* buf, size_t buflen, size_t offset, UI& value) { // initial check for overflow, at least one byte must be readable #ifdef GU_VLQ_CHECKS if (gu_unlikely(offset >= buflen)) gu_throw_fatal; #endif #ifdef GU_VLQ_ALEX value = buf[offset] & 0x7f; size_t shift(0); while (buf[offset] & 0x80) { ++offset; shift +=7; #ifdef GU_VLQ_CHECKS ssize_t left_bits((sizeof(UI) << 3) - shift); if (gu_unlikely(offset >= buflen || left_bits < 7)) uleb128_decode_checks (buf, buflen, offset, left_bits); #endif value |= (UI(buf[offset] & 0x7f) << shift); } return offset + 1; #else /* GU_VLQ_ALEX */ value = 0; size_t shift(0); while (true) { value |= (UI(buf[offset] & 0x7f) << shift); if (gu_likely((buf[offset] & 0x80) == 0)) { // last byte ++offset; break; } ++offset; shift += 7; #ifdef GU_VLQ_CHECKS ssize_t left_bits((sizeof(UI) << 3) - shift); if (gu_unlikely(offset >= buflen || left_bits < 7)) uleb128_decode_checks (buf, buflen, offset, left_bits); #endif } return offset; #endif /* GU_VLQ_ALEX */ } template inline size_t uleb128_decode(const byte_t* buf, size_t buflen, UI& value) { return uleb128_decode(buf, buflen, 0, value); } } #endif // GU_VLQ_HPP galera-3-25.3.20/galerautils/src/gu_mem_pool.hpp0000644000015300001660000001275413042054732021265 0ustar jenkinsjenkins/* Copyright (C) 2013 Codership Oy */ /** * @file Self-adjusting pool of same size memory buffers. * * How it works: pool is never allowed to keep more than half of total * allocated buffers (plus min_count), so at least half of buffers must be * in use. As more than half goes out of use they will be deallocated rather * than placed back in the pool. * * $Id$ */ #ifndef _GU_MEM_POOL_HPP_ #define _GU_MEM_POOL_HPP_ #include "gu_lock.hpp" #include "gu_macros.hpp" #include #include #include namespace gu { typedef std::vector MemPoolVector; /* Since we specialize this template iwth thread_safe=true parameter below, * this makes it implicit thread_safe=false specialization. 
*/ template class MemPool { public: explicit MemPool(int buf_size, int reserve = 0, const char* name = "") : pool_ (), hits_ (0), misses_ (0), allocd_ (0), name_ (name), buf_size_ (buf_size), reserve_ (reserve) { assert(buf_size_ > 0); assert(reserve_ >= 0); pool_.reserve(reserve_); } ~MemPool() { /* all buffers must be returned to pool before destruction */ assert(pool_.size() == allocd_); for (size_t i(0); i < pool_.size(); ++i) { assert(pool_[i]); free(pool_[i]); } } void* acquire() { void* ret(from_pool()); if (!ret) ret = alloc(); return ret; } void recycle(void* buf) { if (!to_pool(buf)) free(buf); } void print(std::ostream& os) const { double hr(hits_); if (hr > 0) { assert(misses_ > 0); hr /= hits_ + misses_; } os << "MemPool(" << name_ << "): hit ratio: " << hr << ", misses: " << misses_ << ", in use: " << allocd_ - pool_.size() << ", in pool: " << pool_.size(); } size_t buf_size() const { return buf_size_; } protected: /* from_pool() and to_pool() will need to be called under mutex * in thread-safe version, so all object data are modified there. * alloc() and free() then can be called outside critical section. */ void* from_pool() { void* ret(NULL); if (pool_.size() > 0) { ret = pool_.back(); assert(ret); pool_.pop_back(); ++hits_; } else { ++allocd_; ++misses_; } return ret; } // returns false if buffer can't be returned to pool bool to_pool(void* buf) { assert(buf); bool const ret(reserve_ + allocd_/2 > pool_.size()); if (ret) { pool_.push_back(buf); } else { assert(allocd_ > 0); --allocd_; } return ret; } void* alloc() { return (operator new(buf_size_)); } void free(void* const buf) { assert(buf); operator delete(buf); } friend class MemPool; private: MemPoolVector pool_; size_t hits_; size_t misses_; size_t allocd_; const char* const name_; unsigned int const buf_size_; unsigned int const reserve_; MemPool (const MemPool&); MemPool operator= (const MemPool&); }; /* class MemPool: thread-unsafe */ /* Thread-safe MemPool specialization. * Even though MemPool technically IS-A MemPool, the need to * overload nearly all public methods and practical uselessness of * polymorphism in this case make inheritance undesirable. */ template <> class MemPool { public: explicit MemPool(int buf_size, int reserve = 0, const char* name = "") : base_(buf_size, reserve, name), mtx_ () {} ~MemPool() {} void* acquire() { void* ret; { Lock lock(mtx_); ret = base_.from_pool(); } if (!ret) ret = base_.alloc(); return ret; } void recycle(void* buf) { bool pooled; { Lock lock(mtx_); pooled = base_.to_pool(buf); } if (!pooled) base_.free(buf); } void print(std::ostream& os) const { Lock lock(mtx_); base_.print(os); } size_t buf_size() const { return base_.buf_size(); } private: MemPool base_; Mutex mtx_; }; /* class MemPool: thread-safe */ template std::ostream& operator << (std::ostream& os, const MemPool& mp) { mp.print(os); return os; } typedef MemPool MemPoolUnsafe; typedef MemPool MemPoolSafe; } /* namespace gu */ #endif /* _GU_MEM_POOL_HPP_ */ galera-3-25.3.20/galerautils/src/gu_status.hpp0000644000015300001660000000165013042054732020772 0ustar jenkinsjenkins// Copyright (C) 2014 Codership Oy //! // @file // Common class for gathering Galera wide status. The class is simple // string based key-value store. 
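// (Editorial note, derived from the class below: the store is a plain
// std::map of strings with no internal locking, so concurrent access
// must be serialized by the caller.)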
// #ifndef GU_STATUS_HPP #define GU_STATUS_HPP #include "gu_exception.hpp" #include #include namespace gu { class Status { public: typedef std::map VarMap; typedef VarMap::iterator iterator; typedef VarMap::const_iterator const_iterator; Status() : vars_() { } void insert(const std::string& key, const std::string& val) { vars_.insert(std::make_pair(key, val)); } const_iterator begin() { return vars_.begin(); } const_iterator end() { return vars_.end(); } size_t size() const { return vars_.size(); } private: VarMap vars_; }; } #endif // !GU_STATUS_HPP galera-3-25.3.20/galerautils/src/gu_uuid.hpp0000644000015300001660000000755113042054732020423 0ustar jenkinsjenkins/* * Copyright (C) 2014 Codership Oy * */ #ifndef _gu_uuid_hpp_ #define _gu_uuid_hpp_ #include "gu_uuid.h" #include "gu_assert.hpp" #include "gu_buffer.hpp" #include "gu_throw.hpp" #include inline bool operator==(const gu_uuid_t& a, const gu_uuid_t& b) { return gu_uuid_compare(&a, &b) == 0; } inline bool operator!=(const gu_uuid_t& a, const gu_uuid_t& b) { return !(a == b); } inline std::ostream& operator<<(std::ostream& os, const gu_uuid_t& uuid) { char uuid_buf[GU_UUID_STR_LEN + 1]; ssize_t ret(gu_uuid_print(&uuid, uuid_buf, sizeof(uuid_buf))); (void)ret; assert(ret == GU_UUID_STR_LEN); uuid_buf[GU_UUID_STR_LEN] = '\0'; return (os << uuid_buf); } inline ssize_t gu_uuid_from_string(const std::string& s, gu_uuid_t& uuid) { ssize_t ret(gu_uuid_scan(s.c_str(), s.size(), &uuid)); if (ret == -1) { gu_throw_error(EINVAL) << "could not parse UUID from '" << s << '\'' ; } return ret; } inline std::istream& operator>>(std::istream& is, gu_uuid_t& uuid) { char str[GU_UUID_STR_LEN + 1]; is.width(GU_UUID_STR_LEN + 1); is >> str; gu_uuid_from_string(str, uuid); return is; } inline size_t gu_uuid_serial_size(const gu_uuid_t& uuid) { return sizeof(uuid.data); } inline size_t gu_uuid_serialize(const gu_uuid_t& uuid, gu::byte_t* buf, size_t buflen, size_t offset) { if (offset + gu_uuid_serial_size(uuid) > buflen) gu_throw_error (EMSGSIZE) << gu_uuid_serial_size(uuid) << " > " << (buflen - offset); memcpy(buf + offset, uuid.data, gu_uuid_serial_size(uuid)); offset += gu_uuid_serial_size(uuid); return offset; } inline size_t gu_uuid_unserialize(const gu::byte_t* buf, size_t buflen, size_t offset, gu_uuid_t& uuid) { if (offset + gu_uuid_serial_size(uuid) > buflen) gu_throw_error (EMSGSIZE) << gu_uuid_serial_size(uuid) << " > " << (buflen - offset); memcpy(uuid.data, buf + offset, gu_uuid_serial_size(uuid)); offset += gu_uuid_serial_size(uuid); return offset; } namespace gu { class UUID; } class gu::UUID { public: UUID() : uuid_(GU_UUID_NIL) {} UUID(const void* node, const size_t node_len) : uuid_() { gu_uuid_generate(&uuid_, node, node_len); } UUID(gu_uuid_t uuid) : uuid_(uuid) {} virtual ~UUID() {} size_t unserialize(const gu::byte_t* buf, const size_t buflen, const size_t offset) { return gu_uuid_unserialize(buf, buflen, offset, uuid_); } size_t serialize(gu::byte_t* buf, const size_t buflen, const size_t offset) const { return gu_uuid_serialize(uuid_, buf, buflen, offset); } static size_t serial_size() { return sizeof(gu_uuid_t); } const gu_uuid_t* uuid_ptr() const { return &uuid_; } bool operator<(const UUID& cmp) const { return (gu_uuid_compare(&uuid_, &cmp.uuid_) < 0); } bool operator==(const UUID& cmp) const { return (gu_uuid_compare(&uuid_, &cmp.uuid_) == 0); } bool operator!=(const UUID& cmp) const { return !(*this == cmp); } bool older(const UUID& cmp) const { return (gu_uuid_older(&uuid_, &cmp.uuid_) > 0); } void 
write_stream(std::ostream& os) const { os << uuid_; } void read_stream(std::istream& is) { is >> uuid_; } protected: gu_uuid_t uuid_; }; // class UUID namespace gu { inline std::ostream& operator<<(std::ostream& os, const gu::UUID& uuid) { uuid.write_stream(os); return os; } inline std::istream& operator>>(std::istream& is, gu::UUID& uuid) { uuid.read_stream(is); return is; } } // namespace gu #endif // _gu_uuid_hpp_ galera-3-25.3.20/galerautils/src/gu_crc32c.h0000644000015300001660000000211013042054732020156 0ustar jenkinsjenkins/* * Copyright (C) 2013 Codership Oy * * @file Interface to CRC-32C implementation from www.evanjones.ca * * $Id$ */ #ifndef _GU_CRC32C_H_ #define _GU_CRC32C_H_ #if defined(__cplusplus) extern "C" { #endif #include "www.evanjones.ca/crc32c.h" #include "gu_macros.h" #include "gu_byteswap.h" /*! Call this to configure CRC32C to use the best available implementation */ extern void gu_crc32c_configure(); extern CRC32CFunctionPtr gu_crc32c_func; typedef uint32_t gu_crc32c_t; static gu_crc32c_t const GU_CRC32C_INIT = 0xFFFFFFFF; static GU_FORCE_INLINE void gu_crc32c_init (gu_crc32c_t* crc) { *crc = GU_CRC32C_INIT; } static GU_FORCE_INLINE void gu_crc32c_append (gu_crc32c_t* crc, const void* data, size_t size) { *crc = gu_crc32c_func (*crc, data, size); } static GU_FORCE_INLINE uint32_t gu_crc32c_get (gu_crc32c_t crc) { return (~(crc)); } static GU_FORCE_INLINE uint32_t gu_crc32c (const void* data, size_t size) { return (~(gu_crc32c_func (GU_CRC32C_INIT, data, size))); } #if defined(__cplusplus) } #endif #endif /* _GU_CRC32C_H_ */ galera-3-25.3.20/galerautils/src/gu_buf.hpp0000644000015300001660000000037613042054732020227 0ustar jenkinsjenkins/* Copyright (C) 2013 Codership Oy */ /** * @file generic buffer declaration * * $Id$ */ #ifndef _GU_BUF_HPP_ #define _GU_BUF_HPP_ #include "gu_buf.h" namespace gu { typedef struct gu_buf Buf; } #endif /* _GU_BUF_HPP_ */ galera-3-25.3.20/galerautils/src/gu_hexdump.h0000644000015300001660000000177613042054732020572 0ustar jenkinsjenkins// Copyright (C) 2012-2013 Codership Oy /** * @file Functions to dump binary buffer contents in a readable form * * $Id$ */ #ifndef _gu_hexdump_h_ #define _gu_hexdump_h_ #include "gu_types.h" #ifdef __cplusplus extern "C" { #endif /* This makes it 32*2 + 7 spaces = 71 character per line - just short of 80 */ #define GU_HEXDUMP_BYTES_PER_LINE 32 /*! Dumps contents of the binary buffer in a readable form to a 0-terminated * string of length not exeeding str_size - 1 * @param buf input binary buffer * @param but_size size of the input buffer * @param str target string buffer (will be always 0-terminated) * @param str_size string buffer size (including terminating 0) * @param alpha dump alphanumeric characters as they are, padded with '.' * (e.g. D.u.m.p.) 
*/ extern void gu_hexdump(const void* buf, ssize_t buf_size, char* str, ssize_t str_size, bool alpha); #ifdef __cplusplus } #endif #endif /* _gu_hexdump_h_ */ galera-3-25.3.20/galerautils/src/gu_config.h0000644000015300001660000000335513042054732020360 0ustar jenkinsjenkins// Copyright (C) 2010-2014 Codership Oy /** * @file * C-interface for configuration management * * $Id$ */ #ifndef _gu_config_h_ #define _gu_config_h_ #include #include #include // for ssize_t #ifdef __cplusplus extern "C" { #endif typedef struct gu_config gu_config_t; gu_config_t* gu_config_create (void); void gu_config_destroy (gu_config_t* cnf); bool gu_config_has (gu_config_t* cnf, const char* key); bool gu_config_is_set (gu_config_t* cnf, const char* key); /* before setting a parameter, it must be added to a known parameter list*/ int gu_config_add (gu_config_t* cnf, const char* key, const char* val /*can be NULL*/); /* Getters/setters return 0 on success, 1 when key not set/not found, * negative error code in case of other errors (conversion failed and such) */ int gu_config_get_string (gu_config_t* cnf, const char* key, const char** val); int gu_config_get_int64 (gu_config_t* cnf, const char* key, int64_t* val); int gu_config_get_double (gu_config_t* cnf, const char* key, double* val); int gu_config_get_ptr (gu_config_t* cnf, const char* key, void** val); int gu_config_get_bool (gu_config_t* cnf, const char* key, bool* val); void gu_config_set_string (gu_config_t* cnf, const char* key, const char* val); void gu_config_set_int64 (gu_config_t* cnf, const char* key, int64_t val); void gu_config_set_double (gu_config_t* cnf, const char* key, double val); void gu_config_set_ptr (gu_config_t* cnf, const char* key, const void* val); void gu_config_set_bool (gu_config_t* cnf, const char* key, bool val); ssize_t gu_config_print (gu_config_t* cnf, char* buf, ssize_t buf_len); #ifdef __cplusplus } #endif #endif /* _gu_config_h_ */ galera-3-25.3.20/galerautils/src/gu_init.h0000644000015300001660000000063213042054732020051 0ustar jenkinsjenkins/* * Copyright (C) 2013 Codership Oy * * $Id$ */ /*! @file Common initializer for various galerautils parts. Currently it is * logger and CRC32C implementation. 
*/ #ifndef _GU_INIT_H_ #define _GU_INIT_H_ #if defined(__cplusplus) extern "C" { #endif #include "gu_log.h" extern void gu_init (gu_log_cb_t log_cb); #if defined(__cplusplus) } #endif #endif /* _GU_INIT_H_ */ galera-3-25.3.20/galerautils/src/gu_barrier.hpp0000644000015300001660000000221313042054732021071 0ustar jenkinsjenkins// // Copyright (C) 2016 Codership Oy // #ifndef GU_BARRIER #define GU_BARRIER #include #include "gu_throw.hpp" namespace gu { class Barrier { public: Barrier(unsigned count) : barrier_() { int err; if ((err = pthread_barrier_init(&barrier_, 0, count)) != 0) { gu_throw_error(err) << "Barrier init failed"; } } ~Barrier() { int err; if ((err = pthread_barrier_destroy(&barrier_)) != 0) { assert(0); log_warn << "Barrier destroy failed: " << ::strerror(err); } } void wait() { int err(pthread_barrier_wait(&barrier_)); if (err != 0 && err != PTHREAD_BARRIER_SERIAL_THREAD) { gu_throw_error(err) << "Barrier wait failed"; } } private: // Non-copyable Barrier(const Barrier&); Barrier& operator=(const Barrier&); pthread_barrier_t barrier_; }; } #endif // GU_BARRIER galera-3-25.3.20/galerautils/src/gu_byteswap.hpp0000644000015300001660000000246013042054732021305 0ustar jenkinsjenkins// Copyright (C) 2012 Codership Oy /** * @file Endian conversion templates for serialization * * $Id$ */ #ifndef _gu_byteswap_hpp_ #define _gu_byteswap_hpp_ #include "gu_byteswap.h" #include namespace gu { /* General template: undefined */ template T gtoh (const T& val) { // to generate error on compilation rather then linking return val.this_template_does_not_support_this_type(); } /* Specialized templates */ template <> GU_FORCE_INLINE uint8_t gtoh (const uint8_t& val) { return val; } template <> GU_FORCE_INLINE uint16_t gtoh (const uint16_t& val) { return gtoh16(val); } template <> GU_FORCE_INLINE unsigned int gtoh (const unsigned int& val) { return gtoh32(val); } #if __LONG_MAX__ == __INT_MAX__ template <> GU_FORCE_INLINE unsigned long gtoh (const unsigned long& val) { return gtoh32(val); } #elif __LONG_MAX__ == __LONG_LONG_MAX__ template <> GU_FORCE_INLINE unsigned long gtoh (const unsigned long& val) { return gtoh64(val); } #else # error can not determine size of long #endif template <> GU_FORCE_INLINE unsigned long long gtoh (const unsigned long long& val) { return gtoh64(val); } template T htog (const T& val) { return gtoh(val); } } /* namespace gu */ #endif /* _gu_byteswap_hpp_ */ galera-3-25.3.20/galerautils/src/gu_debug_sync.hpp0000644000015300001660000000203113042054732021563 0ustar jenkinsjenkins// // Copyright (C) 2014 Codership Oy // // // Define -DGU_DBUG_ON to enable GU_DBUG macros // // Usage: // // GU_DBUG_SYNC_WAIT("sync_point_identifier") // // The macro above will block whenever "dbug=d,sync_point_identifier" // parameter has been passed to provider. // // Blocking waiters can be signalled by setting "signal=sync_point_identifier" // option. // // List of waiters can be monitored from wsrep debug_sync_waiters status // variable. 
// #ifndef GU_DEBUG_SYNC_HPP #define GU_DEBUG_SYNC_HPP #ifdef GU_DBUG_ON #include #include "gu_dbug.h" #define GU_DBUG_SYNC_WAIT(_c) \ GU_DBUG_EXECUTE(_c, gu_debug_sync_wait(_c);) // Wait for sync signal identified by sync string void gu_debug_sync_wait(const std::string& sync); // Signal waiter identified by sync string void gu_debug_sync_signal(const std::string& sync); // Get list of active sync waiters std::string gu_debug_sync_waiters(); #else #define GU_DBUG_SYNC_WAIT(_c) #endif // GU_DBUG_ON #endif // GU_DEBUG_SYNC_HPP galera-3-25.3.20/galerautils/src/gu_resolver.cpp0000644000015300001660000003346513042054732021314 0ustar jenkinsjenkins// Copyright (C) 2009-2013 Codership Oy #include "gu_resolver.hpp" #include "gu_logger.hpp" #include "gu_utils.hpp" #include "gu_throw.hpp" #include "gu_uri.hpp" #include #include #include // for close() #include #include #include #define BSD_COMP /* For SIOCGIFCONF et al on Solaris */ #include #include #include #if defined(__APPLE__) || defined(__FreeBSD__) # include # define IPV6_ADD_MEMBERSHIP IPV6_JOIN_GROUP # define IPV6_DROP_MEMBERSHIP IPV6_LEAVE_GROUP #else /* !__APPLE__ && !__FreeBSD__ */ extern "C" /* old style cast */ { static int const GU_SIOCGIFCONF = SIOCGIFCONF; static int const GU_SIOCGIFINDEX = SIOCGIFINDEX; } #endif /* !__APPLE__ && !__FreeBSD__ */ //using namespace std; using std::make_pair; // Map from scheme string to addrinfo class SchemeMap { public: typedef std::map Map; typedef Map::const_iterator const_iterator; SchemeMap() : ai_map() { ai_map.insert(make_pair("tcp", get_addrinfo(0, AF_UNSPEC, SOCK_STREAM, 0))); ai_map.insert(make_pair("ssl", get_addrinfo(0, AF_UNSPEC, SOCK_STREAM, 0))); ai_map.insert(make_pair("udp", get_addrinfo(0, AF_UNSPEC, SOCK_DGRAM, 0))); // TODO: } const_iterator find(const std::string& key) const { return ai_map.find(key); } const_iterator end() const { return ai_map.end(); } static const addrinfo* get_addrinfo(const_iterator i) { return &i->second; } private: Map ai_map; struct addrinfo get_addrinfo(int flags, int family, int socktype, int protocol) { struct addrinfo ret = { flags, family, socktype, protocol, #if defined(__FreeBSD__) 0, // FreeBSD gives ENOMEM error with non-zero value #else sizeof(struct sockaddr), #endif 0, 0, 0 }; return ret; } }; static SchemeMap scheme_map; // Helper to copy addrinfo structs. 
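// (Added note, inferred from the code below: this performs a deep copy —
// ai_addr is malloc()'d and memcpy()'d, while ai_canonname and ai_next are
// reset to 0, so only a single addrinfo entry is ever copied.)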
static void copy(const addrinfo& from, addrinfo& to) { to.ai_flags = from.ai_flags; to.ai_family = from.ai_family; to.ai_socktype = from.ai_socktype; to.ai_protocol = from.ai_protocol; to.ai_addrlen = from.ai_addrlen; if (from.ai_addr != 0) { if ((to.ai_addr = reinterpret_cast(malloc(to.ai_addrlen))) == 0) { gu_throw_fatal << "out of memory while trying to allocate " << to.ai_addrlen << " bytes"; } memcpy(to.ai_addr, from.ai_addr, to.ai_addrlen); } to.ai_canonname = 0; to.ai_next = 0; } ///////////////////////////////////////////////////////////////////////// // Sockaddr implementation ///////////////////////////////////////////////////////////////////////// bool gu::net::Sockaddr::is_multicast() const { switch (sa_->sa_family) { case AF_INET: return IN_MULTICAST(ntohl(reinterpret_cast(sa_)->sin_addr.s_addr)); case AF_INET6: return IN6_IS_ADDR_MULTICAST(&reinterpret_cast(sa_)->sin6_addr); default: gu_throw_fatal; } } bool gu::net::Sockaddr::is_anyaddr() const { switch (sa_->sa_family) { case AF_INET: return (ntohl(reinterpret_cast(sa_)->sin_addr.s_addr) == INADDR_ANY); case AF_INET6: return IN6_IS_ADDR_UNSPECIFIED(&reinterpret_cast(sa_)->sin6_addr); default: gu_throw_fatal; } } gu::net::Sockaddr::Sockaddr(const sockaddr* sa, socklen_t sa_len) : sa_ (0 ), sa_len_(sa_len) { if ((sa_ = reinterpret_cast(malloc(sa_len_))) == 0) { gu_throw_fatal; } memcpy(sa_, sa, sa_len_); } gu::net::Sockaddr::Sockaddr(const Sockaddr& s) : sa_ (0 ), sa_len_(s.sa_len_) { if ((sa_ = reinterpret_cast(malloc(sa_len_))) == 0) { gu_throw_fatal; } memcpy(sa_, s.sa_, sa_len_); } gu::net::Sockaddr::~Sockaddr() { free(sa_); } ///////////////////////////////////////////////////////////////////////// // MReq implementation ///////////////////////////////////////////////////////////////////////// static unsigned int get_ifindex_by_addr(const gu::net::Sockaddr& addr) { if (addr.is_anyaddr() == true) { return 0; } unsigned int idx(-1); int err(0); #if defined(__APPLE__) || defined(__FreeBSD__) struct ifaddrs *if_addrs = NULL; struct ifaddrs *if_addr = NULL; if (getifaddrs (&if_addrs) != 0) { err = errno; goto out; } for (if_addr = if_addrs; if_addr != NULL; if_addr = if_addr->ifa_next) { try { gu::net::Sockaddr sa (if_addr->ifa_addr, sizeof (struct sockaddr)); if (sa.get_family () == addr.get_family () && memcmp (sa.get_addr (), addr.get_addr (), addr.get_addr_len ()) == 0) { idx = if_nametoindex (if_addr->ifa_name); goto out; } } catch (gu::Exception& e) { } } out: # else /* !__APPLE__ && !__FreeBSD__ */ struct ifconf ifc; memset(&ifc, 0, sizeof(struct ifconf)); ifc.ifc_len = 16*sizeof(struct ifreq); std::vector ifr(16); ifc.ifc_req = &ifr[0]; int fd(socket(AF_INET, SOCK_DGRAM, 0)); if (fd == -1) { err = errno; gu_throw_error(err) << "could not create socket"; } if ((err = ioctl(fd, GU_SIOCGIFCONF, &ifc)) == -1) { err = errno; goto out; } log_debug << "read: " << ifc.ifc_len; for (size_t i(0); i < ifc.ifc_len/sizeof(struct ifreq); ++i) { struct ifreq* ifrp(&ifr[i]); try { log_debug << "read: " << ifrp->ifr_name; gu::net::Sockaddr sa(&ifrp->ifr_addr, sizeof(struct sockaddr)); if (sa.get_family() == addr.get_family() && memcmp(sa.get_addr(), addr.get_addr(), addr.get_addr_len()) == 0) { if ((err = ioctl(fd, GU_SIOCGIFINDEX, ifrp, sizeof(struct ifreq))) == -1) { err = errno; } #if defined(__linux__) idx = ifrp->ifr_ifindex; #elif defined(__sun__) idx = ifrp->ifr_index; #else # error "Unsupported ifreq structure" #endif goto out; } } catch (gu::Exception& e) { } } out: close(fd); #endif /* !__APPLE__ && !__FreeBSD__ */ if (err 
!= 0) { gu_throw_error(err) << "failed to get interface index"; } else { log_debug << "returning ifindex: " << idx; } return idx; } gu::net::MReq::MReq(const Sockaddr& mcast_addr, const Sockaddr& if_addr) : mreq_ ( 0), mreq_len_ ( 0), ipproto_ ( 0), add_membership_opt_ (-1), drop_membership_opt_(-1), multicast_if_opt_ (-1), multicast_loop_opt_ (-1), multicast_ttl_opt_ (-1) { log_debug << mcast_addr.get_family() << " " << if_addr.get_family(); if (mcast_addr.get_family() != if_addr.get_family()) { gu_throw_fatal << "address families do not match: " << mcast_addr.get_family() << ", " << if_addr.get_family(); } if (mcast_addr.get_family() != AF_INET && mcast_addr.get_family() != AF_INET6) { gu_throw_fatal << "Mreq: address family " << mcast_addr.get_family() << " not supported"; } get_ifindex_by_addr(if_addr); mreq_len_ = (mcast_addr.get_family() == AF_INET ? sizeof(struct ip_mreq) : sizeof(struct ipv6_mreq)); if ((mreq_ = malloc(mreq_len_)) == 0) { gu_throw_fatal << "could not allocate memory"; } memset(mreq_, 0, mreq_len_); switch (mcast_addr.get_family()) { case AF_INET: { struct ip_mreq* mr(reinterpret_cast(mreq_)); mr->imr_multiaddr.s_addr = *reinterpret_cast(mcast_addr.get_addr()); mr->imr_interface.s_addr = *reinterpret_cast(if_addr.get_addr()); ipproto_ = IPPROTO_IP; add_membership_opt_ = IP_ADD_MEMBERSHIP; drop_membership_opt_ = IP_DROP_MEMBERSHIP; multicast_if_opt_ = IP_MULTICAST_IF; multicast_loop_opt_ = IP_MULTICAST_LOOP; multicast_ttl_opt_ = IP_MULTICAST_TTL; break; } case AF_INET6: { struct ipv6_mreq* mr(reinterpret_cast(mreq_)); mr->ipv6mr_multiaddr = *reinterpret_cast(mcast_addr.get_addr()); mr->ipv6mr_interface = get_ifindex_by_addr(if_addr); ipproto_ = IPPROTO_IPV6; add_membership_opt_ = IPV6_ADD_MEMBERSHIP; drop_membership_opt_ = IPV6_DROP_MEMBERSHIP; multicast_loop_opt_ = IPV6_MULTICAST_LOOP; multicast_ttl_opt_ = IPV6_MULTICAST_HOPS; break; } } } gu::net::MReq::~MReq() { free(mreq_); } const void* gu::net::MReq::get_multicast_if_value() const { switch (ipproto_) { case IPPROTO_IP: return &reinterpret_cast(mreq_)->imr_interface; case IPPROTO_IPV6: return &reinterpret_cast(mreq_)->ipv6mr_interface; default: gu_throw_fatal << "get_multicast_if_value() not implemented for: " << ipproto_; } } int gu::net::MReq::get_multicast_if_value_size() const { switch (ipproto_) { case IPPROTO_IP: return sizeof(reinterpret_cast(mreq_)->imr_interface); case IPPROTO_IPV6: return sizeof(reinterpret_cast(mreq_)->ipv6mr_interface); default: gu_throw_fatal << "get_multicast_if_value_size() not implemented for: " << ipproto_; } } ///////////////////////////////////////////////////////////////////////// // Addrinfo implementation ///////////////////////////////////////////////////////////////////////// gu::net::Addrinfo::Addrinfo(const addrinfo& ai) : ai_() { copy(ai, ai_); } gu::net::Addrinfo::Addrinfo(const Addrinfo& ai) : ai_() { copy(ai.ai_, ai_); } gu::net::Addrinfo::Addrinfo(const Addrinfo& ai, const Sockaddr& sa) : ai_() { if (ai.get_addrlen() != sa.get_sockaddr_len()) { gu_throw_fatal; } copy(ai.ai_, ai_); memcpy(ai_.ai_addr, &sa.get_sockaddr(), ai_.ai_addrlen); } gu::net::Addrinfo::~Addrinfo() { free(ai_.ai_addr); } std::string gu::net::Addrinfo::to_string() const { static const size_t max_addr_str_len = (6 /* tcp|udp:// */ + INET6_ADDRSTRLEN + 2 /* [] */ + 6 /* :portt */); std::string ret; ret.reserve(max_addr_str_len); Sockaddr addr(ai_.ai_addr, ai_.ai_addrlen); switch (get_socktype()) { case SOCK_STREAM: ret += "tcp://"; break; case SOCK_DGRAM: ret += "udp://"; break; default: 
gu_throw_error(EINVAL) << "invalid socktype: " << get_socktype(); } char dst[INET6_ADDRSTRLEN + 1]; if (inet_ntop(get_family(), addr.get_addr(), dst, sizeof(dst)) == 0) { gu_throw_error(errno) << "inet ntop failed"; } switch (get_family()) { case AF_INET: ret += dst; break; case AF_INET6: ret += "["; ret += dst; ret += "]"; break; default: gu_throw_error(EINVAL) << "invalid address family: " << get_family(); } ret += ":" + gu::to_string(ntohs(addr.get_port())); ret.reserve(0); // free unused space if possible return ret; } ///////////////////////////////////////////////////////////////////////// // Public methods ///////////////////////////////////////////////////////////////////////// gu::net::Addrinfo gu::net::resolve(const URI& uri) { SchemeMap::const_iterator i(scheme_map.find(uri.get_scheme())); if (i == scheme_map.end()) { gu_throw_error(EINVAL) << "invalid scheme: " << uri.get_scheme(); } try { std::string host(uri.get_host()); // remove [] if this is IPV6 address size_t pos(host.find_first_of('[')); if (pos != std::string::npos) { host.erase(pos, pos + 1); pos = host.find_first_of(']'); if (pos == std::string::npos) { gu_throw_error(EINVAL) << "invalid host: " << uri.get_host(); } host.erase(pos, pos + 1); } int err; addrinfo* ai(0); try { err = getaddrinfo(host.c_str(), uri.get_port().c_str(), SchemeMap::get_addrinfo(i), &ai); } catch (NotSet&) { err = getaddrinfo(host.c_str(), NULL, SchemeMap::get_addrinfo(i), &ai); } if (err != 0) { // Use EHOSTUNREACH as generic error number in case errno // is zero. Real error should be apparent from exception message gu_throw_error(errno == 0 ? EHOSTUNREACH : errno) << "getaddrinfo failed with error '" << gai_strerror(err) << "' (" << err << ") for " << uri.to_string(); } // Assume that the first entry is ok Addrinfo ret(*ai); freeaddrinfo(ai); return ret; } catch (NotFound& nf) { gu_throw_error(EINVAL) << "invalid URI: " << uri.to_string(); } } galera-3-25.3.20/galerautils/src/gu_stats.hpp0000644000015300001660000000174113042054732020606 0ustar jenkinsjenkins/* * Copyright (C) 2014 Codership Oy */ #ifndef _gu_stats_hpp_ #define _gu_stats_hpp_ #include namespace gu { class Stats { public: Stats():n_(0), old_m_(), new_m_(), old_s_(), new_s_(), min_(), max_() {} void insert(const double); void clear() { n_ = 0; } unsigned int times() const { return n_; } double min() const; double max() const; double mean() const; double variance() const; double std_dev() const; friend std::ostream& operator<<(std::ostream&, const Stats&); std::string to_string() const; private: unsigned int n_; double old_m_; double new_m_; double old_s_; double new_s_; double min_; double max_; }; std::ostream& operator<<(std::ostream&, const Stats&); } #endif // _gu_stats_hpp_ galera-3-25.3.20/galerautils/src/gu_crc.hpp0000644000015300001660000000131613042054732020215 0ustar jenkinsjenkins/* * Copyright (C) 2013 Codership Oy * * @file header for various CRC stuff * * $Id$ */ #ifndef GU_CRC_HPP #define GU_CRC_HPP #include "gu_crc32c.h" namespace gu { class CRC32C { public: CRC32C() : state_(GU_CRC32C_INIT) {} void append(const void* const data, size_t const size) { gu_crc32c_append (&state_, data, size); } uint32_t get() const { return gu_crc32c_get(state_); } uint32_t operator() () const { return get(); } static uint32_t digest(const void* const data, size_t const size) { return gu_crc32c(data, size); } private: gu_crc32c_t state_; }; /* class CRC32C */ } /* namespace gu */ #endif /* GU_CRC_HPP */ 
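/* Editor's addition — a minimal usage sketch (not part of the original source
 * tree) showing how the gu::Stats and gu::CRC32C helpers declared above are
 * meant to be used together. Assumptions: the galerautils headers are on the
 * include path and gu_crc32c_configure() (see gu_crc32c.c) has been called at
 * startup to select the best CRC-32C implementation; the function and variable
 * names below are illustrative only. Wrapped in #if 0, following the
 * convention used elsewhere in this tree for non-built example code. */
#if 0
#include "gu_stats.hpp"
#include "gu_crc.hpp"
#include <iostream>

static void galerautils_usage_sketch()
{
    // Running min/mean/max/std_dev over a stream of samples
    // (the online-variance scheme referenced in gu_stats.cpp).
    gu::Stats stats;
    stats.insert(1.0);
    stats.insert(2.0);
    stats.insert(4.0);
    std::cout << stats << std::endl; // prints min/mean/max/std_dev/times

    // Incremental and one-shot CRC-32C of the same payload should agree.
    static const char payload[] = "galera";
    gu::CRC32C crc;
    crc.append(payload, sizeof(payload) - 1);
    std::cout << std::hex << crc.get() << " == "
              << gu::CRC32C::digest(payload, sizeof(payload) - 1)
              << std::endl;
}
#endif /* 0 */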
galera-3-25.3.20/galerautils/src/gu_hexdump.cpp0000644000015300001660000000152113042054732021111 0ustar jenkinsjenkins// Copyright (C) 2013 Codership Oy /** * @file operator << for hexdump - definiton * * $Id$ */ #include "gu_hexdump.hpp" #include "gu_hexdump.h" #include "gu_logger.hpp" namespace gu { static size_t const hexdump_bytes_per_go(GU_HEXDUMP_BYTES_PER_LINE * 2); static size_t const hexdump_reserve_string( hexdump_bytes_per_go*2 /* chars */ + hexdump_bytes_per_go/4 /* whitespace */ + 1 /* \0 */ ); std::ostream& Hexdump::to_stream (std::ostream& os) const { char str[hexdump_reserve_string]; size_t off(0); while (off < size_) { size_t const to_print(std::min(size_ - off, hexdump_bytes_per_go)); gu_hexdump (buf_ + off, to_print, str, sizeof(str), alpha_); off += to_print; os << str; if (off < size_) os << '\n'; } return os; } } // namespace gu galera-3-25.3.20/galerautils/src/gu_logger.cpp0000644000015300001660000000657513042054732020734 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy * * This code is based on an excellent article at Dr.Dobb's: * http://www.ddj.com/cpp/201804215?pgno=1 */ #include #include #include #include #include #include "gu_logger.hpp" #include "gu_string_utils.hpp" // strsplit #include #include #include using std::string; using std::vector; using std::set; namespace gu { class DebugFilter { set filter; public: DebugFilter() : filter() { if (::getenv("LOGGER_DEBUG_FILTER")) { set_filter(::getenv("LOGGER_DEBUG_FILTER")); } } ~DebugFilter() {} void set_filter(const string& str) { vector dvec = gu::strsplit(str, ','); for (vector::const_iterator i = dvec.begin(); i != dvec.end(); ++i) { filter.insert(*i); } } size_t size() const { return filter.size(); } bool is_set(const string& str) const { return filter.find(str) != filter.end() || filter.find(str.substr(0, str.find_first_of(":"))) != filter.end(); } }; static DebugFilter debug_filter; void Logger::set_debug_filter(const string& str) { debug_filter.set_filter(str); } bool Logger::no_debug(const string& file, const string& func, const int line) { return debug_filter.size() > 0 && debug_filter.is_set(func) == false; } #ifndef _gu_log_h_ void Logger::enable_tstamp (bool yes) { do_timestamp = yes; } void Logger::enable_debug (bool yes) { if (yes) { max_level = LOG_DEBUG; } else { max_level = LOG_INFO; } } void Logger::default_logger (int lvl, const char* msg) { fputs (msg, stderr); fputc ('\n', stderr); fflush (stderr); } void Logger::set_logger (LogCallback cb) { if (0 == cb) { logger = default_logger; } else { logger = cb; } } static const char* level_str[LOG_MAX] = { "FATAL: ", "ERROR: ", " WARN: ", " INFO: ", "DEBUG: " }; bool Logger::do_timestamp = false; LogLevel Logger::max_level = LOG_INFO; LogCallback Logger::logger = default_logger; #else #define do_timestamp gu_log_self_tstamp == true #define level_str gu_log_level_str #endif // _gu_log_h_ void Logger::prepare_default() { if (do_timestamp) { using namespace std; struct tm date; struct timeval time; gettimeofday (&time, NULL); localtime_r (&time.tv_sec, &date); os << date.tm_year + 1900 << '-' << setw(2) << setfill('0') << (date.tm_mon + 1) << '-' << setw(2) << setfill('0') << date.tm_mday << ' ' << setw(2) << setfill('0') << date.tm_hour << ':' << setw(2) << setfill('0') << date.tm_min << ':' << setw(2) << setfill('0') << date.tm_sec << '.' 
<< setw(3) << setfill('0') << (time.tv_usec / 1000) << ' '; } os << level_str[level]; } } galera-3-25.3.20/galerautils/src/gu_string.hpp0000644000015300001660000001721513042054732020761 0ustar jenkinsjenkins// Copyright (C) 2013 Codership Oy /*! * @file string class template that allows to allows to allocate initial storage * to hold string data together with the object. If storage is exhausted, * it transparently overflows to heap. */ #ifndef _GU_STRING_HPP_ #define _GU_STRING_HPP_ #include "gu_vector.hpp" #include #include // std::bad_alloc #include #include // realloc() #include // strlen(), strcmp() #include // snprintf() #include #include "gu_macros.h" // gu_likely() namespace gu { /* container for a printf()-like format */ struct Fmt { explicit Fmt(const char* f) : fmt_(f) {} const char* const fmt_; }; template class StringBase { public: typedef T value_type; typedef T* pointer; typedef const T* const_pointer; typedef size_t size_type; size_type size() const { return size_; } size_type length()const { return size(); } pointer c_str() { return str_; } const_pointer c_str() const { return str_; } StringBase& operator<< (const Fmt& f) { fmt_ = f.fmt_; return *this; } StringBase& operator<< (const StringBase& s) { size_type const n(s.size()); append_string (s.c_str(), n); return *this; } StringBase& operator<< (const char* s) { size_type const n(::strlen(s)); append_string (s, n); return *this; } StringBase& operator<< (const std::string& s) { append_string (s.c_str(), s.length()); return *this; } StringBase& operator<< (const bool& b) { // following std::boolalpha if (b) append_string ("true", 4); else append_string ("false", 5); return *this; } StringBase& operator<< (const double& d) { convert ("%f", std::numeric_limits::digits10, d); return *this; } StringBase& operator<< (const void* const ptr) { /* not using %p here seeing that it may be not universally supported */ static size_type const ptr_len(sizeof(ptr) == 4 ? 11 : 19 ); static const char* const fmt(sizeof(ptr) == 4 ? 
"0x%08lx":"0x%016lx"); convert (fmt, ptr_len, reinterpret_cast(ptr)); return *this; } StringBase& operator<< (const long long &i) { convert ("%lld", 21, i); return *this; } StringBase& operator<< (const unsigned long long &i) { convert ("%llu", 20, i); return *this; } StringBase& operator<< (const int &i) { convert ("%d", 11, i); return *this; } StringBase& operator<< (const unsigned int &i) { convert ("%u", 10, i); return *this; } StringBase& operator<< (const short &i) { convert ("%hd", 6, i); return *this; } StringBase& operator<< (const unsigned short &i) { convert ("%hu", 5, i); return *this; } StringBase& operator<< (const char &c) { convert ("%c", 1, c); return *this; } StringBase& operator<< (const unsigned char &c) { convert ("%hhu", 3, c); return *this; } template StringBase& operator+= (const X& x) { return operator<<(x); } bool operator== (const StringBase& other) { return (size() == other.size() && !::strcmp(c_str(), other.c_str())); } bool operator== (const std::string& other) { return (size() == other.size() && !::strcmp(c_str(), other.c_str())); } bool operator== (const char* s) { size_type const s_size(::strlen(s)); return (size() == s_size && !::strcmp(c_str(), s)); } template bool operator!= (const X& x) { return !operator==(x); } void clear() { derived_clear(); }; StringBase& operator= (const StringBase& other) { clear(); append_string (other.c_str(), other.size()); return *this; } StringBase& operator= (const char* const other) { clear(); append_string (other, ::strlen(other)); return *this; } protected: pointer str_; // points to an adequately sized memory area const char* fmt_; size_type size_; virtual void reserve (size_type n) = 0; virtual void derived_clear() = 0; // real clear must happen in derived class void append_string (const_pointer const s, size_type const n) { reserve(size_ + n + 1); std::copy(s, s + n, &str_[size_]); size_ += n; str_[size_] = 0; } template void convert (const char* const format, size_type max_len, const X& x) { ++max_len; // add null termination reserve(size_ + max_len); int const n(snprintf(&str_[size_], max_len, fmt_ ? fmt_ : format, x)); assert(n > 0); assert(size_type(n) < max_len); if (gu_likely(n > 0)) size_ += n; str_[size_] = 0; // null-terminate even if snprintf() failed. 
fmt_ = NULL; } StringBase(pointer init_buf) : str_(init_buf), fmt_(NULL), size_(0) {} virtual ~StringBase() {} private: StringBase(const StringBase&); }; /* class StringBase */ template std::ostream& operator<< (std::ostream& os, const gu::StringBase& s) { os << s.c_str(); return os; } template class String : public StringBase { public: typedef T value_type; typedef T* pointer; typedef const T* const_pointer; typedef size_t size_type; String() : StringBase(buf_), reserved_(capacity), buf_() { buf_[0] = 0; } explicit String(const StringBase& s) : StringBase(buf_), reserved_(capacity), buf_() { append_string (s.c_str(), s.size()); } String(const T* s, size_type n) : StringBase(buf_), reserved_(capacity), buf_() { append_string (s, n); } explicit String(const char* s) : StringBase(buf_), reserved_(capacity), buf_() { size_type const n(strlen(s)); append_string (s, n); } explicit String(const std::string& s) : StringBase(buf_), reserved_(capacity), buf_() { append_string (s.c_str(), s.length()); } #if 0 String& operator= (String other) { using namespace std; swap(other); return *this; } #endif template String& operator= (const X& x) { base::operator=(x); return *this; } ~String() { if (base::str_ != buf_) ::free(base::str_); } private: size_type reserved_; value_type buf_[capacity]; typedef StringBase base; void reserve (size_type const n) { if (n <= reserved_) return; assert (n > capacity); bool const overflow(buf_ == base::str_); pointer const tmp (static_cast (::realloc(overflow ? NULL : base::str_, n * sizeof(value_type)))); if (NULL == tmp) throw std::bad_alloc(); if (overflow) std::copy(buf_, buf_ + base::size_, tmp); base::str_ = tmp; reserved_ = n; } void derived_clear() { if (base::str_ != buf_) ::free(base::str_); base::str_ = buf_; base::size_ = 0; buf_[0] = 0; reserved_ = capacity; } void append_string (const_pointer s, size_type n) { base::append_string(s, n); } }; /* class String */ } /* namespace gu */ #endif /* _GU_STRING_HPP_ */ galera-3-25.3.20/galerautils/src/gu_regex.hpp0000644000015300001660000000344413042054732020564 0ustar jenkinsjenkins// Copyright (C) 2009 Codership Oy /** * @file Regular expressions parser based on POSIX regex functions in * * $Id$ */ #ifndef _gu_regex_hpp_ #define _gu_regex_hpp_ #include #include #include #include "gu_throw.hpp" namespace gu { class RegEx { regex_t regex; std::string strerror (int rc) const; public: /*! * @param expr regular expression string */ RegEx (const std::string& expr) : regex() { int rc; if ((rc = regcomp(®ex, expr.c_str(), REG_EXTENDED)) != 0) { gu_throw_fatal << "regcomp(" << expr << "): " << strerror(rc); } } ~RegEx () { regfree (®ex); } /*! * This class is to differentiate between an empty and unset strings. * @todo: find a proper name for it and move to gu_utils.hpp */ class Match { std::string value; bool set; public: Match() : value(), set(false) {} Match(const std::string& s) : value(s), set(true) {} // throws NotSet const std::string& str() const { if (set) return value; throw NotSet(); } bool is_set() const { return set; } }; /*! 
* @brief Matches given string * * @param str string to match with expression * @param num number of matches to return * * @return vector of matched substrings */ std::vector match (const std::string& str, size_t num) const; }; } #endif /* _gu_regex_hpp_ */ galera-3-25.3.20/galerautils/src/gu_assert.h0000644000015300001660000000104213042054732020403 0ustar jenkinsjenkins// Copyright (C) 2007 Codership Oy /** * @file Assert macro definition * * $Id$ */ #ifndef _gu_assert_h_ #define _gu_assert_h_ #include "gu_log.h" #ifndef DEBUG_ASSERT #include #else #include #undef assert /** Assert that sleeps instead of aborting the program, saving it for gdb */ #define assert(expr) if (!(expr)) { \ gu_fatal ("Assertion (%s) failed", __STRING(expr)); \ while(1) sleep(1); } #endif /* DEBUG_ASSERT */ #endif /* _gu_assert_h_ */ galera-3-25.3.20/galerautils/src/avalanche.c0000644000015300001660000000510113042054732020324 0ustar jenkinsjenkins/* * Copyright (c) 2012 Codership Oy * * This program is to measure avalanche effect of different hash * implementations, for that it uses 1M of random 8-byte keys. * Use #define macro below to define the implementation to test. * * Compilation: g++ -DHAVE_ENDIAN_H -DHAVE_BYTESWAP_H -O3 -Wall -Wno-unused avalanche.c \ gu_mmh3.c gu_spooky.c -o avalanche && time ./avalanche * Visualization in gnuplot: unset cbtics set xrange [-0.5:64.5] set yrange [-0.5:64.5] set cbrange [0.0:1.0] set xlabel 'Hash bit' set ylabel 'Flipped bit in message' set cblabel 'Hash bit flip probability [0.0 - 1.0]' set palette rgbformula 7,7,7 plot 'avalanche.out' matrix with image */ #include "gu_hash.h" #include #include #include uint64_t flip_count[64*64] = { 0, }; //#define HASH gu_mmh128_64 #define HASH gu_fast_hash64 int main (int argc, char* argv[]) { int n_keys = 1 << 20; int i, j, k; /* collect statistics */ for (k = 0; k < n_keys; k++) { uint64_t key_part = rand(); uint64_t const key = (key_part << 32) + rand(); uint64_t const hash = HASH (&key, sizeof(key)); for (j = 0; j < 64; j++) { uint64_t const flipped_key = key ^ (GU_LONG_LONG(0x01) << j); uint64_t const flipped_hash = HASH (&flipped_key, sizeof(flipped_key)); uint64_t flipped_bits = hash ^ flipped_hash; for (i = 0; i < 64; i++) { int const idx = j * 64 + i; flip_count[idx] += flipped_bits & GU_LONG_LONG(0x01); flipped_bits >>= 1; } } } /* print statistics */ char out_name [256] = { 0, }; snprintf(out_name, sizeof(out_name) - 1, "%s.out", argv[0]); FILE* const out = fopen(out_name, "w"); if (!out) { fprintf (stderr, "Could not open file for writing: '%s': %d (%s)", out_name, errno, strerror(errno)); return errno; } uint64_t base = n_keys; double min_stat = 1.0; double max_stat = 0.0; for (j = 0; j < 64; j++) { for (i = 0; i < 64; i++) { int const idx = j * 64 + i; double stat = (((double)(flip_count[idx]))/base); min_stat = min_stat > stat ? stat : min_stat; max_stat = max_stat < stat ? stat : max_stat; fprintf (out, "%6.4f%c", stat, 63 == i ? 
'\n' : '\t'); } } fclose(out); printf ("%6.4f : %6.4f (delta: %6.4f)\n", min_stat, max_stat, max_stat - min_stat); return 0; } galera-3-25.3.20/galerautils/src/gu_conf.h0000644000015300001660000000112513042054732020031 0ustar jenkinsjenkins// Copyright (C) 2007 Codership Oy /** * @file * Configuration interface for libgalerautils * * $Id$ */ #ifndef _gu_conf_h_ #define _gu_conf_h_ #ifdef __cplusplus extern "C" { #endif /* Logging options */ #include #include "gu_log.h" extern int gu_conf_set_log_file (FILE* file); extern int gu_conf_set_log_callback (gu_log_cb_t callback); extern int gu_conf_self_tstamp_on (); extern int gu_conf_self_tstamp_off (); extern int gu_conf_debug_on (); extern int gu_conf_debug_off (); #ifdef __cplusplus } #endif #endif // _gu_conf_h_ galera-3-25.3.20/galerautils/src/gu_to.c0000644000015300001660000002651213042054732017530 0ustar jenkinsjenkins/* * Copyright (C) 2008 Codership Oy * * $Id$ */ /*! \file \brief Total order access "class" implementation. * Although gcs_repl() and gcs_recv() calls return sequence * numbers in total order, there are concurrency issues between * application threads and they can grab critical section * mutex out of order. Wherever total order access to critical * section is required, these functions can be used to do this. */ #include #include #include #include // abort() #include "gu_log.h" #include "gu_assert.h" #include "gu_mem.h" #include "gu_mutex.h" #include "gu_to.h" #define TO_USE_SIGNAL 1 typedef enum { HOLDER = 0, //!< current TO holder WAIT, //!< actively waiting in the queue CANCELED, //!< Waiter has canceled its to request INTERRUPTED,//!< marked to be interrupted RELEASED, //!< has been released, free entry now } waiter_state_t; typedef struct { #ifdef TO_USE_SIGNAL gu_cond_t cond; #else pthread_mutex_t mtx; // have to use native pthread for double locking #endif waiter_state_t state; } to_waiter_t; struct gu_to { volatile gu_seqno_t seqno; size_t used; /* number of active waiters */ ssize_t qlen; size_t qmask; to_waiter_t* queue; gu_mutex_t lock; }; /** Returns pointer to the waiter with the given seqno */ static inline to_waiter_t* to_get_waiter (gu_to_t* to, gu_seqno_t seqno) { // Check for queue overflow. Tell application that it should wait. if (seqno >= to->seqno + to->qlen) { return NULL; } return (to->queue + (seqno & to->qmask)); } gu_to_t *gu_to_create (int len, gu_seqno_t seqno) { gu_to_t *ret; assert (seqno >= 0); if (len <= 0) { gu_error ("Negative length parameter: %d", len); return NULL; } ret = GU_CALLOC (1, gu_to_t); if (ret) { /* Make queue length a power of 2 */ ret->qlen = 1; while (ret->qlen < len) { // unsigned, can be bigger than any integer ret->qlen = ret->qlen << 1; } ret->qmask = ret->qlen - 1; ret->seqno = seqno; ret->queue = GU_CALLOC (ret->qlen, to_waiter_t); if (ret->queue) { ssize_t i; for (i = 0; i < ret->qlen; i++) { to_waiter_t *w = ret->queue + i; #ifdef TO_USE_SIGNAL gu_cond_init (&w->cond, NULL); #else pthread_mutex_init (&w->mtx, NULL); #endif w->state = RELEASED; } gu_mutex_init (&ret->lock, NULL); return ret; } gu_free (ret); } return NULL; } long gu_to_destroy (gu_to_t** to) { gu_to_t *t = *to; long ret; ssize_t i; gu_mutex_lock (&t->lock); if (t->used) { gu_mutex_unlock (&t->lock); return -EBUSY; } for (i = 0; i < t->qlen; i++) { to_waiter_t *w = t->queue + i; #ifdef TO_USE_SIGNAL if (gu_cond_destroy (&w->cond)) { // @todo: what if someone is waiting? gu_warn ("Failed to destroy condition %d. 
Should not happen", i); } #else if (pthread_mutex_destroy (&w->mtx)) { // @todo: what if someone is waiting? gu_warn ("Failed to destroy mutex %d. Should not happen", i); } #endif } t->qlen = 0; gu_mutex_unlock (&t->lock); /* What else can be done here? */ ret = gu_mutex_destroy (&t->lock); if (ret) return -ret; // application can retry gu_free (t->queue); gu_free (t); *to = NULL; return 0; } long gu_to_grab (gu_to_t* to, gu_seqno_t seqno) { long err; to_waiter_t *w; assert (seqno >= 0); if ((err = gu_mutex_lock(&to->lock))) { gu_fatal("Mutex lock failed (%d): %s", err, strerror(err)); abort(); } if (seqno < to->seqno) { gu_mutex_unlock(&to->lock); return -ECANCELED; } if ((w = to_get_waiter (to, seqno)) == NULL) { gu_mutex_unlock(&to->lock); return -EAGAIN; } /* we have a valid waiter now */ switch (w->state) { case INTERRUPTED: w->state = RELEASED; err = -EINTR; break; case CANCELED: err = -ECANCELED; break; case RELEASED: if (seqno == to->seqno) { w->state = HOLDER; } else if (seqno < to->seqno) { gu_error("Trying to grab outdated seqno"); err = -ECANCELED; } else { /* seqno > to->seqno, wait for my turn */ w->state = WAIT; to->used++; #ifdef TO_USE_SIGNAL gu_cond_wait(&w->cond, &to->lock); #else pthread_mutex_lock (&w->mtx); pthread_mutex_unlock (&to->lock); pthread_mutex_lock (&w->mtx); // wait for unlock by other thread pthread_mutex_lock (&to->lock); pthread_mutex_unlock (&w->mtx); #endif to->used--; switch (w->state) { case WAIT:// should be most probable assert (seqno == to->seqno); w->state = HOLDER; break; case INTERRUPTED: w->state = RELEASED; err = -EINTR; break; case CANCELED: err = -ECANCELED; break; case RELEASED: /* this waiter has been cancelled */ assert(seqno < to->seqno); err = -ECANCELED; break; default: gu_fatal("Invalid cond wait exit state %d, seqno %llu(%llu)", w->state, seqno, to->seqno); abort(); } } break; default: gu_fatal("TO queue over wrap"); abort(); } gu_mutex_unlock(&to->lock); return err; } static inline long to_wake_waiter (to_waiter_t* w) { long err = 0; if (w->state == WAIT) { #ifdef TO_USE_SIGNAL err = gu_cond_signal (&w->cond); #else err = pthread_mutex_unlock (&w->mtx); #endif if (err) { gu_fatal ("gu_cond_signal failed: %d", err); } } return err; } static inline void to_release_and_wake_next (gu_to_t* to, to_waiter_t* w) { w->state = RELEASED; /* * Iterate over CANCELED waiters and set states as RELEASED * We look for waiter in the head of queue, which guarantees that * to_get_waiter() will always return a valid waiter pointer */ for (to->seqno++; (w = to_get_waiter(to, to->seqno)) && w && w->state == CANCELED; to->seqno++) { w->state = RELEASED; } to_wake_waiter (w); } long gu_to_release (gu_to_t *to, gu_seqno_t seqno) { long err; to_waiter_t *w; assert (seqno >= 0); if ((err = gu_mutex_lock(&to->lock))) { gu_fatal("Mutex lock failed (%d): %s", err, strerror(err)); abort(); } if ((w = to_get_waiter (to, seqno)) == NULL) { gu_mutex_unlock(&to->lock); return -EAGAIN; } /* we have a valid waiter now */ if (seqno == to->seqno) { to_release_and_wake_next (to, w); } else if (seqno > to->seqno) { if (w->state != CANCELED) { gu_fatal("Illegal state in premature release: %d", w->state); abort(); } /* Leave state CANCELED so that real releaser can iterate */ } else { /* */ if (w->state != RELEASED) { gu_fatal("Outdated seqno and state not RELEASED: %d", w->state); abort(); } } gu_mutex_unlock(&to->lock); return err; } gu_seqno_t gu_to_seqno (gu_to_t* to) { return to->seqno - 1; } long gu_to_cancel (gu_to_t *to, gu_seqno_t seqno) { long err; to_waiter_t 
*w; assert (seqno >= 0); if ((err = gu_mutex_lock (&to->lock))) { gu_fatal("Mutex lock failed (%d): %s", err, strerror(err)); abort(); } // Check for queue overflow. This is totally unrecoverable. Abort. if ((w = to_get_waiter (to, seqno)) == NULL) { gu_mutex_unlock(&to->lock); abort(); } /* we have a valid waiter now */ if ((seqno > to->seqno) || (seqno == to->seqno && w->state != HOLDER)) { err = to_wake_waiter (w); w->state = CANCELED; } else if (seqno == to->seqno && w->state == HOLDER) { gu_warn("tried to cancel current TO holder, state %d seqno %llu", w->state, seqno); err = -ECANCELED; } else { gu_warn("trying to cancel used seqno: state %d cancel seqno = %llu, " "TO seqno = %llu", w->state, seqno, to->seqno); err = -ECANCELED; } gu_mutex_unlock (&to->lock); return err; } long gu_to_self_cancel(gu_to_t *to, gu_seqno_t seqno) { long err = 0; to_waiter_t *w; assert (seqno >= 0); if ((err = gu_mutex_lock (&to->lock))) { gu_fatal("Mutex lock failed (%d): %s", err, strerror(err)); abort(); } if ((w = to_get_waiter (to, seqno)) == NULL) { gu_mutex_unlock(&to->lock); return -EAGAIN; } /* we have a valid waiter now */ if (seqno > to->seqno) { // most probable case w->state = CANCELED; } else if (seqno == to->seqno) { // have to wake the next waiter as if we grabbed and now releasing TO to_release_and_wake_next (to, w); } else { // (seqno < to->seqno) // This waiter must have been canceled or even released by preceding // waiter. Do nothing. } gu_mutex_unlock(&to->lock); return err; } long gu_to_interrupt (gu_to_t *to, gu_seqno_t seqno) { long rcode = 0; long err; to_waiter_t *w; assert (seqno >= 0); if ((err = gu_mutex_lock (&to->lock))) { gu_fatal("Mutex lock failed (%d): %s", err, strerror(err)); abort(); } if (seqno >= to->seqno) { if ((w = to_get_waiter (to, seqno)) == NULL) { gu_mutex_unlock(&to->lock); return -EAGAIN; } /* we have a valid waiter now */ switch (w->state) { case HOLDER: gu_debug ("trying to interrupt in use seqno: seqno = %llu, " "TO seqno = %llu", seqno, to->seqno); /* gu_mutex_unlock (&to->lock); */ rcode = -ERANGE; break; case CANCELED: gu_debug ("trying to interrupt canceled seqno: seqno = %llu, " "TO seqno = %llu", seqno, to->seqno); /* gu_mutex_unlock (&to->lock); */ rcode = -ERANGE; break; case WAIT: gu_debug ("signaling to interrupt wait seqno: seqno = %llu, " "TO seqno = %llu", seqno, to->seqno); rcode = to_wake_waiter (w); case RELEASED: w->state = INTERRUPTED; break; case INTERRUPTED: gu_debug ("TO waiter interrupt already seqno: seqno = %llu, " "TO seqno = %llu", seqno, to->seqno); break; } } else { gu_debug ("trying to interrupt used seqno: cancel seqno = %llu, " "TO seqno = %llu", seqno, to->seqno); /* gu_mutex_unlock (&to->lock); */ rcode = -ERANGE; } gu_mutex_unlock (&to->lock); return rcode; } galera-3-25.3.20/galerautils/src/gu_uuid.h0000644000015300001660000000552713042054732020064 0ustar jenkinsjenkins/* * Copyright (C) 2008 Codership Oy * * $Id$ */ /* * Universally Unique IDentifier. RFC 4122. * Time-based implementation. * */ #ifndef _gu_uuid_h_ #define _gu_uuid_h_ #include "gu_types.h" #ifdef __cplusplus extern "C" { #endif /*! UUID internally is represented as a BE integer which allows using * memcmp() as comparison function and straightforward printing */ #define GU_UUID_LEN 16 typedef struct { uint8_t data[GU_UUID_LEN]; } gu_uuid_t; extern const gu_uuid_t GU_UUID_NIL; /*! length of string representation */ #define GU_UUID_STR_LEN 36 /*! 
Macros for pretty printing */ #define GU_UUID_FORMAT \ "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x" #define GU_UUID_ARGS(uuid) \ (uuid)->data[ 0], (uuid)->data[ 1], (uuid)->data[ 2], (uuid)->data[ 3],\ (uuid)->data[ 4], (uuid)->data[ 5], (uuid)->data[ 6], (uuid)->data[ 7],\ (uuid)->data[ 8], (uuid)->data[ 9], (uuid)->data[10], (uuid)->data[11],\ (uuid)->data[12], (uuid)->data[13], (uuid)->data[14], (uuid)->data[15] /* this is used for scanf, variables are by reference */ #define GU_UUID_FORMAT_SCANF \ "%02hhx%02hhx%02hhx%02hhx-%02hhx%02hhx-%02hhx%02hhx-%02hhx%02hhx-%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx" #define GU_UUID_ARGS_SCANF(uuid) \ &(uuid)->data[ 0], &(uuid)->data[ 1], &(uuid)->data[ 2], &(uuid)->data[ 3],\ &(uuid)->data[ 4], &(uuid)->data[ 5], &(uuid)->data[ 6], &(uuid)->data[ 7],\ &(uuid)->data[ 8], &(uuid)->data[ 9], &(uuid)->data[10], &(uuid)->data[11],\ &(uuid)->data[12], &(uuid)->data[13], &(uuid)->data[14], &(uuid)->data[15] /*! * Generates new UUID. * If node is NULL, will generate random (if /dev/urand is present) or * pseudorandom data instead. * @param uuid * pointer to uuid_t * @param node * some unique data that goes in place of "node" field in the UUID * @param node_len * length of the node buffer */ extern void gu_uuid_generate (gu_uuid_t* uuid, const void* node, size_t node_len); /*! * Compare two UUIDs according to RFC * @return -1, 0, 1 if left is respectively less, equal or greater than right */ extern long gu_uuid_compare (const gu_uuid_t* left, const gu_uuid_t* right); /*! * Compare ages of two UUIDs * @return -1, 0, 1 if left is respectively younger, equal or older than right */ extern long gu_uuid_older (const gu_uuid_t* left, const gu_uuid_t* right); /*! * Print UUID into buffer * @return Number of bytes printed (not including trailing '\0') or -1 on error. */ extern ssize_t gu_uuid_print(const gu_uuid_t* uuid, char* buf, size_t buflen); /*! * Scan UUID from buffer * @return Number of bytes read (should match to sizeof(uuid)) or -1 on error */ extern ssize_t gu_uuid_scan(const char* buf, size_t buflen, gu_uuid_t* uuid); #ifdef __cplusplus } #endif /* __cplusplus */ #endif /* _gu_uuid_h_ */ galera-3-25.3.20/galerautils/src/gu_progress.hpp0000644000015300001660000000464213042054732021317 0ustar jenkinsjenkins/* * Copyright (C) 2016 Codership Oy */ #ifndef __GU_PROGRESS__ #define __GU_PROGRESS__ #include "gu_logger.hpp" #include "gu_datetime.hpp" #include #include namespace gu { template class Progress { std::string const prefix_; std::string const units_; gu::datetime::Period const time_interval_; T const unit_interval_; T const total_; T current_; T last_size_; gu::datetime::Date last_time_; void report(gu::datetime::Date const now) { log_info << prefix_ << '(' << total_ << units_ << ")... 
" << std::fixed << std::setprecision(1) << (double(current_)/total_ * 100) << "% (" << current_ << units_ << ") complete."; last_time_ = now; } public: /* * @param p prefix to be printed in each report * (include trailing space) * @param u units to be printed next to numbers (empty string - no units) * (include space between number and units) * @param ti minimal time interval to report progress * @param ui minimal unit interval to report progress * @param t total amount of work in units */ Progress(const std::string& p, const std::string& u, const std::string& ti, T const ui, T const t) : prefix_ (p), units_ (u), time_interval_(ti), unit_interval_(ui), total_ (t), current_ (0), last_size_ (current_), last_time_ () { report(gu::datetime::Date::monotonic()); } void update(T const increment) { current_ += increment; if (current_ - last_size_ >= unit_interval_) { gu::datetime::Date const now(gu::datetime::Date::monotonic()); if (now - last_time_ >= time_interval_) report(now); last_size_ = current_; } } void finish() { current_ = total_; report(gu::datetime::Date::monotonic()); } }; /* class Progress */ } /* namespace gu */ #endif /* __GU_PROGRESS__ */ galera-3-25.3.20/galerautils/src/gu_atomic.h0000644000015300001660000000660113042054732020364 0ustar jenkinsjenkins// Copyright (C) 2013-2014 Codership Oy /** * @file Atomic memory access functions. At the moment these follow * __atomic_XXX convention from GCC. */ #ifndef GU_ATOMIC_H #define GU_ATOMIC_H #ifdef __cplusplus extern "C" { #endif // So far in tests full memory sync shows the most consistent performance - // and it's the safest. @todo: reassess this later. #define GU_ATOMIC_SYNC_DEFAULT GU_ATOMIC_SYNC_FULL #ifdef __GNUC__ #if defined(__ATOMIC_RELAXED) // use __atomic_XXX builtins #define GU_ATOMIC_SYNC_NONE __ATOMIC_RELAXED #define GU_ATOMIC_SYNC_DEPEND __ATOMIC_ACQ_REL #define GU_ATOMIC_SYNC_FULL __ATOMIC_SEQ_CST #define gu_atomic_fetch_and_add(ptr, val) \ __atomic_fetch_add(ptr, val, GU_ATOMIC_SYNC_DEFAULT) #define gu_atomic_fetch_and_sub(ptr, val) \ __atomic_fetch_sub(ptr, val, GU_ATOMIC_SYNC_DEFAULT) #define gu_atomic_fetch_and_or(ptr, val) \ __atomic_fetch_or(ptr, val, GU_ATOMIC_SYNC_DEFAULT) #define gu_atomic_fetch_and_and(ptr, val) \ __atomic_fetch_and(ptr, val, GU_ATOMIC_SYNC_DEFAULT) #define gu_atomic_fetch_and_xor(ptr, val) \ __atomic_fetch_xor(ptr, val, GU_ATOMIC_SYNC_DEFAULT) #define gu_atomic_fetch_and_nand(ptr, val) \ __atomic_fetch_nand(ptr, val,GU_ATOMIC_SYNC_DEFAULT) #define gu_atomic_add_and_fetch(ptr, val) \ __atomic_add_fetch(ptr, val, GU_ATOMIC_SYNC_DEFAULT) #define gu_atomic_sub_and_fetch(ptr, val) \ __atomic_sub_fetch(ptr, val, GU_ATOMIC_SYNC_DEFAULT) #define gu_atomic_or_and_fetch(ptr, val) \ __atomic_or_fetch(ptr, val, GU_ATOMIC_SYNC_DEFAULT) #define gu_atomic_and_and_fetch(ptr, val) \ __atomic_and_fetch(ptr, val, GU_ATOMIC_SYNC_DEFAULT) #define gu_atomic_xor_and_fetch(ptr, val) \ __atomic_xor_fetch(ptr, val, GU_ATOMIC_SYNC_DEFAULT) #define gu_atomic_nand_and_fetch(ptr, val) \ __atomic_nand_fetch(ptr, val,GU_ATOMIC_SYNC_DEFAULT) // stores contents of vptr into ptr #define gu_atomic_set(ptr, vptr) \ __atomic_store(ptr, vptr, GU_ATOMIC_SYNC_DEFAULT) // loads contents of ptr to vptr #define gu_atomic_get(ptr, vptr) \ __atomic_load(ptr, vptr, GU_ATOMIC_SYNC_DEFAULT) #elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) // use __sync_XXX builtins #define GU_ATOMIC_SYNC_NONE 0 #define GU_ATOMIC_SYNC_DEPEND 0 #define GU_ATOMIC_SYNC_FULL 0 #define gu_atomic_fetch_and_add __sync_fetch_and_add #define 
gu_atomic_fetch_and_sub __sync_fetch_and_sub #define gu_atomic_fetch_and_or __sync_fetch_and_or #define gu_atomic_fetch_and_and __sync_fetch_and_and #define gu_atomic_fetch_and_xor __sync_fetch_and_xor #define gu_atomic_fetch_and_nand __sync_fetch_and_nand #define gu_atomic_add_and_fetch __sync_add_and_fetch #define gu_atomic_sub_and_fetch __sync_sub_and_fetch #define gu_atomic_or_and_fetch __sync_or_and_fetch #define gu_atomic_and_and_fetch __sync_and_and_fetch #define gu_atomic_xor_and_fetch __sync_xor_and_fetch #define gu_atomic_nand_and_fetch __sync_nand_and_fetch #define gu_atomic_set(ptr, vptr) \ while (!__sync_bool_compare_and_swap(ptr, *ptr, *vptr)); #define gu_atomic_get(ptr, vptr) *vptr = __sync_fetch_and_or(ptr, 0) #else #error "This GCC version does not support 8-byte atomics on this platform. Use GCC >= 4.7.x." #endif /* __ATOMIC_RELAXED */ #else /* __GNUC__ */ #error "Compiler not supported" #endif #ifdef __cplusplus } #endif #endif /* !GU_ATOMIC_H */ galera-3-25.3.20/galerautils/src/gu_assert.hpp0000644000015300001660000000127613042054732020754 0ustar jenkinsjenkins// Copyright (C) 2009 Codership Oy /** * @file Assert macro definition * * $Id$ */ #ifndef _gu_assert_hpp_ #define _gu_assert_hpp_ #ifndef DEBUG_ASSERT #include #else #include #undef assert #include "gu_logger.hpp" /** Assert that sleeps instead of aborting the program, saving it for gdb */ #define assert(expr) \ if (!(expr)) { \ log_fatal << "Assertion (" << __STRING(expr) << ") failed"; \ while(1) sleep(1); \ } #endif /* DEBUG_ASSERT */ #endif /* _gu_assert_hpp_ */ galera-3-25.3.20/galerautils/src/gu_vec16.h0000644000015300001660000000352713042054732020040 0ustar jenkinsjenkins// Copyright (C) 2013 Codership Oy /** * @file 16-byte "vector" type and operations - mostly for checksums/hashes * * $Id$ */ #ifndef _gu_vec16_h_ #define _gu_vec16_h_ #include "gu_macros.h" #include "gu_byteswap.h" #include #include #include /* bool */ #ifdef __cplusplus extern "C" { #endif /* this type will generate SIMD instructions where possible: good for XORing */ typedef unsigned long gu_vec16__ __attribute__ ((vector_size (16))); typedef union gu_vec16 { gu_vec16__ vec_; uint64_t int_[2]; /* for equality we better use scalar type * since we need scalar return */ } gu_vec16_t; static GU_FORCE_INLINE gu_vec16_t gu_vec16_from_byte (unsigned char b) { gu_vec16_t ret; memset (&ret, b, sizeof(ret)); return ret; } static GU_FORCE_INLINE gu_vec16_t gu_vec16_from_ptr (const void* ptr) { gu_vec16_t ret; memcpy (&ret, ptr, sizeof(ret)); return ret; } static GU_FORCE_INLINE gu_vec16_t gu_vec16_xor (gu_vec16_t l, gu_vec16_t r) { gu_vec16_t ret; ret.vec_ = (l.vec_ ^ r.vec_); return ret; } static GU_FORCE_INLINE bool gu_vec16_neq (gu_vec16_t l, gu_vec16_t r) { return (l.int_[0] != r.int_[0] || l.int_[1] != r.int_[1]); } static GU_FORCE_INLINE bool gu_vec16_eq (gu_vec16_t l, gu_vec16_t r) { return !(gu_vec16_neq (l, r)); } static GU_FORCE_INLINE gu_vec16_t gu_vec16_bswap (gu_vec16_t x) { gu_vec16_t ret; ret.int_[0] = gu_bswap64 (x.int_[1]); ret.int_[1] = gu_bswap64 (x.int_[0]); return ret; } #ifdef __cplusplus } static GU_FORCE_INLINE gu_vec16_t operator^ (const gu_vec16_t& l, const gu_vec16_t& r) { return (gu_vec16_xor (l, r)); } static GU_FORCE_INLINE bool operator== (const gu_vec16_t& l, const gu_vec16_t& r) { return (gu_vec16_eq (l, r)); } #endif #endif /* _gu_vec16_h_ */ galera-3-25.3.20/galerautils/src/gu_digest.hpp0000644000015300001660000001027513042054732020731 0ustar jenkinsjenkins// Copyright (C) 2013 Codership Oy /** * @file Message 
digest interface. * * $Id$ */ #ifndef GU_DIGEST_HPP #define GU_DIGEST_HPP #include "gu_hash.h" #include "gu_vec16.h" #include "gu_byteswap.hpp" #include "gu_serializable.hpp" #include "gu_macros.hpp" namespace gu { /* Just making MMH3 not derive from Digest reduced TrxHandle size from * 4560 bytes to 4256. 304 bytes of vtable pointers... */ class MMH3 { public: MMH3 () : ctx_() { gu_mmh128_init (&ctx_); } ~MMH3 () {} template <typename T> static int digest (const void* const in, size_t size, T& out) { byte_t tmp[16]; gu_mmh128(in, size, tmp); int const s(std::min(sizeof(T), sizeof(tmp))); ::memcpy (&out, tmp, s); return s; } /* experimental */ template <typename T> static T digest (const void* const in, size_t size) { switch (sizeof(T)) { case 1: return gu_mmh128_32(in, size); case 2: return gu_mmh128_32(in, size); case 4: return gu_mmh128_32(in, size); case 8: return gu_mmh128_64(in, size); } throw; } void append (const void* const buf, size_t const size) { gu_mmh128_append (&ctx_, buf, size); } template <size_t size> int gather (void* const buf) const { GU_COMPILE_ASSERT(size >= 16, wrong_buf_size); gather16 (buf); return 16; } int gather (void* const buf, size_t const size) const { byte_t tmp[16]; gather16(tmp); int const s(std::min(size, sizeof(tmp))); ::memcpy (buf, tmp, s); return s; } void gather16 (void* const buf) const { gu_mmh128_get (&ctx_, buf); } uint64_t gather8() const { return gu_mmh128_get64 (&ctx_); } uint32_t gather4() const { return gu_mmh128_get32 (&ctx_); } // a questionable feature template <typename T> int operator() (T& out) const { return gather<sizeof(T)>(&out); } private: gu_mmh128_ctx_t ctx_; }; /* class MMH3 */ template <> inline int MMH3::digest (const void* const in, size_t size, uint8_t& out) { out = gu_mmh128_32(in, size); return sizeof(out); } template <> inline int MMH3::digest (const void* const in, size_t size, uint16_t& out) { out = gu_mmh128_32(in, size); return sizeof(out); } template <> inline int MMH3::digest (const void* const in, size_t size, uint32_t& out) { out = gu_mmh128_32(in, size); return sizeof(out); } template <> inline int MMH3::digest (const void* const in, size_t size, uint64_t& out) { out = gu_mmh128_64(in, size); return sizeof(out); } template <> inline int MMH3::gather<8> (void* const out) const { *(static_cast<uint64_t*>(out)) = gather8(); return 8; } template <> inline int MMH3::gather<4> (void* const out) const { *(static_cast<uint32_t*>(out)) = gather4(); return 4; } typedef MMH3 Hash; class FastHash { public: template <typename T> static int digest (const void* const in, size_t size, T& out) { byte_t tmp[16]; gu_fast_hash128(in, size, tmp); int const s(std::min(sizeof(T), sizeof(tmp))); ::memcpy (&out, tmp, s); return s; } /* experimental */ template <typename T> static T digest (const void* const in, size_t size) { switch (sizeof(T)) { case 1: return gu_fast_hash32(in, size); case 2: return gu_fast_hash32(in, size); case 4: return gu_fast_hash32(in, size); case 8: return gu_fast_hash64(in, size); } throw; } }; /* FastHash */ template <> inline int FastHash::digest (const void* const in, size_t size, uint8_t& out) { out = gu_fast_hash32(in, size); return sizeof(out); } template <> inline int FastHash::digest (const void* const in, size_t size, uint16_t& out) { out = gu_fast_hash32(in, size); return sizeof(out); } template <> inline int FastHash::digest (const void* const in, size_t size, uint32_t& out) { out = gu_fast_hash32(in, size); return sizeof(out); } template <> inline int FastHash::digest (const void* const in, size_t size, uint64_t& out) { out = gu_fast_hash64(in, size); return sizeof(out); } } /* namespace gu */ #endif /* 
GU_DIGEST_HPP */ galera-3-25.3.20/galerautils/src/gu_macros.h0000644000015300001660000000353213042054732020374 0ustar jenkinsjenkins// Copyright (C) 2007-2013 Codership Oy /** * @file Miscellaneous macros * * $Id$ */ #ifndef _gu_macros_h_ #define _gu_macros_h_ /* * Platform-dependent macros */ #if defined(_MSC_VER) # define GU_NORETURN __declspec(noreturn) # define GU_INLINE __forceinline # define GU_FORCE_INLINE __forceinline # define GU_UNUSED # define GU_LONG(x) (x) # define GU_ULONG(x) (x) # define GU_LONG_LONG(x) (x) # define GU_ULONG_LONG(x) (x) # define GU_DEBUG_NORETURN #else /* !defined(_MSC_VER) */ # define GU_NORETURN __attribute__((noreturn)) # define GU_INLINE inline # define GU_FORCE_INLINE inline __attribute__((always_inline)) # define GU_UNUSED __attribute__((unused)) # define GU_LONG(x) (x##L) # define GU_ULONG(x) (x##LU) # define GU_LONG_LONG(x) (x##LL) # define GU_ULONG_LONG(x) (x##LLU) # ifndef __OPTIMIZE__ # define GU_DEBUG_NORETURN abort(); # else # define GU_DEBUG_NORETURN # endif #endif /* !defined(_MSC_VER) */ /* * End of paltform-dependent macros */ /* "Shamelessly stolen" (tm) goods from Linux kernel */ /* * min()/max() macros that also do * strict type-checking.. See the * "unnecessary" pointer comparison. */ #if 0 // typeof() is not in C99 #define GU_MAX(x,y) ({ \ typeof(x) _x = (x); \ typeof(y) _y = (y); \ (void) (&_x == &_y); \ _x > _y ? _x : _y; }) #define GU_MIN(x,y) ({ \ typeof(x) _x = (x); \ typeof(y) _y = (y); \ (void) (&_x == &_y); \ _x < _y ? _x : _y; }) #endif #define gu_offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) #if __GNUC__ >= 3 # define gu_likely(x) __builtin_expect((x), 1) # define gu_unlikely(x) __builtin_expect((x), 0) #else # define gu_likely(x) (x) # define gu_unlikely(x) (x) #endif #endif /* _gu_macros_h_ */ galera-3-25.3.20/galerautils/doc/0000755000015300001660000000000013042054732016217 5ustar jenkinsjenkinsgalera-3-25.3.20/galerautils/doc/Doxyfile0000644000015300001660000014366713042054732017746 0ustar jenkinsjenkins# Doxyfile 1.4.6 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project # # All text after a hash (#) is considered a comment and will be ignored # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" ") #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # The PROJECT_NAME tag is a single word (or a sequence of words surrounded # by quotes) that should identify the project. PROJECT_NAME = GCS # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. PROJECT_NUMBER = 0.2.3 # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. OUTPUT_DIRECTORY = ./ # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. 
# Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. # The default language is English, other supported languages are: # Brazilian, Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, # Dutch, Finnish, French, German, Greek, Hungarian, Italian, Japanese, # Japanese-en (Japanese with English messages), Korean, Korean-en, Norwegian, # Polish, Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish, # Swedish, and Ukrainian. OUTPUT_LANGUAGE = English # This tag can be used to specify the encoding used in the generated output. # The encoding is not always determined by the language that is chosen, # but also whether or not the output is meant for Windows or non-Windows users. # In case there is a difference, setting the USE_WINDOWS_ENCODING tag to YES # forces the Windows encoding (this is the default for the Windows binary), # whereas setting the tag to NO uses a Unix-style encoding (the default for # all platforms other than Windows). USE_WINDOWS_ENCODING = NO # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. # If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = YES # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. 
The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. This can be useful is your file systems # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like the Qt-style comments (thus requiring an # explicit @brief command for a brief description. JAVADOC_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the DETAILS_AT_TOP tag is set to YES then Doxygen # will output the detailed description near the top, like JavaDoc. # If set to NO, the detailed description appears after the member # documentation. DETAILS_AT_TOP = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 8 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". # For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = YES # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. Doxygen will then generate output that is more tailored for Java. # For instance, namespaces will be presented as packages, qualified scopes # will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # If you use STL classes (i.e. std::string, std::vector, etc.) 
but do not want to # include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); v.s. # func(std::string) {}). This also make the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. BUILTIN_STL_SUPPORT = NO # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. Alternatively, this can be done per class using # the \nosubgrouping command. SUBGROUPING = YES #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. # Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = NO # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = NO # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. # If set to NO (the default) only methods in the interface are included. EXTRACT_LOCAL_METHODS = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. # If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. 
HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = YES # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. If set to NO the members will appear in # declaration order. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. SORT_BRIEF_DOCS = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. SORT_BY_SCOPE_NAME = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if sectionname ... \endif. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or define consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. 
# The appearance of the initializer of individual variables and defines in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES # If the sources in your project are distributed over multiple directories # then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy # in the documentation. The default is NO. SHOW_DIRECTORIES = NO # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from the # version control system). Doxygen will invoke the program by executing (via # popen()) the command , where is the value of # the FILE_VERSION_FILTER tag, and is the name of an input file # provided by doxygen. Whatever the program writes to standard output # is used as the file version. See the manual for examples. FILE_VERSION_FILTER = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES # This WARN_NO_PARAMDOC option can be abled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. 
You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = ../src # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx # *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py FILE_PATTERNS = *.c *.h *.hpp # The RECURSIVE tag can be used to turn specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. RECURSIVE = NO # The EXCLUDE tag can be used to specify files and/or directories that should # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used select whether or not files or # directories that are symbolic links (a Unix filesystem feature) are excluded # from the input. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. Note that the wildcards are matched # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* EXCLUDE_PATTERNS = # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain image that are included in the documentation (see # the \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command , where # is the value of the INPUT_FILTER tag, and is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. If FILTER_PATTERNS is specified, this tag will be # ignored. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. Doxygen will compare the file name with each pattern and apply the # filter if there is a match. The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER # is applied to all files. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). 
FILTER_SOURCE_FILES = NO #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C and C++ comments will always remain visible. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES (the default) # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = YES # If the REFERENCES_RELATION tag is set to YES (the default) # then for each documented function all documented entities # called/used by that function will be listed. REFERENCES_RELATION = YES # If the USE_HTAGS tag is set to YES then the references to source code # will point to the HTML generated by the htags(1) tool instead of doxygen # built-in source browser. The htags tool is part of GNU's global source # tagging system (see http://www.gnu.org/software/global/global.html). You # will need version 4.8.6 or higher. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. ALPHABETICAL_INDEX = NO # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. # The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). 
If it is left blank # doxygen will generate files with .html extension. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. HTML_HEADER = # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If the tag is left blank doxygen # will generate a default style sheet. Note that doxygen will try to copy # the style sheet file to the HTML output directory, so don't put your own # stylesheet in the HTML output directory as well, or it will be erased! HTML_STYLESHEET = # If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, # files or namespaces will be aligned in HTML using tables. If set to # NO a bullet list will be used. HTML_ALIGN_MEMBERS = YES # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compressed HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory. CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = NO # The DISABLE_INDEX tag can be used to turn on/off the condensed index at # top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. DISABLE_INDEX = NO # This tag can be used to set the number of enum values (range [1..20]) # that doxygen will group on one line in the generated HTML documentation. ENUM_VALUES_PER_LINE = 4 # If the GENERATE_TREEVIEW tag is set to YES, a side panel will be # generated containing a tree-like index structure (just like the one that # is generated for HTML Help). For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, # Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are # probably better off using the HTML help feature. GENERATE_TREEVIEW = NO # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. 
TREEVIEW_WIDTH = 250 #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = NO # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `latex' will be used as the default path. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = makeindex # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, a4wide, letter, legal and # executive. If left blank a4wide will be used. PAPER_TYPE = a4wide # The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX # packages that should be included in the LaTeX output. EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! LATEX_HEADER = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). The pdf file will # contain links (just like the HTML output) instead of page references # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = NO # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. USE_PDFLATEX = NO # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. # command to the generated LaTeX files. This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. # This option is also used when generating formulas in HTML. LATEX_BATCHMODE = NO # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. LATEX_HIDE_INDICES = NO #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. 
This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load stylesheet definitions from file. Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. # Syntax is similar to doxygen's config file. RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES (the default) Doxygen will # generate man pages GENERATE_MAN = YES # The MAN_OUTPUT tag is used to specify where the man pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = NO # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path. XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify an XML schema, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_SCHEMA = # The XML_DTD tag can be used to specify an XML DTD, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. 
Note that this feature is still experimental # and incomplete at the moment. GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. This is useful # if you want to understand what is going on. On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. # This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. If set to NO (the default) only conditional # compilation will be performed. Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = NO # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_DEFINED tags. EXPAND_ONLY_PREDEF = NO # If the SEARCH_INCLUDES tag is set to YES (the default) the includes files # in the INCLUDE_PATH (see below) will be search if a #include is found. SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. To prevent a macro definition from being # undefined via #undef or recursively expanded use the := operator # instead of the = operator. PREDEFINED = # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. 
# The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all function-like macros that are alone # on a line, have an all uppercase name, and do not end with a semicolon. Such # function macros are typically used for boiler-plate code, and will confuse # the parser if not removed. SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. # Optionally an initial location of the external documentation # can be added for each tagfile. The format of a tag file without # this location is as follows: # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths or # URLs. If a location is present for each tag, the installdox tool # does not have to be run to correct the links. # Note that each tag file must have a unique name # (where the name does NOT include the path) # If a tag file is not located in the directory in which doxygen # is run, you must also specify the path to the tagfile here. TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. EXTERNAL_GROUPS = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base # or super classes. Setting the tag to NO turns the diagrams off. Note that # this option is superseded by the HAVE_DOT option below. This is only a # fallback. It is recommended to install and use dot, since it yields more # powerful graphs. CLASS_DIAGRAMS = YES # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = NO # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force the # CLASS_DIAGRAMS tag to NO.
CLASS_GRAPH = YES # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. COLLABORATION_GRAPH = YES # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. UML_LOOK = NO # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. INCLUDE_GRAPH = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT tags are set to YES then doxygen will # generate a call dependency graph for every global function or class method. # Note that enabling this option will significantly increase the time of a run. # So in most cases it will be better to enable call graphs for selected # functions only using the \callgraph command. CALL_GRAPH = NO # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will show a graphical hierarchy of all classes instead of a textual one. GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are png, jpg, or gif. # If left blank png will be used. DOT_IMAGE_FORMAT = png # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). DOTFILE_DIRS = # The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width # (in pixels) of the graphs generated by dot. If a graph becomes larger than # this value, doxygen will try to truncate the graph, so that it fits within # the specified constraint. Beware that most browsers cannot cope with very # large images. MAX_DOT_GRAPH_WIDTH = 1024 # The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allowed height # (in pixels) of the graphs generated by dot. If a graph becomes larger than # this value, doxygen will try to truncate the graph, so that it fits within # the specified constraint. Beware that most browsers cannot cope with very # large images. MAX_DOT_GRAPH_HEIGHT = 1024 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot.
A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lay further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. Also note that a graph may be further truncated if the graph's # image dimensions are not sufficient to fit the graph (see MAX_DOT_GRAPH_WIDTH # and MAX_DOT_GRAPH_HEIGHT). If 0 is used for the depth value (the default), # the graph is not depth-constrained. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, which results in a white background. # Warning: Depending on the platform used, enabling this option may lead to # badly anti-aliased labels on the edges of a graph (i.e. they become hard to # read). DOT_TRANSPARENT = NO # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. DOT_MULTI_TARGETS = NO # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES #--------------------------------------------------------------------------- # Configuration::additions related to the search engine #--------------------------------------------------------------------------- # The SEARCHENGINE tag specifies whether or not a search engine should be # used. If set to NO the values of all tags below this one will be ignored. SEARCHENGINE = NO galera-3-25.3.20/galerautils/SConscript0000644000015300001660000000014013042054732017457 0ustar jenkinsjenkins# SConscript for building galerautils SConscript(Split('''src/SConscript tests/SConscript''')) galera-3-25.3.20/galerautils/ChangeLog0000644000015300001660000000270213042054732017225 0ustar jenkinsjenkins2009-09-20 Alex Added RegEx class for matching strings with POSIX regular expressions. Renamed URL class to URI to better reflect what it does. Added get_host(), get_user() and get_port() methods and a unit test. Modularized galerautils++ unit tests. Version 0.3.5 2009-09-17 Alex Added gu_utils.hpp to hold general-purpose templates and functions (now with to_string() template functions). Logger class cleanups. Exception class cleanups. Added stack tracing macro. New Throw class for composing verbose exception messages. Version 0.3.4 2009-09-01 Alex Added a simple option line parser. Some optimizations and cleanups. Version 0.3.3 2009-07-07 Alex Slightly changed gu_fifo interface. Added gu_lock_step object. Version 0.3.2. 2009-06-21 Alex Moved TO monitor module from GCS to galerautils. Version 0.3.1. 2009-06-08 Alex Started galerautils++ project. Added galerautils.hpp and C++-style logger and assert variants. Version 0.3.0. 2008-11-16 Alex Added gu_fifo_t class for mallocless FIFO queue. Version 0.2.9. 2008-03-23 Alex Added gu_timeval_diff() and gu_clock_diff() functions. Bumped interface version. 2008-02-21 Teemu Made DBUG thread safe. 2007-11-01 Alex Fixed thread safe compilation without MySQL Tagged release 0.2.5 2007-10-18 Alex Fixed compilation. 
Added gtohl/htogl/gtohs/htogs functions. Tagged release 0.2.4 galera-3-25.3.20/galerautils/README0000644000015300001660000000112013042054732016324 0ustar jenkinsjenkinslibgalerautils is a library of utilities commonly used by Galera project. Current release includes logging, mutex and malloc debug functions and convenience macros. This software is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY, to the extent permitted by law; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. libgalerautils is free software. Please see the file COPYING for details. For documentation, please see the files in the doc subdirectory. For building and installation instructions please see the INSTALL file. galera-3-25.3.20/galerautils/tests/0000755000015300001660000000000013042054732016614 5ustar jenkinsjenkinsgalera-3-25.3.20/galerautils/tests/gu_datetime_test.hpp0000644000015300001660000000034113042054732022651 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy * * $Id$ */ #ifndef __gu_datetime_test_hpp__ #define __gu_datetime_test_hpp__ #include Suite* gu_datetime_suite(); #endif // __gu_datetime_test_hpp__ galera-3-25.3.20/galerautils/tests/gu_net_test.hpp0000644000015300001660000000030713042054732021645 0ustar jenkinsjenkins// Copyright (C) 2007 Codership Oy // $Id$ #ifndef __gu_net_test__ #define __gu_net_test__ #include extern Suite *gu_net_suite(void); #endif /* __gu_net_test__ */ galera-3-25.3.20/galerautils/tests/gu_config_test.hpp0000644000015300001660000000032313042054732022322 0ustar jenkinsjenkins// Copyright (C) 2013 Codership Oy // $Id$ #ifndef __gu_config_test__ #define __gu_config_test__ #include extern Suite *gu_config_suite(void); #endif /* __gu_config_test__ */ galera-3-25.3.20/galerautils/tests/gu_fnv_test.h0000644000015300001660000000030713042054732021310 0ustar jenkinsjenkins// Copyright (C) 2012 Codership Oy // $Id$ #ifndef __gu_fnv_test__ #define __gu_fnv_test__ #include extern Suite *gu_fnv_suite(void); #endif /* __gu_fnv_test__ */ galera-3-25.3.20/galerautils/tests/gu_mem_pool_test.cpp0000644000015300001660000000230113042054732022655 0ustar jenkinsjenkins// Copyright (C) 2013 Codership Oy // $Id$ #define TEST_SIZE 1024 #include "gu_mem_pool.hpp" #include "gu_mem_pool_test.hpp" START_TEST (unsafe) { gu::MemPoolUnsafe mp(10, 1, "unsafe"); void* const buf0(mp.acquire()); fail_if(NULL == buf0); void* const buf1(mp.acquire()); fail_if(NULL == buf1); fail_if(buf0 == buf1); mp.recycle(buf0); void* const buf2(mp.acquire()); fail_if(NULL == buf2); fail_if(buf0 != buf2); log_info << mp; mp.recycle(buf1); mp.recycle(buf2); } END_TEST START_TEST (safe) { gu::MemPoolSafe mp(10, 1, "safe"); void* const buf0(mp.acquire()); fail_if(NULL == buf0); void* const buf1(mp.acquire()); fail_if(NULL == buf1); fail_if(buf0 == buf1); mp.recycle(buf0); void* const buf2(mp.acquire()); fail_if(NULL == buf2); fail_if(buf0 != buf2); log_info << mp; mp.recycle(buf1); mp.recycle(buf2); } END_TEST Suite *gu_mem_pool_suite(void) { Suite *s = suite_create("gu::MemPool"); TCase *tc_mem = tcase_create("gu_mem_pool"); suite_add_tcase (s, tc_mem); tcase_add_test(tc_mem, unsafe); tcase_add_test(tc_mem, safe); return s; } galera-3-25.3.20/galerautils/tests/gu_hash_test.h0000644000015300001660000000031313042054732021437 0ustar jenkinsjenkins// Copyright (C) 2012 Codership Oy // $Id$ #ifndef __gu_hash_test__ #define __gu_hash_test__ #include extern Suite *gu_hash_suite(void); #endif /* __gu_hash_test__ */ 
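/*
 * Illustrative sketch only (not a file from this archive): the test headers
 * above each declare a libcheck suite factory (gu_uuid_suite(),
 * gu_bswap_suite(), gu_hash_suite(), ...). A driver is assumed to collect
 * those suites into a single SRunner roughly as below; the driver layout,
 * the exact suite list and the exit-code handling are assumptions made for
 * illustration, not the package's actual test main().
 */
#include <check.h>
#include <stdlib.h>

#include "gu_uuid_test.h"
#include "gu_bswap_test.h"

int main(void)
{
    /* Seed the runner with one suite, then append the rest. */
    SRunner* sr = srunner_create(gu_uuid_suite());
    srunner_add_suite(sr, gu_bswap_suite());

    /* CK_NORMAL prints a summary and lists any failed checks. */
    srunner_run_all(sr, CK_NORMAL);

    int failed = srunner_ntests_failed(sr);
    srunner_free(sr);

    return 0 == failed ? EXIT_SUCCESS : EXIT_FAILURE;
}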
galera-3-25.3.20/galerautils/tests/gu_uuid_test.c0000644000015300001660000000214713042054732021464 0ustar jenkinsjenkins/* * Copyright (C) 2008 Codership Oy * * $Id$ */ #include #include #include #include #include "../src/gu_log.h" #include "../src/gu_uuid.h" #include "gu_uuid_test.h" START_TEST (gu_uuid_test) { size_t uuid_num = 10; gu_uuid_t uuid[uuid_num]; size_t i; uuid[0] = GU_UUID_NIL; gu_uuid_generate (&uuid[0], NULL, 0); fail_if (!memcmp (&uuid[0], &GU_UUID_NIL, sizeof(gu_uuid_t))); fail_if (!gu_uuid_compare(&uuid[0], &GU_UUID_NIL)); for (i = 1; i < uuid_num; i++) { uuid[i] = GU_UUID_NIL; gu_uuid_generate (&uuid[i], NULL, 0); fail_if (!gu_uuid_compare(&uuid[i], &GU_UUID_NIL)); fail_if (!gu_uuid_compare(&uuid[i], &uuid[i - 1])); fail_if (1 != gu_uuid_older (&uuid[i - 1], &uuid[i])); fail_if (-1 != gu_uuid_older (&uuid[i], &uuid[i - 1])); } } END_TEST Suite *gu_uuid_suite(void) { Suite *suite = suite_create("Galera UUID utils"); TCase *tcase = tcase_create("gu_uuid"); suite_add_tcase (suite, tcase); tcase_add_test (tcase, gu_uuid_test); return suite; } galera-3-25.3.20/galerautils/tests/gu_bswap_test.c0000644000015300001660000000267513042054732021640 0ustar jenkinsjenkins// Copyright (C) 2007 Codership Oy // $Id$ #include #include #include "gu_bswap_test.h" #include "../src/gu_byteswap.h" START_TEST (gu_bswap_test) { // need volatile to prevent compile-time optimization volatile uint16_t s = 0x1234; volatile uint32_t i = 0x12345678; volatile uint64_t l = 0x1827364554637281LL; uint16_t sle, sbe; uint32_t ile, ibe; uint64_t lle, lbe; // first conversion sle = gu_le16(s); sbe = gu_be16(s); ile = gu_le32(i); ibe = gu_be32(i); lle = gu_le64(l); lbe = gu_be64(l); #if __BYTE_ORDER == __LITTLE_ENDIAN fail_if (s != sle); fail_if (i != ile); fail_if (l != lle); fail_if (s == sbe); fail_if (i == ibe); fail_if (l == lbe); #else fail_if (s == sle); fail_if (i == ile); fail_if (l == lle); fail_if (s != sbe); fail_if (i != ibe); fail_if (l != lbe); #endif /* __BYTE_ORDER */ // second conversion sle = gu_le16(sle); sbe = gu_be16(sbe); ile = gu_le32(ile); ibe = gu_be32(ibe); lle = gu_le64(lle); lbe = gu_be64(lbe); fail_if (s != sle); fail_if (i != ile); fail_if (l != lle); fail_if (s != sbe); fail_if (i != ibe); fail_if (l != lbe); } END_TEST Suite *gu_bswap_suite(void) { Suite *s = suite_create("Galera byteswap functions"); TCase *tc = tcase_create("gu_bswap"); suite_add_tcase (s, tc); tcase_add_test (tc, gu_bswap_test); return s; } galera-3-25.3.20/galerautils/tests/gu_atomic_test.cpp0000644000015300001660000001027713042054732022335 0ustar jenkinsjenkins/* * Copyright (C) 2014 Codership Oy */ #include "../src/gu_atomic.hpp" #include "gu_atomic_test.hpp" #include "gu_limits.h" #include START_TEST(test_sanity_c) { int64_t i, j, k; i = 1; j = 0; k = 3; gu_atomic_set(&i, &j); fail_if(j != 0); fail_if(i != 0); gu_atomic_get(&i, &k); fail_if(i != 0); fail_if(k != 0); j = gu_atomic_fetch_and_add (&i, 7); fail_if(j != 0); fail_if(i != 7); j = gu_atomic_fetch_and_sub (&i, 10); fail_if(j != 7); fail_if(i != -3); j = gu_atomic_fetch_and_or (&i, 15); fail_if(j != -3); fail_if(i != -1); j = gu_atomic_fetch_and_and (&i, 5); fail_if(j != -1); fail_if(i != 5); j = gu_atomic_fetch_and_xor (&i, 3); fail_if(j != 5); fail_if(i != 6); j = gu_atomic_fetch_and_nand(&i, 15); fail_if(j != 6); fail_if(i != -7); j = gu_atomic_add_and_fetch (&i, 7); fail_if(j != 0); fail_if(i != 0); j = gu_atomic_sub_and_fetch (&i, -2); fail_if(j != 2); fail_if(i != 2); j = gu_atomic_or_and_fetch (&i, 5); fail_if(j != 7); fail_if(i != 7); j = 
gu_atomic_and_and_fetch (&i, 13); fail_if(j != 5); fail_if(i != 5); j = gu_atomic_xor_and_fetch (&i, 15); fail_if(j != 10); fail_if(i != 10); j = gu_atomic_nand_and_fetch(&i, 7); fail_if(j != -3); fail_if(i != -3); } END_TEST START_TEST(test_sanity_cxx) { gu::Atomic i(1); int64_t const k(3); fail_if(i() != 1); fail_if(i() == k); fail_if((i = k) != k); fail_if(i() != k); fail_if(i.fetch_and_zero() != k); fail_if(i() != 0); fail_if(i.fetch_and_add(5) != 0); fail_if(i() != 5); fail_if(i.add_and_fetch(3) != 8); fail_if(i() != 8); fail_if((++i)() != 9); fail_if(i() != 9); fail_if((--i)() != 8); fail_if(i() != 8); i += 3; fail_if(i() != 11); } END_TEST // we want it sufficiently long to test above least 4 bytes, but sufficiently // short to avoid overflow static long long const increment(333333333333LL); // number of add/sub thread pairs static int const n_threads(8); // maximum iterations number (to guarantee no overflow) static int const max_iter(GU_LLONG_MAX/increment/n_threads); // number of iterations capped at 1M, just in case static int const iterations(max_iter > 1000000 ? 1000000 : max_iter); static void* add_loop(void* arg) { int64_t* const var(static_cast(arg)); for (int i(iterations); --i;) { gu_atomic_fetch_and_add(var, increment); } return NULL; } static void* sub_loop(void* arg) { int64_t* const var(static_cast(arg)); for (int i(iterations); --i;) { gu_atomic_fetch_and_sub(var, increment); } return NULL; } static int start_threads(pthread_t* threads, int64_t* var) { for (int i(0); i < n_threads; ++i) { pthread_t* const add_thr(&threads[i * 2]); pthread_t* const sub_thr(add_thr + 1); int const add_err(pthread_create(add_thr, NULL, add_loop, var)); int const sub_err(pthread_create(sub_thr, NULL, sub_loop, var)); if (add_err != 0) return add_err; if (sub_err != 0) return sub_err; } return 0; } static int join_threads(pthread_t* threads) { for (int i(0); i < n_threads; ++i) { pthread_t* const add_thr(&threads[i * 2]); pthread_t* const sub_thr(add_thr + 1); int const add_err(pthread_join(*add_thr, NULL)); int const sub_err(pthread_join(*sub_thr, NULL)); if (add_err != 0) return add_err; if (sub_err != 0) return sub_err; } return 0; } // This may not catch concurrency problems every time. But sometimes it should // (if there are any). 
START_TEST(test_concurrency) { fail_if(iterations < 1000000); int64_t var(0); pthread_t threads[n_threads * 2]; fail_if(start_threads(threads, &var)); fail_if(join_threads(threads)); fail_if(0 != var); } END_TEST Suite* gu_atomic_suite() { TCase* t1 = tcase_create ("sanity"); tcase_add_test (t1, test_sanity_c); tcase_add_test (t1, test_sanity_cxx); TCase* t2 = tcase_create ("concurrency"); tcase_add_test (t2, test_concurrency); tcase_set_timeout(t2, 60); Suite* s = suite_create ("gu::Atomic"); suite_add_tcase (s, t1); suite_add_tcase (s, t2); return s; } galera-3-25.3.20/galerautils/tests/gu_uri_test.hpp0000644000015300001660000000030713042054732021656 0ustar jenkinsjenkins// Copyright (C) 2007 Codership Oy // $Id$ #ifndef __gu_uri_test__ #define __gu_uri_test__ #include extern Suite *gu_uri_suite(void); #endif /* __gu_uri_test__ */ galera-3-25.3.20/galerautils/tests/gu_crc32c_test.c0000644000015300001660000000641213042054732021574 0ustar jenkinsjenkins/* * Copyright (C) 2013-2014 Codership Oy * * $Id$ */ #include "../src/gu_crc32c.h" #include "gu_crc32c_test.h" #include #define long_input \ "0123456789abcdef0123456789ABCDEF" \ "0123456789abcdef0123456789ABCDEF" \ "0123456789abcdef0123456789ABCDEF" \ "0123456789abcdef0123456789ABCDEF" \ "0123456789abcdef0123456789ABCDEF" \ "0123456789abcdef0123456789ABCDEF" #define long_output 0x7e5806b3 struct test_pair { const char* input; uint32_t output; }; //#define test_vector_length 6 /* * boost::crc_optimal<32, 0x1EDC6F41, 0, 0, true, true> crc; */ static struct test_pair test_vector[] = { { "", 0x00000000 }, { "1", 0x90f599e3 }, { "22", 0x47b26cf9 }, { "333", 0x4cb6e5c8 }, { "4444", 0xfb8150f7 }, { "55555", 0x23874b2f }, { "666666", 0xfad65244 }, { "7777777", 0xe4cbaa36 }, { "88888888", 0xda8901c2 }, { "123456789", 0xe3069283 }, // taken from SCTP mailing list { "My", 0xc7600404 }, // taken from { "test", 0x86a072c0 }, // http://www.zorc.breitbandkatze.de/crc.html { "vector", 0xa0b8f38a }, { long_input, long_output}, { NULL, 0x0 } }; static void test_function(void) { int i; for (i = 0; test_vector[i].input != NULL; i++) { const char* const input = test_vector[i].input; uint32_t const output = test_vector[i].output; uint32_t ret = gu_crc32c(input, strlen(input)); fail_if(ret != output, "Input '%s' resulted in %#08x, expected %#08x\n", input, ret, output); } const char* const input = long_input; uint32_t const output = long_output; int const size = strlen(input); int offset = 0; gu_crc32c_t crc; gu_crc32c_init(&crc); #define CRC_APPEND(x) gu_crc32c_append(&crc, &input[offset], x); offset += x; CRC_APPEND(1); CRC_APPEND(3); CRC_APPEND(5); CRC_APPEND(7); CRC_APPEND(13); CRC_APPEND(15); mark_point(); CRC_APPEND(0); CRC_APPEND(27); CRC_APPEND(43); CRC_APPEND(64); int tail = size - offset; fail_if (tail < 0); CRC_APPEND(tail); uint32_t ret = gu_crc32c_get (crc); fail_if (ret != output, "Generated %#08x, expected %#08x\n", ret, output); } START_TEST(test_Sarwate) { gu_crc32c_func = crc32cSarwate; test_function(); } END_TEST START_TEST(test_SlicingBy4) { gu_crc32c_func = crc32cSlicingBy4; test_function(); } END_TEST START_TEST(test_SlicingBy8) { gu_crc32c_func = crc32cSlicingBy8; test_function(); } END_TEST // will run a hardware test, if available START_TEST(test_hardware) { gu_crc32c_configure(); test_function(); } END_TEST Suite *gu_crc32c_suite(void) { Suite *suite = suite_create("CRC32C implementation"); TCase *sw = tcase_create("test_sw"); suite_add_tcase (suite, sw); tcase_add_test (sw, test_Sarwate); tcase_add_test (sw, test_SlicingBy4); 
tcase_add_test (sw, test_SlicingBy8); TCase *hw = tcase_create("test_hw"); suite_add_tcase (suite, hw); tcase_add_test (hw, test_hardware); return suite; } galera-3-25.3.20/galerautils/tests/gu_tests++.hpp0000644000015300001660000000221113042054732021304 0ustar jenkinsjenkins// Copyright (C) 2009-2014 Codership Oy // $Id$ /*! * @file: package specific part of the main test file. */ #ifndef __gu_testspp_hpp__ #define __gu_testspp_hpp__ #define LOG_FILE "gu_tests++.log" #include "gu_atomic_test.hpp" #include "gu_vector_test.hpp" #include "gu_string_test.hpp" #include "gu_vlq_test.hpp" #include "gu_digest_test.hpp" #include "gu_mem_pool_test.hpp" #include "gu_alloc_test.hpp" #include "gu_rset_test.hpp" #include "gu_string_utils_test.hpp" #include "gu_uri_test.hpp" #include "gu_config_test.hpp" #include "gu_net_test.hpp" #include "gu_datetime_test.hpp" #include "gu_histogram_test.hpp" #include "gu_stats_test.hpp" #include "gu_thread_test.hpp" typedef Suite *(*suite_creator_t)(void); static suite_creator_t suites[] = { gu_atomic_suite, gu_vector_suite, gu_string_suite, gu_vlq_suite, gu_digest_suite, gu_mem_pool_suite, gu_alloc_suite, gu_rset_suite, gu_string_utils_suite, gu_uri_suite, gu_config_suite, gu_net_suite, gu_datetime_suite, gu_histogram_suite, gu_stats_suite, gu_thread_suite, 0 }; #endif /* __gu_testspp_hpp__ */ galera-3-25.3.20/galerautils/tests/gu_alloc_test.cpp0000644000015300001660000000426313042054732022151 0ustar jenkinsjenkins// Copyright (C) 2013 Codership Oy // $Id$ #include "../src/gu_alloc.hpp" #include "gu_alloc_test.hpp" class TestBaseName : public gu::Allocator::BaseName { std::string str_; public: TestBaseName(const char* name) : str_(name) {} void print(std::ostream& os) const { os << str_; } }; START_TEST (basic) { ssize_t const extra_size(1 << 12); /* extra size to force new page */ gu::byte_t reserved[extra_size]; const char test0[] = "test0"; ssize_t const test0_size(sizeof(test0)); const char test1[] = "test1"; ssize_t const test1_size(sizeof(test1) + extra_size); TestBaseName test_name("gu_alloc_test"); gu::Allocator a(test_name, reserved, sizeof(reserved), sizeof(test1), 1 << 16); mark_point(); void* p; size_t r, s = 0; bool n; r = 0; s += r; mark_point(); p = a.alloc(r, n); fail_if (p != 0); fail_if (n); fail_if (a.size() != s); r = test0_size; s += r; mark_point(); p = a.alloc(r, n); fail_if (0 == p); fail_if (n); fail_if (a.size() != s); strcpy (reinterpret_cast(p), test0); r = test1_size; s += r; mark_point(); p = a.alloc(r, n); fail_if (0 == p); fail_if (!n); /* new page must be allocated */ fail_if (a.size() != s); strcpy (reinterpret_cast(p), test1); r = 0; s += r; mark_point(); p = a.alloc(r, n); fail_if (p != 0); fail_if (n); fail_if (a.size() != s); #ifdef GU_ALLOCATOR_DEBUG std::vector out; out.reserve (a.count()); mark_point(); size_t out_size = a.gather (out); fail_if (out_size != test0_size + test1_size); fail_if (out.size() != 2); fail_if (out[0].size != test0_size); fail_if (strcmp(reinterpret_cast(out[0].ptr), test0)); fail_if (out[1].size != test1_size); fail_if (strcmp(reinterpret_cast(out[1].ptr), test1)); #endif /* GU_ALLOCATOR_DEBUG */ } END_TEST Suite* gu_alloc_suite () { TCase* t = tcase_create ("Allocator"); tcase_add_test (t, basic); Suite* s = suite_create ("gu::Allocator"); suite_add_tcase (s, t); return s; } galera-3-25.3.20/galerautils/tests/gu_utils_test.c0000644000015300001660000000570613042054732021662 0ustar jenkinsjenkins// Copyright (C) 2010 Codership Oy // $Id$ #include #include "gu_utils_test.h" #include "../src/gu_utils.h" 
#include #include START_TEST (gu_strconv_test) { long long llret; const char* strret; strret = gu_str2ll ("-1a", &llret); fail_if (strret[0] != 'a'); fail_if (-1 != llret); strret = gu_str2ll ("1K", &llret); fail_if (strret[0] != '\0'); fail_if ((1 << 10) != llret); strret = gu_str2ll ("-1m", &llret); fail_if (strret[0] != '\0'); fail_if (-(1 << 20) != llret); strret = gu_str2ll ("354G0", &llret); fail_if (strret[0] != '0'); fail_if ((354LL << 30) != llret); strret = gu_str2ll ("0m", &llret); fail_if (strret[0] != '\0'); fail_if (0 != llret); strret = gu_str2ll ("-999999999999999g", &llret); fail_if (strret[0] != '\0'); fail_if (LLONG_MIN != llret); bool b; strret = gu_str2bool ("-1a", &b); fail_if (strret[0] != '-'); fail_if (false != b); strret = gu_str2bool ("-1", &b); fail_if (strret[0] != '-'); fail_if (false != b); strret = gu_str2bool ("1a", &b); fail_if (strret[0] != '1'); fail_if (false != b); strret = gu_str2bool ("35", &b); fail_if (strret[0] != '3'); fail_if (false != b); strret = gu_str2bool ("0k", &b); fail_if (strret[0] != '0'); fail_if (false != b); strret = gu_str2bool ("1", &b); fail_if (strret[0] != '\0'); fail_if (true != b); strret = gu_str2bool ("0", &b); fail_if (strret[0] != '\0'); fail_if (false != b); strret = gu_str2bool ("Onn", &b); fail_if (strret[0] != 'O'); fail_if (false != b); strret = gu_str2bool ("oFf", &b); fail_if (strret[0] != '\0'); fail_if (false != b); strret = gu_str2bool ("offt", &b); fail_if (strret[0] != 'o'); fail_if (false != b); strret = gu_str2bool ("On", &b); fail_if (strret[0] != '\0'); fail_if (true != b); strret = gu_str2bool ("tru", &b); fail_if (strret[0] != 't'); fail_if (false != b); strret = gu_str2bool ("trUE", &b); fail_if (strret[0] != '\0'); fail_if (true != b); strret = gu_str2bool ("truEth", &b); fail_if (strret[0] != 't'); fail_if (false != b); strret = gu_str2bool (" fALsE", &b); fail_if (strret[0] != ' '); fail_if (false != b); strret = gu_str2bool ("fALsE", &b); fail_if (strret[0] != '\0'); fail_if (false != b); strret = gu_str2bool ("fALsEth", &b); fail_if (strret[0] != 'f'); fail_if (false != b); void* ptr; strret = gu_str2ptr ("-01234abc", &ptr); fail_if (strret[0] != '\0'); fail_if (-0x1234abcLL != (intptr_t)ptr, "Expected %lld, got %lld", -0x1234abcLL, (intptr_t)ptr); } END_TEST Suite *gu_utils_suite(void) { Suite *s = suite_create("Galera misc utils functions"); TCase *tc = tcase_create("gu_utils"); suite_add_tcase (s, tc); tcase_add_test (tc, gu_strconv_test); return s; } galera-3-25.3.20/galerautils/tests/gu_stats_test.hpp0000644000015300001660000000031213042054732022211 0ustar jenkinsjenkins/* * Copyright (C) 2014 Codership Oy */ #ifndef __gu_stats_test__ #define __gu_stats_test__ #include extern Suite *gu_stats_suite(void); #endif // __gu_stats_test__ galera-3-25.3.20/galerautils/tests/gu_fnv_test.c0000644000015300001660000000334613042054732021311 0ustar jenkinsjenkins// Copyright (C) 2012 Codership Oy // $Id$ #include "gu_fnv_test.h" #include static const char* const test_buf = "chongo /\\../\\"; // enable normal FNV mode for reference hash checking #define GU_FNV_NORMAL #include "../src/gu_fnv.h" START_TEST (gu_fnv32_test) { uint32_t ret = 0; gu_fnv32a_internal (test_buf, strlen(test_buf), &ret); fail_if (GU_FNV32_SEED != ret, "FNV32 failed: expected %"PRIu32", got %"PRIu32, GU_FNV32_SEED, ret); } END_TEST START_TEST (gu_fnv64_test) { uint64_t ret = 0; gu_fnv64a_internal (test_buf, strlen(test_buf), &ret); fail_if (GU_FNV64_SEED != ret, "FNV64 failed: expected %"PRIu64", got %"PRIu64, GU_FNV64_SEED, ret); } 
END_TEST START_TEST (gu_fnv128_test) { gu_uint128_t GU_SET128(ret, 0, 0); gu_fnv128a_internal (test_buf, strlen(test_buf), &ret); #if defined(__SIZEOF_INT128__) fail_if (!GU_EQ128(GU_FNV128_SEED, ret), "FNV128 failed: expected %"PRIx64" %"PRIx64", got %"PRIx64" %"PRIx64, (uint64_t)(GU_FNV128_SEED >> 64), (uint64_t)GU_FNV128_SEED, (uint64_t)(ret >> 64), (uint64_t)ret); #else fail_if (!GU_EQ128(GU_FNV128_SEED, ret), "FNV128 failed: expected %"PRIx64" %"PRIx64", got %"PRIx64" %"PRIx64, GU_FNV128_SEED.u64[GU_64HI], GU_FNV128_SEED.u64[GU_64LO], ret.u64[GU_64HI], ret.u64[GU_64LO]); #endif } END_TEST Suite *gu_fnv_suite(void) { Suite *s = suite_create("FNV hash"); TCase *tc_fnv = tcase_create("gu_fnv"); suite_add_tcase (s, tc_fnv); tcase_add_test(tc_fnv, gu_fnv32_test); tcase_add_test(tc_fnv, gu_fnv64_test); tcase_add_test(tc_fnv, gu_fnv128_test); return s; } galera-3-25.3.20/galerautils/tests/gu_utils_test.h0000644000015300001660000000026413042054732021661 0ustar jenkinsjenkins// Copyright (C) 2010 Codership Oy // $Id$ #ifndef __gu_utils_test__ #define __gu_utils_test__ Suite *gu_utils_suite(void); #endif /* __gu_utils_test__ */ galera-3-25.3.20/galerautils/tests/gu_string_utils_test.hpp0000644000015300001660000000035313042054732023606 0ustar jenkinsjenkins// Copyright (C) 2007 Codership Oy // $Id$ #ifndef __gu_string_utils_test__ #define __gu_string_utils_test__ #include extern Suite* gu_string_utils_suite(void); #endif /* __gu_string_utils_test__ */ galera-3-25.3.20/galerautils/tests/gu_string_test.hpp0000644000015300001660000000033113042054732022362 0ustar jenkinsjenkins/* Copyright (C) 2013 Codership Oy * * $Id$ */ #ifndef __gu_string_test__ #define __gu_string_test__ #include extern Suite *gu_string_suite(void); #endif /* __gu_string_test__ */ galera-3-25.3.20/galerautils/tests/gu_dbug_test.c0000644000015300001660000000315013042054732021432 0ustar jenkinsjenkins// Copyright (C) 2008 Codership Oy // $Id$ /* Pthread yield */ #define _GNU_SOURCE 1 #include #include #include #include #include "gu_dbug_test.h" #include "../src/gu_dbug.h" static void cf() { GU_DBUG_ENTER("cf"); GU_DBUG_PRINT("galera", ("hello from cf")); sched_yield(); GU_DBUG_VOID_RETURN; } static void bf() { GU_DBUG_ENTER("bf"); GU_DBUG_PRINT("galera", ("hello from bf")); sched_yield(); cf(); GU_DBUG_VOID_RETURN; } static void af() { GU_DBUG_ENTER("af"); GU_DBUG_PRINT("galera", ("hello from af")); sched_yield(); bf(); GU_DBUG_VOID_RETURN; } static time_t stop = 0; static void *dbg_thr(void *arg) { while (time(NULL) < stop) { af(); } pthread_exit(NULL); } START_TEST(gu_dbug_test) { int i; #define N_THREADS 10 pthread_t th[N_THREADS]; /* Log > /dev/null */ GU_DBUG_FILE = fopen("/dev/null", "a+"); /* These should not produce output yet */ af(); af(); af(); /* Start logging */ GU_DBUG_PUSH("d:t:i"); GU_DBUG_PRINT("galera", ("Start logging")); af(); af(); af(); /* Run few threads concurrently */ stop = time(NULL) + 2; for (i = 0; i < N_THREADS; i++) pthread_create(&th[i], NULL, &dbg_thr, NULL); for (i = 0; i < N_THREADS; i++) pthread_join(th[i], NULL); } END_TEST Suite *gu_dbug_suite(void) { Suite *s = suite_create("Galera dbug functions"); TCase *tc = tcase_create("gu_dbug"); suite_add_tcase (s, tc); tcase_add_test (tc, gu_dbug_test); tcase_set_timeout(tc, 60); return s; } galera-3-25.3.20/galerautils/tests/gu_histogram_test.cpp0000644000015300001660000000134113042054732023046 0ustar jenkinsjenkins/* * Copyright (C) 2014 Codership Oy */ #include "../src/gu_histogram.hpp" #include "../src/gu_logger.hpp" #include #include 
"gu_histogram_test.hpp" using namespace gu; START_TEST(test_histogram) { Histogram hs("0.0,0.0005,0.001,0.002,0.005,0.01,0.02,0.05,0.1,0.5,1.,5."); hs.insert(0.001); log_info << hs; for (size_t i = 0; i < 1000; ++i) { hs.insert(double(::rand())/RAND_MAX); } log_info << hs; hs.clear(); log_info << hs; } END_TEST Suite* gu_histogram_suite() { TCase* t = tcase_create ("test_histogram"); tcase_add_test (t, test_histogram); Suite* s = suite_create ("gu::Histogram"); suite_add_tcase (s, t); return s; } galera-3-25.3.20/galerautils/tests/gu_mmh3_test.h0000644000015300001660000000031313042054732021360 0ustar jenkinsjenkins// Copyright (C) 2012 Codership Oy // $Id$ #ifndef __gu_mmh3_test__ #define __gu_mmh3_test__ #include extern Suite *gu_mmh3_suite(void); #endif /* __gu_mmh3_test__ */ galera-3-25.3.20/galerautils/tests/gu_bswap_test.h0000644000015300001660000000026413042054732021635 0ustar jenkinsjenkins// Copyright (C) 2007 Codership Oy // $Id$ #ifndef __gu_bswap_test__ #define __gu_bswap_test__ Suite *gu_bswap_suite(void); #endif /* __gu_bswap_test__ */ galera-3-25.3.20/galerautils/tests/gu_vector_test.cpp0000644000015300001660000000246513042054732022363 0ustar jenkinsjenkins/* Copyright (C) 2013 Codership Oy * * $Id$ */ #include "../src/gu_vector.hpp" #include "gu_vector_test.hpp" START_TEST (simple_test) { // we are not to test the whole vector functionality, it is provided // by incorporated std::vector. We just need to see that allocator // works as expected gu::Vector v1; v1->reserve(12); fail_if (v1->size() != 0); v1->push_back(12); fail_if (v1->size() != 1); v1->resize(11); fail_if (v1->size() != 11); fail_if (v1.in_heap() != false); v1[10]=1; fail_if (v1[10] != v1()[10]); gu::Vector v2(v1); fail_if (v2->size() != v1->size()); fail_if (v1[10] != v2[10]); fail_if (&v1[10] == &v2[10]); v2[10]=2; fail_if (v1[10] == v2[10]); v2() = v1(); fail_if (v1[10] != v2[10]); fail_if (&v1[0] == &v2[0]); fail_if (v2.in_heap() != false); v2->resize(32); fail_if (v2.in_heap() != true); fail_if (v1.in_heap() != false); v2[25]=1; v1->resize(32); fail_if (v1.in_heap() != true); v1[25]=2; fail_if (v1[25] == v2[25]); } END_TEST Suite* gu_vector_suite(void) { TCase* t = tcase_create ("simple_test"); tcase_add_test (t, simple_test); Suite* s = suite_create ("gu::Vector"); suite_add_tcase (s, t); return s; } galera-3-25.3.20/galerautils/tests/gu_stats_test.cpp0000644000015300001660000000242413042054732022212 0ustar jenkinsjenkins/* * Copyright (C) 2014 Codership Oy */ #include "../src/gu_stats.hpp" #include "gu_stats_test.hpp" #include #include using namespace gu; static inline bool double_equal(double a, double b) { return (std::fabs(a - b) <= std::fabs(a + b) * std::numeric_limits::epsilon()); } START_TEST(test_stats) { Stats st; st.insert(10.0); st.insert(20.0); st.insert(30.0); fail_if(!double_equal(st.mean(), 20.0)); fail_if(!double_equal(st.variance() * 3, 200.0), "%e != 0", st.variance()*3-200.0); fail_if(!double_equal(st.min(), 10.0)); fail_if(!double_equal(st.max(), 30.0)); st.clear(); st.insert(10.0); fail_if(!double_equal(st.mean(), 10.0)); fail_if(!double_equal(st.variance(), 0.0)); fail_if(!double_equal(st.min(), 10.0)); fail_if(!double_equal(st.max(), 10.0)); st.clear(); fail_if(!double_equal(st.mean(), 0.0)); fail_if(!double_equal(st.variance(), 0.0)); fail_if(!double_equal(st.min(), 0.0)); fail_if(!double_equal(st.max(), 0.0)); } END_TEST Suite* gu_stats_suite() { TCase* t = tcase_create ("test_stats"); tcase_add_test (t, test_stats); Suite* s = suite_create ("gu::Stats"); suite_add_tcase (s, t); 
return s; } galera-3-25.3.20/galerautils/tests/gu_mem_pool_test.hpp0000644000015300001660000000033313042054732022665 0ustar jenkinsjenkins// Copyright (C) 2013 Codership Oy // $Id$ #ifndef __gu_mem_pool_test__ #define __gu_mem_pool_test__ #include extern Suite *gu_mem_pool_suite(void); #endif /* __gu_mem_pool_test__ */ galera-3-25.3.20/galerautils/tests/gu_crc32c_test.h0000644000015300001660000000033313042054732021575 0ustar jenkinsjenkins/* * Copyright (C) 2013 Codership Oy * * $Id$ */ #ifndef __gu_crc32c_test_h__ #define __gu_crc32c_test_h__ #include Suite* gu_crc32c_suite(void); #endif /* __gu_crc32c_test_h__ */ galera-3-25.3.20/galerautils/tests/gu_fifo_test.h0000644000015300001660000000026013042054732021440 0ustar jenkinsjenkins// Copyright (C) 2007 Codership Oy // $Id$ #ifndef __gu_fifo_test__ #define __gu_fifo_test__ Suite *gu_fifo_suite(void); #endif /* __gu_fifo_test__ */ galera-3-25.3.20/galerautils/tests/gu_vlq_test.cpp0000644000015300001660000002016013042054732021653 0ustar jenkinsjenkins// // Copyright (C) 2011 Codership Oy // #include "gu_vlq.hpp" #include "gu_vlq_test.hpp" #include "gu_logger.hpp" #include #include #include #include #include static struct valval { const unsigned long long val; const size_t size; } valarr[] = { {0x00 , 1}, {0x01 , 1}, {0x7fULL , 1}, {0x80ULL , 2}, {0x3fffULL , 2}, {0x4000ULL , 3}, {0x1fffffULL , 3}, {0x200000ULL , 4}, {0x0fffffffULL , 4}, {0x10000000ULL , 5}, {0x07ffffffffULL , 5}, {0x0800000000ULL , 6}, {0x03ffffffffffULL , 6}, {0x040000000000ULL , 7}, {0x01ffffffffffffULL , 7}, {0x02000000000000ULL , 8}, {0x00ffffffffffffffULL, 8}, {0x0100000000000000ULL, 9}, {0x7fffffffffffffffULL, 9}, {0x8000000000000000ULL, 10}, {0xffffffffffffffffULL, 10} }; // http://www.cplusplus.com/faq/sequences/arrays/sizeof-array/ template inline size_t SizeOfArray( const T(&)[ N ] ) { return N; } START_TEST(test_uleb128_size) { for (size_t i(0); i < SizeOfArray(valarr); ++i) { size_t size(gu::uleb128_size(valarr[i].val)); fail_unless(size == valarr[i].size, "got size %z, expected %z for value 0x%llx", size, valarr[i].size, valarr[i].val); } } END_TEST START_TEST(test_uleb128_encode) { std::vector buf; for (size_t i(0); i < SizeOfArray(valarr); ++i) { buf.resize(valarr[i].size); size_t offset(gu::uleb128_encode(valarr[i].val, &buf[0], buf.size(), 0)); fail_unless(offset == valarr[i].size, "got offset %zu, expected %zu for value 0x%llx", offset, valarr[i].size, valarr[i].val); } } END_TEST START_TEST(test_uleb128_decode) { std::vector buf; for (size_t i(0); i < SizeOfArray(valarr); ++i) { buf.resize(valarr[i].size); size_t offset(gu::uleb128_encode(valarr[i].val, &buf[0], buf.size(), 0)); unsigned long long val; try { offset = gu::uleb128_decode(&buf[0], buf.size(), 0, val); fail_unless(offset == valarr[i].size, "got offset %zu, expected %zu for value 0x%llx", offset, valarr[i].size, valarr[i].val); fail_unless(val == valarr[i].val, "got value 0x%llx, expected 0x%llx", val, valarr[i].val); } catch (gu::Exception& e) { fail("Exception in round %zu for encoding of size %zu: %s", i, valarr[i].size, e.what()); } } } END_TEST START_TEST(test_uleb128_misc) { std::vector buf(10); // check uint8_t whole range for (size_t i(0); i <= std::numeric_limits::max(); ++i) { (void)gu::uleb128_encode(static_cast(i), &buf[0], buf.size(), 0); uint8_t val; (void)gu::uleb128_decode(&buf[0], buf.size(), 0, val); if (i != val) fail("0x%x != 0x%x", i, val); } // check uint16_t whole range for (size_t i(0); i <= std::numeric_limits::max(); ++i) { (void)gu::uleb128_encode(static_cast(i), 
&buf[0], buf.size(), 0); uint16_t val; (void)gu::uleb128_decode(&buf[0], buf.size(), 0, val); if (i != val) fail("0x%x != 0x%x", i, val); } // check uint32_t: 0 -> 1^20 for (size_t i(0); i < (1 << 20); ++i) { (void)gu::uleb128_encode(static_cast(i), &buf[0], buf.size(), 0); uint32_t val; (void)gu::uleb128_decode(&buf[0], buf.size(), 0, val); if (i != val) fail("0x%x != 0x%x", i, val); } // check uin32_t: max - 1^20 -> max for (uint64_t i(std::numeric_limits::max() - (1 << 20)); i <= std::numeric_limits::max(); ++i) { (void)gu::uleb128_encode(static_cast(i), &buf[0], buf.size(), 0); uint32_t val; (void)gu::uleb128_decode(&buf[0], buf.size(), 0, val); if (i != val) fail("0x%x != 0x%x", i, val); } // uint64_t is tested for representation byte boundaries earlier, // run test just for random values for (size_t i(0); i < (1 << 16); ++i) { unsigned long long val(static_cast(rand()) * static_cast(rand())); (void)gu::uleb128_encode(val, &buf[0], buf.size(), 0); unsigned long long val2; (void)gu::uleb128_decode(&buf[0], buf.size(), 0, val2); if (val != val2) fail("0x%llx != 0x%llx", val, val2); } { // check that exception is thrown if target type is not // wide enough // uint8_t uint64_t val(static_cast(std::numeric_limits::max()) + 1); buf.resize(gu::uleb128_size(val)); (void)gu::uleb128_encode(val, &buf[0], buf.size(), 0); try { uint8_t cval; (void)gu::uleb128_decode(&buf[0], buf.size(), 0, cval); fail("exception was not thrown"); } catch (gu::Exception& e) { log_info << "expected exception: " << e.what(); } // uint16_t val = static_cast(std::numeric_limits::max()) + 1; buf.resize(gu::uleb128_size(val)); (void)gu::uleb128_encode(val, &buf[0], buf.size(), 0); try { uint16_t cval; (void)gu::uleb128_decode(&buf[0], buf.size(), 0, cval); fail("exception was not thrown"); } catch (gu::Exception& e) { log_info << "expected exception: " << e.what(); } // uint32_t val = static_cast(std::numeric_limits::max()) + 1; buf.resize(gu::uleb128_size(val)); (void)gu::uleb128_encode(val, &buf[0], buf.size(), 0); try { uint32_t cval; (void)gu::uleb128_decode(&buf[0], buf.size(), 0, cval); fail("exception was not thrown"); } catch (gu::Exception& e) { log_info << "expected exception: " << e.what(); } // check that exception is thrown if terminating byte is missing buf.resize(buf.size() - 1); try { uint64_t cval; (void)gu::uleb128_decode(&buf[0], buf.size(), 0, cval); fail("exception was not thrown"); } catch (gu::Exception& e) { log_info << "expected exception: " << e.what(); } // finally check the representation that cannot be stored with // uint64_t gu::byte_t b[] = {0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, // <--- up here 9 * 7 = 63 bits 0x02}; // <--- requires two additional bits try { uint64_t cval; (void)gu::uleb128_decode(b, SizeOfArray(b), 0, cval); fail("exception was not thrown"); } catch (gu::Exception& e) { log_info << "expected exception: " << e.what(); } } } END_TEST Suite* gu_vlq_suite() { Suite* s(suite_create("gu::vlq")); TCase* tc; tc = tcase_create("test_uleb128_size"); tcase_add_test(tc, test_uleb128_size); suite_add_tcase(s, tc); tc = tcase_create("test_uleb128_encode"); tcase_add_test(tc, test_uleb128_encode); suite_add_tcase(s, tc); tc = tcase_create("test_uleb128_decode"); tcase_add_test(tc, test_uleb128_decode); suite_add_tcase(s, tc); tc = tcase_create("test_uleb128_misc"); tcase_add_test(tc, test_uleb128_misc); suite_add_tcase(s, tc); return s; } galera-3-25.3.20/galerautils/tests/gu_vlq_test.hpp0000644000015300001660000000026613042054732021665 0ustar jenkinsjenkins// // 
Copyright (C) 2011 Codership Oy // #ifndef GU_VLQ_TEST_HPP #define GU_VLQ_TEST_HPP #include Suite* gu_vlq_suite(); #endif // GU_VLQ_TEST_HPP galera-3-25.3.20/galerautils/tests/gu_datetime_test.cpp0000644000015300001660000000657213042054732022660 0ustar jenkinsjenkins/* * Copyright (C) 2009 Codership Oy * * $Id$ */ #include "gu_datetime.hpp" #include "gu_logger.hpp" #include "gu_utils.hpp" #include "gu_datetime_test.hpp" using namespace gu; using namespace gu::datetime; START_TEST(test_units) { fail_unless(NSec == 1LL); fail_unless(USec == 1000LL); fail_unless(MSec == 1000LL*1000LL); fail_unless(Sec == 1000LL*1000LL*1000LL); fail_unless(Min == 60LL*1000LL*1000LL*1000LL); fail_unless(Hour == 60LL*60LL*1000LL*1000LL*1000LL); fail_unless(Day == 24LL*60LL*60LL*1000LL*1000LL*1000LL); fail_unless(Month == 30LL*24LL*60LL*60LL*1000LL*1000LL*1000LL); fail_unless(Year == 12LL*30LL*24LL*60LL*60LL*1000LL*1000LL*1000LL); } END_TEST START_TEST(test_period) { // Zero periods fail_unless(Period("").get_nsecs() == 0); fail_unless(Period("P").get_nsecs() == 0); fail_unless(Period("PT").get_nsecs() == 0); // Year-mon-day fail_unless(Period("P3Y").get_nsecs() == 3*Year); fail_unless(Period("P5M").get_nsecs() == 5*Month); fail_unless(Period("P37D").get_nsecs() == 37*Day); fail_unless(Period("P3Y17M").get_nsecs() == 3*Year + 17*Month); fail_unless(Period("P5Y66D").get_nsecs() == 5*Year + 66*Day); fail_unless(Period("P37M44D").get_nsecs() == 37*Month + 44*Day); fail_unless(Period("P3YT").get_nsecs() == 3*Year); fail_unless(Period("P5MT").get_nsecs() == 5*Month); fail_unless(Period("P37DT").get_nsecs() == 37*Day); fail_unless(Period("P3Y17MT").get_nsecs() == 3*Year + 17*Month); fail_unless(Period("P5Y66DT").get_nsecs() == 5*Year + 66*Day); fail_unless(Period("P37M44DT").get_nsecs() == 37*Month + 44*Day); // Hour-min-sec fail_unless(Period("PT3H").get_nsecs() == 3*Hour); fail_unless(Period("PT5M").get_nsecs() == 5*Min); fail_unless(Period("P37S").get_nsecs() == 37*Sec); // fail_unless(Period("PT3.578777S").get_nsecs() == 3*Sec + 578*MSec + 777*USec); fail_unless(Period("PT0.5S").get_nsecs() == 500*MSec); // fail_unless(Period("PT5H7M3.578777S").get_nsecs() == 5*Hour + 7*Min + 3*Sec + 578*MSec + 777*USec); // @todo these should fail fail_unless(Period("PT.S").get_nsecs() == 0); fail_unless(Period("PT.D").get_nsecs() == 0); } END_TEST START_TEST(test_date) { Date d1(Date::now()); Date d2 = d1 + Period("PT6S"); fail_unless(d2.get_utc() == d1.get_utc() + 6*Sec); fail_unless(d2 - Period("PT6S") == d1); Date max(Date::max()); fail_unless(d1 < max); } END_TEST START_TEST(test_trac_712) { try { Period p; p = gu::from_string("0x3"); // used to throw gu::Exception } catch (gu::NotFound& nf) { } } END_TEST Suite* gu_datetime_suite() { Suite* s = suite_create("gu::datetime"); TCase* tc; tc = tcase_create("test_units"); tcase_add_test(tc, test_units); suite_add_tcase(s, tc); tc = tcase_create("test_period"); tcase_add_test(tc, test_period); suite_add_tcase(s, tc); tc = tcase_create("test_date"); tcase_add_test(tc, test_date); suite_add_tcase(s, tc); tc = tcase_create("test_trac_712"); tcase_add_test(tc, test_trac_712); suite_add_tcase(s, tc); return s; } galera-3-25.3.20/galerautils/tests/gu_rset_test.cpp0000644000015300001660000002731713042054732022041 0ustar jenkinsjenkins/* Copyright (C) 2013 Codership Oy * * $Id$ */ #undef NDEBUG #include "../src/gu_rset.hpp" #include "gu_rset_test.hpp" #include "gu_logger.hpp" #include "gu_hexdump.hpp" class TestBaseName : public gu::Allocator::BaseName { std::string str_; public: 
TestBaseName(const char* name) : str_(name) {} void print(std::ostream& os) const { os << str_; } }; class TestRecord : public gu::Serializable { public: TestRecord (size_t size, const char* str) : Serializable(), size_(size), buf_(reinterpret_cast(::malloc(size_))), str_(reinterpret_cast(buf_ + sizeof(uint32_t))), own_(true) { fail_if (size_ > 0x7fffffff); if (0 == buf_) throw std::runtime_error("failed to allocate record"); gu::byte_t* tmp = const_cast(buf_); *reinterpret_cast(tmp) = htog32(size_); ::strncpy (const_cast(str_), str, size_ - 4); } TestRecord (const gu::byte_t* const buf, ssize_t const size) : Serializable(), size_(TestRecord::serial_size(buf, size)), buf_(buf), str_(reinterpret_cast(buf_ + sizeof(uint32_t))), own_(false) {} TestRecord (const TestRecord& t) : size_(t.size_), buf_(t.buf_), str_(t.str_), own_(false) {} virtual ~TestRecord () { if (own_) free (const_cast(buf_)); } const gu::byte_t* buf() const { return buf_; } const char* c_str() const { return str_; } ssize_t serial_size() const { return my_serial_size(); } static ssize_t serial_size(const gu::byte_t* const buf, ssize_t const size) { check_buf (buf, size, 1); return gtoh32 (*reinterpret_cast(buf)); } bool operator!= (const TestRecord& t) const { return (::strcmp(str_, t.str_)); } bool operator== (const TestRecord& t) const { return (!(*this != t)); } private: size_t const size_; const gu::byte_t* const buf_; const char* const str_; bool const own_; ssize_t my_serial_size () const { return size_; }; ssize_t my_serialize_to (void* buf, ssize_t size) const { check_buf (buf, size, size_); ::memcpy (buf, buf_, size_); return size_; } static void check_buf (const void* const buf, ssize_t const size, ssize_t min_size) { if (gu_unlikely (buf == 0 || size < min_size)) throw std::length_error("buffer too short"); } TestRecord& operator= (const TestRecord&); }; START_TEST (ver0) { size_t const MB = 1 << 20; // the choice of sizes below is based on default allocator memory store size // of 4MB. If it is changed, these need to be changed too. 
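    // Overview of the records built below: rout0..rout3 are small (~120-byte)
    // records used to exercise in-page allocation, the "don't store" flag and
    // partial appends, while rout4 (3MB) and rout5 (1MB) are large enough to
    // force new pages and to push past the 4MB RAM store assumed above.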
TestRecord rout0(120, "abc0"); fail_if (rout0.serial_size() != 120); fail_if (gtoh32(*reinterpret_cast(rout0.buf())) != 120); TestRecord rout1(121, "abc1"); TestRecord rout2(122, "012345"); TestRecord rout3(123, "defghij"); TestRecord rout4(3*MB, "klm"); TestRecord rout5(1*MB, "qpr"); std::vector records; records.push_back (&rout0); records.push_back (&rout1); records.push_back (&rout2); records.push_back (&rout3); records.push_back (&rout4); records.push_back (&rout5); gu::byte_t reserved[1024]; TestBaseName str("gu_rset_test"); gu::RecordSetOut rset_out(reserved, sizeof(reserved), str, gu::RecordSet::CHECK_MMH64, gu::RecordSet::VER1); size_t offset(rset_out.size()); fail_if (1 != rset_out.page_count()); std::pair rp; int rsize; const void* rout_ptrs[7]; // this should be allocated inside current page rp = rset_out.append (rout0); rout_ptrs[0] = rp.first; rsize = rp.second; fail_if (rsize != rout0.serial_size()); fail_if (rsize < 0); fail_if (rsize != TestRecord::serial_size(rp.first, rsize)); offset += rsize; fail_if (rset_out.size() != offset); fail_if (1 != rset_out.page_count()); // this should trigger new page since not stored rp = rset_out.append (rout1.buf(), rout1.serial_size(), false); rout_ptrs[1] = rp.first; rsize = rp.second; fail_if (rsize != rout1.serial_size()); offset += rsize; fail_if (rset_out.size() != offset); fail_if (2 != rset_out.page_count()); // this should trigger new page since previous one was not stored rp = rset_out.append (rout2); rout_ptrs[2] = rp.first; rsize = rp.second; fail_if (rsize != rout2.serial_size()); fail_if (rsize < 0); fail_if (rsize != TestRecord::serial_size(rp.first, rsize)); offset += rsize; fail_if (rset_out.size() != offset); fail_if (3 != rset_out.page_count(), "Expected %d pages, found %zu", 3, rset_out.page_count()); //***** test partial record appending *****// // this should be allocated inside the current page. 
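    // (Partial append: only the first 3 bytes of rout3 are appended here; the
    //  remainder goes in with the "not stored" flag below, which is why another
    //  page is expected and why rsize is accumulated across the two calls
    //  before being compared with rout3.serial_size().)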
rp = rset_out.append (rout3.buf(), 3); // rout_ptrs[2] = rp.first; rsize = rp.second; offset += rp.second; fail_if (3 != rset_out.page_count()); // this should trigger a new page, since not stored rp = rset_out.append (rout3.buf() + 3, rout3.serial_size() - 3, false, false); rout_ptrs[3] = rp.first; rsize += rp.second; fail_if (rsize != rout3.serial_size()); offset += rp.second; fail_if (rset_out.size() != offset); fail_if (4 != rset_out.page_count()); // this should trigger new page, because won't fit in the current page rp = rset_out.append (rout4); rout_ptrs[4] = rp.first; rsize = rp.second; fail_if (rsize != rout4.serial_size()); offset += rsize; fail_if (rset_out.size() != offset); fail_if (5 != rset_out.page_count()); // this should trigger new page, because 4MB RAM limit exceeded rp = rset_out.append (rout5); rout_ptrs[5] = rp.first; rsize = rp.second; fail_if (rsize != rout5.serial_size()); offset += rsize; fail_if (rset_out.size() != offset); fail_if (6 != rset_out.page_count(), "Expected %d pages, found %zu", 6, rset_out.page_count()); fail_if (records.size() != size_t(rset_out.count())); gu::RecordSet::GatherVector out_bufs; out_bufs->reserve (rset_out.page_count()); size_t min_out_size(0); for (size_t i = 0; i < records.size(); ++i) { min_out_size += records[i]->serial_size(); } size_t const out_size (rset_out.gather (out_bufs)); fail_if (out_size <= min_out_size || out_size > offset); fail_if (out_bufs->size() != static_cast<size_t>(rset_out.page_count()), "Expected %zu buffers, got: %zd", rset_out.page_count(), out_bufs->size()); /* concatenate all buffers into one */ std::vector<gu::byte_t> in_buf; in_buf.reserve(out_size); mark_point(); for (size_t i = 0; i < out_bufs->size(); ++i) { // 0th fragment starts with header, so it can't be used in this check fail_if (i > 0 && rout_ptrs[i] != out_bufs[i].ptr, "Record pointers don't match after gather(). " "old: %p, new: %p", rout_ptrs[i],out_bufs[i].ptr); ssize_t size = gtoh32( *reinterpret_cast<const uint32_t*>(out_bufs[i].ptr)); const char* str = reinterpret_cast<const char*>(out_bufs[i].ptr) + sizeof(uint32_t); // 0th fragment starts with header, so it can't be used in this check fail_if (i > 0 && size <= ssize_t(sizeof(uint32_t)), "Expected size > 4, got %zd(%#010zx). 
i = %zu, buf = %s", size, size, i, str); // the above variables make have sense only on certain pages // hence ifs below size_t k = i; switch (i) { case 3: break; // 4th page is partial 4th record case 1: case 2: fail_if (::strcmp(str, records[k]->c_str()), "Buffer %zu: appending '%s', expected '%s'", i, str, records[k]->c_str()); } if (i == 1 || i == 4) { fail_if (size != records[k]->serial_size(), "Buffer %zu: appending size %zd, expected %zd", i, size, records[k]->serial_size()); } log_info << "\nadding buf " << i << ": " << gu::Hexdump(out_bufs[i].ptr, std::min(out_bufs[i].size, 24), true); size_t old_size = in_buf.size(); const gu::byte_t* const begin (reinterpret_cast(out_bufs[i].ptr)); in_buf.insert (in_buf.end(), begin, begin + out_bufs[i].size); fail_if (old_size + out_bufs[i].size != in_buf.size()); } fail_if (in_buf.size() != out_size, "Sent buf size: %zu, recvd buf size: %zu", out_size, in_buf.size()); log_info << "Resulting RecordSet buffer:\n" << gu::Hexdump(in_buf.data(), 32, false) << '\n' << gu::Hexdump(in_buf.data(), 32, true); gu::RecordSetIn const rset_in(in_buf.data(), in_buf.size()); fail_if (rset_in.size() != rset_out.size()); fail_if (rset_in.count() != rset_out.count()); for (ssize_t i = 0; i < rset_in.count(); ++i) { TestRecord const rin(rset_in.next()); fail_if (rin != *records[i], "Record %d failed: expected %s, found %s", i, records[i]->c_str(), rin.c_str()); } /* Test checksum method: */ try { rset_in.checksum(); } catch (std::exception& e) { fail("%s", e.what()); } /* test buf() method */ gu::RecordSetIn const rset_in_buf(rset_in.buf().ptr, rset_in.buf().size); fail_if(rset_in.count() != rset_in_buf.count()); fail_if(rset_in.size() != rset_in_buf.size()); fail_if (rset_in.buf().ptr != rset_in_buf.buf().ptr); for (ssize_t i = 0; i < rset_in_buf.count(); ++i) { TestRecord const rin(rset_in_buf.next()); fail_if (rin != *records[i], "Record %d failed: expected %s, found %s", i, records[i]->c_str(), rin.c_str()); } /* test empty RecordSetIn creation with subsequent initialization */ gu::RecordSetIn rset_in_empty; fail_if (rset_in_empty.size() != 0); fail_if (rset_in_empty.count() != 0); try { TestRecord const rin(rset_in_empty.next()); fail ("next() succeeded on an empty writeset"); } catch (gu::Exception& e) { fail_if (e.get_errno() != EPERM); } rset_in_empty.init(in_buf.data(), in_buf.size(), true); fail_if (rset_in_empty.size() != rset_out.size()); fail_if (rset_in_empty.count() != rset_out.count()); /* Try some data corruption: swap a bit */ in_buf[10] ^= 1; try { rset_in.checksum(); fail("checksum() didn't throw on corrupted set"); } catch (std::exception& e) {} try { rset_in_empty.checksum(); fail("checksum() didn't throw on corrupted set"); } catch (std::exception& e) {} } END_TEST START_TEST (empty) { gu::RecordSetIn const rset_in(0, 0); fail_if (0 != rset_in.size()); fail_if (0 != rset_in.count()); try { rset_in.checksum(); } catch (std::exception& e) { fail("%s", e.what()); } } END_TEST Suite* gu_rset_suite () { TCase* t = tcase_create ("RecordSet"); tcase_add_test (t, ver0); tcase_add_test (t, empty); tcase_set_timeout(t, 60); Suite* s = suite_create ("gu::RecordSet"); suite_add_tcase (s, t); return s; } galera-3-25.3.20/galerautils/tests/gu_thread_test.cpp0000644000015300001660000000265613042054732022332 0ustar jenkinsjenkins// // Copyright (C) 2016 Codership Oy // #include "gu_thread.hpp" #include #include "gu_thread_test.hpp" START_TEST(check_thread_schedparam_parse) { gu::ThreadSchedparam sp_other(SCHED_OTHER, 0); std::ostringstream oss; oss << 
sp_other; fail_unless(oss.str() == "other:0", "'%s'", oss.str().c_str()); oss.str(""); gu::ThreadSchedparam sp_fifo(SCHED_FIFO, 95); oss << sp_fifo; fail_unless(oss.str() == "fifo:95", "'%s'", oss.str().c_str()); oss.str(""); gu::ThreadSchedparam sp_rr(SCHED_RR, 96); oss << sp_rr; fail_unless(oss.str() == "rr:96", "'%s'", oss.str().c_str()); } END_TEST START_TEST(check_thread_schedparam_system_default) { gu::ThreadSchedparam sp(gu::thread_get_schedparam(pthread_self())); std::ostringstream sp_oss; sp_oss << sp; std::ostringstream system_default_oss; system_default_oss << gu::ThreadSchedparam::system_default; fail_unless(sp == gu::ThreadSchedparam::system_default, "sp '%s' != system default '%s'", sp_oss.str().c_str(), system_default_oss.str().c_str()); } END_TEST Suite* gu_thread_suite() { Suite* s(suite_create("galerautils Thread")); TCase* tc(tcase_create("schedparam")); suite_add_tcase(s, tc); tcase_add_test(tc, check_thread_schedparam_parse); tcase_add_test(tc, check_thread_schedparam_system_default); return s; } galera-3-25.3.20/galerautils/tests/gu_alloc_test.hpp0000644000015300001660000000031713042054732022152 0ustar jenkinsjenkins// Copyright (C) 2013 Codership Oy // $Id$ #ifndef __gu_alloc_test__ #define __gu_alloc_test__ #include extern Suite *gu_alloc_suite(void); #endif /* __gu_alloc_test__ */ galera-3-25.3.20/galerautils/tests/gu_time_test.h0000644000015300001660000000026013042054732021453 0ustar jenkinsjenkins// Copyright (C) 2007 Codership Oy // $Id$ #ifndef __gu_time_test__ #define __gu_time_test__ Suite *gu_time_suite(void); #endif /* __gu_time_test__ */ galera-3-25.3.20/galerautils/tests/gu_uri_test.cpp0000644000015300001660000003135713042054732021662 0ustar jenkinsjenkins// Copyright (C) 2007 Codership Oy // $Id$ #include #include #include "../src/gu_uri.hpp" #include "../src/gu_exception.hpp" #include "../src/gu_logger.hpp" #include "gu_uri_test.hpp" using std::string; using std::pair; using gu::URI; using gu::URIQueryList; using gu::NotSet; using gu::NotFound; using gu::Exception; START_TEST (uri_test1) // checking normal URI { const string scheme("scheme"); const string user ("user:pswd"); const string host ("[::ffff:192.168.0.1]"); // IPv4 over IPv6 const string port ("4567"); const string path ("/path1/path2"); const string opt1 ("opt1"); const string val1 ("val1"); const string opt2 ("opt2"); const string val2 ("val2"); const string query (opt1 + '=' + val1 + '&' + opt2 + '=' + val2); const string frag ("frag"); string auth = user + "@" + host + ":" + port; string uri_str = scheme + "://" + auth + path + "?" 
+ query + "#" + frag; try { URI uri(uri_str); try { fail_if (scheme != uri.get_scheme(), "Scheme '%s' != '%s'", scheme.c_str(), uri.get_scheme().c_str()); } catch (NotSet&) { fail ("Scheme not set in '%s'", uri_str.c_str()); } try { fail_if (user != uri.get_user(), "User info '%s' != '%s'", user.c_str(), uri.get_user().c_str()); } catch (NotSet&) { fail ("User info not set in '%s'", uri_str.c_str()); } try { fail_if (host != uri.get_host(), "Host '%s' != '%s'", host.c_str(), uri.get_host().c_str()); } catch (NotSet&) { fail ("Host not set in '%s'", uri_str.c_str()); } try { fail_if (port != uri.get_port(), "Port '%s' != '%s'", port.c_str(), uri.get_port().c_str()); } catch (NotSet&) { fail ("Port not set in '%s'", uri_str.c_str()); } try { fail_if (path != uri.get_path(), "Path '%s' != '%s'", path.c_str(), uri.get_path().c_str()); } catch (NotSet&) { fail ("Path not set in '%s'", uri_str.c_str()); } try { fail_if (frag != uri.get_fragment(), "Fragment '%s' != '%s'", frag.c_str(), uri.get_fragment().c_str()); } catch (NotSet&) { fail ("Fragment not set in '%s'", uri_str.c_str()); } try { fail_if (auth != uri.get_authority(), "Authority '%s' != '%s'", auth.c_str(), uri.get_authority().c_str()); } catch (NotSet&) { fail ("Authority not set in '%s'", uri_str.c_str()); } URIQueryList ql = uri.get_query_list(); fail_if (ql.size() != 2, "Query list size %zu, expected 2", ql.size()); URIQueryList::const_iterator i = ql.begin(); fail_if (i->first != opt1, "got option '%s', expected '%s'", i->first.c_str(), opt1.c_str()); fail_if (i->second != val1, "got value '%s', expected '%s'", i->second.c_str(), val1.c_str()); ++i; fail_if (i->first != opt2, "got option '%s', expected '%s'", i->first.c_str(), opt2.c_str()); fail_if (i->second != val2, "got value '%s', expected '%s'", i->second.c_str(), val2.c_str()); fail_if (val1 != uri.get_option(opt1)); fail_if (val2 != uri.get_option(opt2)); try { uri.get_option("xxx"); fail ("Expected NotFound exception"); } catch (NotFound&) {} URI simple ("gcomm+pc://192.168.0.1"); } catch (Exception& e) { fail (e.what()); } } END_TEST START_TEST (uri_test2) // checking corner cases { #ifdef NDEBUG try { URI uri(""); fail ("URI should have failed."); } catch (Exception& e) {} #endif mark_point(); try { URI uri("scheme:"); } catch (Exception& e) { fail ("URI should be valid."); } mark_point(); #ifdef NDEBUG try { URI uri(":path"); fail ("URI should have failed."); } catch (Exception& e) {} #endif mark_point(); try { URI uri("a://b:c?d=e#f"); fail ("URI should have failed."); } catch (Exception& e) {} mark_point(); try { URI uri("a://b:99999?d=e#f"); fail ("URI should have failed."); } catch (Exception& e) {} mark_point(); #ifdef NDEBUG try { URI uri("?query"); fail ("URI should have failed."); } catch (Exception& e) {} #endif mark_point(); try { URI uri("scheme:path"); try { uri.get_user(); fail ("User should be unset"); } catch (NotSet&) {} try { uri.get_host(); fail ("Host should be unset"); } catch (NotSet&) {} try { uri.get_port(); fail ("Port should be unset"); } catch (NotSet&) {} try { uri.get_authority(); fail ("Authority should be unset"); } catch (NotSet&) {} try { uri.get_fragment(); fail ("Fragment should be unset"); } catch (NotSet&) {} fail_if (uri.get_query_list().size() != 0, "Query list must be empty"); } catch (Exception& e) { fail (e.what()); } mark_point(); try { URI uri("scheme:///path"); try { fail_if (uri.get_authority() != ""); } catch (NotSet&) { fail ("Authority should be set"); } try { uri.get_host(); fail("Host should be unset"); } catch 
(NotSet&) { } try { uri.get_user(); fail ("User should be unset"); } catch (NotSet&) {} try { uri.get_port(); fail ("Port should be unset"); } catch (NotSet&) {} try { fail_if (uri.get_path().length() != 5); } catch (NotSet&) { fail ("Path should be 5 characters long"); } } catch (Exception& e) { fail (e.what()); } mark_point(); try { URI uri("scheme://@/path"); try { fail_if (uri.get_authority() != "@"); } catch (NotSet&) { fail ("Authority should be set"); } try { fail_if (uri.get_user() != ""); } catch (NotSet&) { fail ("User should be set"); } try { fail_if (uri.get_host() != ""); } catch (NotSet&) { fail ("Host should be set"); } try { uri.get_port(); fail ("Port should be unset"); } catch (NotSet&) {} } catch (Exception& e) { fail (e.what()); } mark_point(); try { URI uri("scheme://@:/path"); try { fail_if (uri.get_authority() != "@"); } catch (NotSet&) { fail ("Authority should be set"); } try { fail_if (uri.get_user() != ""); } catch (NotSet&) { fail ("User should be set"); } try { fail_if (uri.get_host() != ""); } catch (NotSet&) { fail ("Host should be set"); } try { uri.get_port(); fail ("Port should be unset"); } catch (NotSet&) {} } catch (Exception& e) { fail (e.what()); } mark_point(); try { URI uri("scheme://"); try { fail_if (uri.get_authority() != ""); } catch (NotSet&) { fail ("Authority should be set"); } try { uri.get_user(); fail ("User should be unset"); } catch (NotSet&) {} try { uri.get_host(); fail("Host should be unset"); } catch (NotSet&) { } try { uri.get_port(); fail ("Port should be unset"); } catch (NotSet&) {} // According to http://tools.ietf.org/html/rfc3986#section-3.3 try { fail_if (uri.get_path() != ""); } catch (NotSet&) { fail ("Path should be set to empty"); } } catch (Exception& e) { fail (e.what()); } } END_TEST START_TEST (uri_test3) // Test from gcomm { #ifdef NDEBUG try { URI too_simple("http"); fail("too simple accepted"); } catch (gu::Exception& e) { fail_if (e.get_errno() != EINVAL); } #endif URI empty_auth("http://"); fail_unless(empty_auth.get_scheme() == "http"); fail_unless(empty_auth.get_authority() == ""); URI simple_valid1("http://example.com"); fail_unless(simple_valid1.get_scheme() == "http"); fail_unless(simple_valid1.get_authority() == "example.com"); fail_unless(simple_valid1.get_path() == ""); fail_unless(simple_valid1.get_query_list().size() == 0); URI with_path("http://example.com/path/to/file.html"); fail_unless(with_path.get_scheme() == "http"); fail_unless(with_path.get_authority() == "example.com"); fail_unless(with_path.get_path() == "/path/to/file.html"); fail_unless(with_path.get_query_list().size() == 0); URI with_query("http://example.com?key1=val1&key2=val2"); fail_unless(with_query.get_scheme() == "http"); fail_unless(with_query.get_authority() == "example.com"); fail_unless(with_query.get_path() == ""); const URIQueryList& qlist = with_query.get_query_list(); fail_unless(qlist.size() == 2); URIQueryList::const_iterator i; i = qlist.find("key1"); fail_unless(i != qlist.end() && i->second == "val1"); i = qlist.find("key2"); fail_unless(i != qlist.end() && i->second == "val2"); URI with_uri_in_query("gcomm+gmcast://localhost:10001?gmcast.node=gcomm+tcp://localhost:10002&gmcast.node=gcomm+tcp://localhost:10003"); fail_unless(with_uri_in_query.get_scheme() == "gcomm+gmcast"); fail_unless(with_uri_in_query.get_authority() == "localhost:10001"); const URIQueryList& qlist2 = with_uri_in_query.get_query_list(); fail_unless(qlist2.size() == 2); pair ii; ii = qlist2.equal_range("gmcast.node"); fail_unless(ii.first != 
qlist2.end()); for (i = ii.first; i != ii.second; ++i) { fail_unless(i->first == "gmcast.node"); URI quri(i->second); fail_unless(quri.get_scheme() == "gcomm+tcp"); fail_unless(quri.get_authority().substr(0, string("localhost:1000").size()) == "localhost:1000"); } try { URI invalid1("http://example.com/?key1"); fail("invalid query accepted"); } catch (gu::Exception& e) { fail_if (e.get_errno() != EINVAL); } } END_TEST START_TEST(uri_non_strict) { std::string const ip("1.2.3.4"); std::string const port("789"); std::string const addr(ip + ':' + port); try { URI u(ip); fail("Strict mode passed without scheme"); } catch (gu::Exception& e) { fail_if (e.get_errno() != EINVAL, "Expected errno %d, got %d", EINVAL, e.get_errno()); } try { URI u(addr, false); fail_if (u.get_host() != ip); fail_if (u.get_port() != port); try { u.get_scheme(); fail("Scheme is '%s', should be unset", u.get_scheme().c_str()); } catch (gu::NotSet&) {} } catch (gu::Exception& e) { fail_if (e.get_errno() != EINVAL); } } END_TEST START_TEST(uri_test_multihost) { try { gu::URI uri("tcp://host1,host2"); fail_unless(uri.get_authority_list().size() == 2); try { uri.get_authority_list()[0].user(); fail("User should not be set"); } catch (NotSet&) { } fail_unless(uri.get_authority_list()[0].host() == "host1"); try { uri.get_authority_list()[0].port(); fail("Port should not be set"); } catch (NotSet&) { } fail_unless(uri.get_authority_list()[1].host() == "host2"); } catch (gu::Exception& e) { fail(e.what()); } try { gu::URI uri("tcp://host1:1234,host2:,host3:3456"); fail_unless(uri.get_authority_list().size() == 3); try { uri.get_authority_list()[0].user(); fail("User should not be set"); } catch (NotSet&) { } fail_unless(uri.get_authority_list()[0].host() == "host1"); fail_unless(uri.get_authority_list()[0].port() == "1234"); fail_unless(uri.get_authority_list()[1].host() == "host2"); } catch (gu::Exception& e) { fail(e.what()); } } END_TEST Suite *gu_uri_suite(void) { Suite *s = suite_create("galerautils++ URI"); TCase *tc = tcase_create("URI"); suite_add_tcase (s, tc); tcase_add_test (tc, uri_test1); tcase_add_test (tc, uri_test2); tcase_add_test (tc, uri_test3); tcase_add_test (tc, uri_non_strict); tcase_add_test (tc, uri_test_multihost); return s; } galera-3-25.3.20/galerautils/tests/gu_mmh3_test.c0000644000015300001660000002167013042054732021364 0ustar jenkinsjenkins// Copyright (C) 2012 Codership Oy // $Id$ #include "gu_mmh3_test.h" #include "../src/gu_mmh3.h" #include "../src/gu_log.h" #include "../src/gu_hexdump.h" /* This is to verify all tails plus block + all tails. 
Max block is 16 bytes */ static const char test_input[] = "0123456789ABCDEF0123456789abcde"; typedef struct hash32 { uint8_t h[4]; } hash32_t; #define NUM_32_TESTS 8 /* 0 to 7 bytes */ static const hash32_t test_output32[NUM_32_TESTS] = { {{ 0x0b, 0x7c, 0x3e, 0xab }}, /* '' */ {{ 0xba, 0xeb, 0x75, 0x97 }}, /* '0' */ {{ 0x5d, 0x5c, 0x21, 0x60 }}, /* '01' */ {{ 0x4b, 0xff, 0x61, 0x41 }}, /* '012' */ {{ 0x35, 0x3b, 0x57, 0xca }}, /* '0123' */ {{ 0x09, 0xdd, 0x77, 0xf9 }}, /* '01234' */ {{ 0x1f, 0x3c, 0x29, 0x7b }}, /* '012345' */ {{ 0xe1, 0xbe, 0x2d, 0xce }} /* '0123456' */ }; typedef struct hash128 { uint8_t h[16]; } hash128_t; #define NUM_128_TESTS 32 /* 0 to 31 bytes */ static const hash128_t test_output128[NUM_128_TESTS] = { {{ 0xa9,0xce,0x5a,0x56,0x0c,0x0b,0xf7,0xd6,0x63,0x4f,0x6f,0x81,0x0e,0x0b,0xf2,0x0a }}, {{ 0x72,0xa1,0x46,0xa3,0x73,0x03,0x49,0x85,0x30,0xb9,0x52,0xaa,0x3b,0x00,0xad,0x23 }}, {{ 0x4f,0x32,0xa2,0x15,0x91,0x00,0xea,0xaa,0x59,0x90,0x48,0x30,0xe5,0x86,0x50,0xee }}, {{ 0x55,0xfe,0x86,0x3b,0x9c,0x67,0xc6,0xee,0x5c,0x06,0x34,0xd0,0xe5,0x15,0xfb,0xdd }}, {{ 0x3a,0x50,0x35,0xe5,0x72,0x75,0xa5,0x5e,0x46,0x3d,0x0e,0x23,0xbb,0x17,0x5a,0x66 }}, {{ 0x3b,0xff,0xb5,0x1a,0x93,0x0c,0x77,0x9a,0x40,0x5f,0x62,0x0c,0x40,0x15,0x0b,0x6e }}, {{ 0x7c,0xf8,0xf9,0xd2,0xfa,0x5a,0x8b,0x51,0x65,0x3c,0xa5,0x0e,0xa2,0xca,0x0a,0x87 }}, {{ 0x95,0x69,0x33,0x98,0xe4,0xb2,0x2a,0x21,0xd4,0x23,0x21,0x80,0xb1,0x00,0x46,0xbb }}, {{ 0x92,0xca,0xd3,0xbb,0x39,0x16,0x96,0xb5,0x3a,0x61,0x58,0x53,0xbb,0xf8,0xc4,0xb0 }}, {{ 0x36,0xf0,0xa3,0xc8,0xdc,0x5e,0x46,0x20,0x12,0xcf,0xad,0x3f,0xda,0xd5,0x95,0x7a }}, {{ 0xb9,0x71,0x76,0x54,0xd3,0x74,0x9b,0x31,0x93,0xb2,0xd9,0xbf,0xad,0x78,0x49,0x7e }}, {{ 0x39,0x75,0xc6,0x34,0x38,0x65,0x60,0x32,0xb1,0xa3,0x02,0xd2,0xba,0x47,0x0b,0xc3 }}, {{ 0x37,0xcd,0xe3,0x34,0x7d,0x2d,0xa4,0xdc,0xf3,0x51,0xd1,0x1e,0x46,0xb8,0x1a,0xd4 }}, {{ 0xa0,0xf6,0xff,0xc6,0xcd,0x50,0xdf,0xa2,0x59,0x36,0x8d,0xdf,0x09,0x57,0x14,0x7b }}, {{ 0xeb,0x58,0x42,0xca,0x56,0xb5,0x94,0x16,0x10,0x86,0x38,0x5b,0x2c,0x4a,0x13,0x84 }}, {{ 0x5d,0xee,0x3a,0x5b,0x45,0x5f,0x92,0x7d,0x42,0x91,0x8a,0x7b,0xb6,0xc7,0xde,0xd9 }}, {{ 0x63,0xff,0xe5,0x55,0x38,0x3d,0xd6,0x5d,0xa4,0xad,0xcb,0xf6,0x0a,0xc3,0xd9,0x12 }}, {{ 0x86,0x15,0xd3,0x5a,0x47,0x81,0x3f,0xea,0x6b,0xbc,0x3b,0x82,0xd0,0x49,0xda,0x5d }}, {{ 0xb7,0x41,0xc9,0xf5,0x94,0x3f,0x91,0xa5,0x56,0x68,0x9c,0x12,0xc7,0xa1,0xd9,0x45 }}, {{ 0xb7,0x7c,0x2f,0x60,0xe3,0x2b,0x6a,0xd6,0x5e,0x24,0x6c,0xaf,0x8c,0x83,0x99,0xc7 }}, {{ 0x62,0xdb,0xad,0xab,0xda,0x51,0x82,0x0b,0x04,0xe6,0x7a,0x88,0xaa,0xae,0xfd,0xce }}, {{ 0x70,0x89,0xd2,0x6a,0x35,0x80,0x19,0xa4,0x71,0x0e,0x5c,0x68,0x33,0xf5,0x0c,0x67 }}, {{ 0x05,0xb3,0x50,0x50,0xbe,0x8d,0xaa,0x6e,0x32,0x02,0x1b,0x5e,0xe6,0xb7,0x5f,0x72 }}, {{ 0x85,0x60,0x7c,0x7a,0xdf,0xaa,0x67,0xc6,0xed,0x3e,0x7e,0x13,0x84,0x2c,0xd4,0x28 }}, {{ 0x51,0x4a,0xe3,0x56,0xe0,0x5f,0x7d,0x42,0xfb,0x41,0xec,0xfe,0xff,0xa4,0x74,0x13 }}, {{ 0xb8,0xc0,0xc1,0x01,0xc2,0x74,0xbb,0x84,0xc8,0xca,0x16,0x9c,0x6b,0xf3,0x3e,0x4d }}, {{ 0xab,0xd0,0x4a,0xc5,0xa4,0xc8,0xce,0xf4,0xf2,0xf5,0x2f,0xdc,0x22,0x4f,0x20,0xda }}, {{ 0x36,0x25,0x28,0x74,0xf0,0x4c,0x36,0x38,0xd2,0x9a,0x64,0xf8,0x11,0xcf,0xaf,0x28 }}, {{ 0x8b,0x79,0x18,0x09,0x14,0x19,0x3c,0xa0,0x5b,0x62,0x4d,0x09,0x18,0xdd,0x6a,0x89 }}, {{ 0xc0,0xae,0x4f,0x67,0x45,0x01,0x00,0xb7,0x75,0xc5,0x1c,0x56,0xdf,0x55,0x7c,0x04 }}, {{ 0xcd,0x5a,0xda,0xea,0xbc,0xfb,0x8d,0xc7,0x8a,0xd3,0xc6,0x70,0x12,0x34,0x82,0x84 }}, {{ 0x69,0x53,0x0d,0xc3,0x4d,0xd4,0x33,0xe9,0x00,0x1b,0x27,0x06,0x27,0x7f,0x48,0xf7 }} }; typedef void (*hash_f_t) (const 
void* key, int len, uint32_t seed, void* out); /* Verification code from the original SMHasher test suite */ static void smhasher_verification (hash_f_t hash, size_t const hashbytes, uint32_t* const res) { ssize_t const n_tests = 256; uint8_t key[n_tests]; uint8_t hashes[hashbytes * n_tests]; uint8_t final[hashbytes]; /* Hash keys of the form {0}, {0,1}, {0,1,2}... up to N=255,using 256-N as * the seed */ ssize_t i; for(i = 0; i < n_tests; i++) { key[i] = (uint8_t)i; hash (key, i, n_tests - i, &hashes[i * hashbytes]); } /* Then hash the result array */ hash (hashes, hashbytes * n_tests, 0, final); memcpy (res, final, sizeof(*res)); } static hash32_t smhasher_checks[3] = { {{ 0xE3, 0x7E, 0xF5, 0xB0 }}, /* mmh3_32 */ {{ 0x2A, 0xE6, 0xEC, 0xB3 }}, /* mmh3_x86_128 */ {{ 0x69, 0xBA, 0x84, 0x63 }} /* mmh3_x64_128 */ }; /* returns true if check fails */ static bool check (const void* const exp, const void* const got, ssize_t size) { if (memcmp (exp, got, size)) { ssize_t str_size = size * 2.2 + 1; char c[str_size], r[str_size]; gu_hexdump (exp, size, c, sizeof(c), false); gu_hexdump (got, size, r, sizeof(r), false); gu_info ("expected MurmurHash3:\n%s\nfound:\n%s\n", c, r); return true; } return false; } START_TEST (gu_mmh32_test) { int i; uint32_t out; smhasher_verification (gu_mmh3_32, sizeof(out), &out); fail_if (check (&smhasher_checks[0], &out, sizeof(out)), "gu_mmh3_32 failed."); for (i = 0; i < NUM_32_TESTS; i++) { uint32_t res = gu_mmh32 (test_input, i); res = gu_le32(res); fail_if(check (&test_output32[i], &res, sizeof(res)), "gu_mmh32() failed at step %d",i); } } END_TEST #if 0 /* x86 variant is faulty and unsuitable for short keys, ignore */ START_TEST (gu_mmh128_x86_test) { int i; uint32_t out32; smhasher_verification (gu_mmh3_x86_128, sizeof(hash128_t), &out32); fail_if (check (&smhasher_checks[1], &out32, sizeof(out32)), "gu_mmh3_x86_128 failed."); for (i = 0; i < NUM_128_TESTS; i++) { hash128_t out; gu_mmh3_x86_128 (test_input, i, GU_MMH32_SEED, &out); check (&test_output128[i], &out, sizeof(out)); } } END_TEST #endif /* 0 */ START_TEST (gu_mmh128_x64_test) { int i; uint32_t out32; smhasher_verification (gu_mmh3_x64_128, sizeof(hash128_t), &out32); fail_if (check (&smhasher_checks[2], &out32, sizeof(out32)), "gu_mmh3_x64_128 failed."); for (i = 0; i < NUM_128_TESTS; i++) { hash128_t out; gu_mmh128 (test_input, i, &out); fail_if(check (&test_output128[i], &out, sizeof(out)), "gu_mmh128() failed at step %d", i); } } END_TEST /* Tests partial hashing functions */ START_TEST (gu_mmh128_partial) { hash128_t part; gu_mmh128_ctx_t ctx; gu_mmh128_init (&ctx); gu_mmh128_append (&ctx, test_input, 31); gu_mmh128_get (&ctx, &part); fail_if(check (&test_output128[31], &part, sizeof(part)), "gu_mmh128_get() failed at one go"); gu_mmh128_init (&ctx); gu_mmh128_get (&ctx, &part); fail_if(check (&test_output128[0], &part, sizeof(part)), "gu_mmh128_get() failed at init"); gu_mmh128_append (&ctx, test_input + 0, 0); gu_mmh128_get (&ctx, &part); fail_if(check (&test_output128[0], &part, sizeof(part)), "gu_mmh128_get() failed at length %d", 0); gu_mmh128_append (&ctx, test_input + 0, 1); gu_mmh128_get (&ctx, &part); fail_if(check (&test_output128[1], &part, sizeof(part)), "gu_mmh128_get() failed at length %d", 1); gu_mmh128_append (&ctx, test_input + 1, 2); gu_mmh128_get (&ctx, &part); fail_if(check (&test_output128[3], &part, sizeof(part)), "gu_mmh128_get() failed at length %d", 3); gu_mmh128_get (&ctx, &part); fail_if(check (&test_output128[3], &part, sizeof(part)), "gu_mmh128_get() failed at length 
%d again", 3); gu_mmh128_append (&ctx, test_input + 3, 20); gu_mmh128_get (&ctx, &part); fail_if(check (&test_output128[23], &part, sizeof(part)), "gu_mmh128_get() failed at length %d", 23); gu_mmh128_append (&ctx, test_input + 23, 0); gu_mmh128_get (&ctx, &part); fail_if(check (&test_output128[23], &part, sizeof(part)), "gu_mmh128_get() failed at length %d again", 23); gu_mmh128_append (&ctx, test_input + 23, 3); gu_mmh128_append (&ctx, test_input + 26, 3); gu_mmh128_append (&ctx, test_input + 29, 2); gu_mmh128_get (&ctx, &part); fail_if(check (&test_output128[31], &part, sizeof(part)), "gu_mmh128_get() failed at length %d", 31); } END_TEST Suite *gu_mmh3_suite(void) { Suite *s = suite_create("MurmurHash3"); TCase *tc = tcase_create("gu_mmh3"); suite_add_tcase (s, tc); tcase_add_test (tc, gu_mmh32_test); // tcase_add_test (tc, gu_mmh128_x86_test); tcase_add_test (tc, gu_mmh128_x64_test); tcase_add_test (tc, gu_mmh128_partial); return s; } galera-3-25.3.20/galerautils/tests/gu_mem_test.h0000644000015300001660000000026313042054732021276 0ustar jenkinsjenkins// Copyright (C) 2007 Codership Oy // $Id$ #ifndef __gu_mem_test__ #define __gu_mem_test__ extern Suite *gu_mem_suite(void); #endif /* __gu_mem_test__ */ galera-3-25.3.20/galerautils/tests/gu_mem_test.c0000644000015300001660000000351513042054732021274 0ustar jenkinsjenkins// Copyright (C) 2007 Codership Oy // $Id$ #define DEBUG_MALLOC // turn on the debugging code #define TEST_SIZE 1024 #include #include #include #include #include "gu_mem_test.h" #include "../src/galerautils.h" START_TEST (gu_mem_test) { void* ptr1; void* ptr2; int res; int i; ptr1 = gu_malloc (0); fail_if (NULL != ptr1, "Zero memory allocated, non-NULL pointer returned"); mark_point(); ptr1 = gu_malloc (TEST_SIZE); fail_if (NULL == ptr1, "NULL pointer returned for allocation" " errno: %s", strerror (errno)); mark_point(); ptr2 = memset (ptr1, 0xab, TEST_SIZE); fail_if (ptr2 != ptr1, "Memset changed pointer"); ptr2 = NULL; mark_point(); ptr2 = gu_realloc (ptr2, TEST_SIZE); fail_if (NULL == ptr2, "NULL pointer returned for reallocation" " errno: %s", strerror (errno)); memcpy (ptr2, ptr1, TEST_SIZE); mark_point(); ptr1 = gu_realloc (ptr1, TEST_SIZE + TEST_SIZE); res = memcmp (ptr1, ptr2, TEST_SIZE); fail_if (res != 0, "Realloc changed the contents of the memory"); mark_point(); ptr1 = gu_realloc (ptr1, 0); fail_if (res != 0, "Realloc to 0 didn't return NULL"); mark_point(); ptr1 = gu_calloc (1, TEST_SIZE); fail_if (NULL == ptr1, "NULL pointer returned for allocation" " errno: %s", strerror (errno)); for (i = 0; i < TEST_SIZE; i++) { res = ((char*)ptr1)[i]; if (res != 0) break; } fail_if (res != 0, "Calloc didn't clear up the memory"); mark_point(); gu_free (ptr1); mark_point(); gu_free (ptr2); } END_TEST Suite *gu_mem_suite(void) { Suite *s = suite_create("Galera memory utils"); TCase *tc_mem = tcase_create("gu_mem"); suite_add_tcase (s, tc_mem); tcase_add_test(tc_mem, gu_mem_test); return s; } galera-3-25.3.20/galerautils/tests/SConscript0000644000015300001660000000443013042054732020627 0ustar jenkinsjenkins Import('check_env') env = check_env.Clone() # Include paths env.Append(CPPPATH = Split(''' # #/galerautils/src ''')) env.Prepend(LIBS=File('#/galerautils/src/libgalerautils.a')) env.Prepend(LIBS=File('#/galerautils/src/libgalerautils++.a')) gu_tests = env.Program(target = 'gu_tests', source = Split(''' gu_tests.c gu_mem_test.c gu_vec_test.c gu_bswap_test.c gu_fnv_test.c gu_mmh3_test.c gu_spooky_test.c gu_crc32c_test.c gu_hash_test.c gu_time_test.c gu_fifo_test.c 
gu_uuid_test.c gu_dbug_test.c gu_lock_step_test.c gu_str_test.c gu_utils_test.c ''')) env.Test("gu_tests.passed", gu_tests) env.Alias("test", "gu_tests.passed") Clean(gu_tests, '#/gu_tests.log') gu_testspp = env.Program(target = 'gu_tests++', source = Split(''' gu_atomic_test.cpp gu_vector_test.cpp gu_string_test.cpp gu_vlq_test.cpp gu_digest_test.cpp gu_mem_pool_test.cpp gu_alloc_test.cpp gu_rset_test.cpp gu_string_utils_test.cpp gu_uri_test.cpp gu_config_test.cpp gu_net_test.cpp gu_datetime_test.cpp gu_histogram_test.cpp gu_stats_test.cpp gu_thread_test.cpp gu_tests++.cpp ''')) env.Test("gu_tests++.passed", gu_testspp) env.Alias("test", "gu_tests++.passed") Clean(gu_testspp, '#/gu_tests++.log') galera-3-25.3.20/galerautils/tests/gu_str_test.h0000644000015300001660000000025413042054732021330 0ustar jenkinsjenkins// Copyright (C) 2007 Codership Oy #ifndef __gu_str_test__ #define __gu_str_test__ extern Suite *gu_str_suite(void); #endif /* __gu_str_test__ */ galera-3-25.3.20/galerautils/tests/gu_histogram_test.hpp0000644000015300001660000000033213042054732023052 0ustar jenkinsjenkins/* * Copyright (C) 2014 Codership Oy */ #ifndef __gu_histogram_test__ #define __gu_histogram_test__ #include extern Suite *gu_histogram_suite(void); #endif // __gu_histogram_test__ galera-3-25.3.20/galerautils/tests/gu_thread_test.hpp0000644000015300001660000000031113042054732022321 0ustar jenkinsjenkins// // Copyright (C) 2016 Codership Oy // #ifndef GU_THREAD_TEST_HPP #define GU_THREAD_TEST_HPP #include extern Suite *gu_thread_suite(); #endif // GU_THREAD_TEST_HPP galera-3-25.3.20/galerautils/tests/gu_hash_test.c0000644000015300001660000001771113042054732021444 0ustar jenkinsjenkins// Copyright (C) 2012 Codership Oy /* * This unit test is mostly to check that Galera hash definitions didn't change: * correctness of hash algorithms definitions is checked in respective unit * tests. * * By convention checks are made against etalon byte arrays, so integers must be * converted to little-endian. * * $Id$ */ #include "gu_hash_test.h" #include "../src/gu_hash.h" #include "../src/gu_log.h" #include "../src/gu_hexdump.h" /* checks equivalence of two buffers, returns true if check fails and logs * buffer contents. 
*/ static bool check (const void* const exp, const void* const got, ssize_t size) { if (memcmp (exp, got, size)) { ssize_t str_size = size * 2.2 + 1; char c[str_size], r[str_size]; gu_hexdump (exp, size, c, sizeof(c), false); gu_hexdump (got, size, r, sizeof(r), false); gu_info ("expected hash value:\n%s\nfound:\n%s\n", c, r); return true; } return false; } static const char test_msg[2048] = { 0, }; #define GU_HASH_TEST_LENGTH 43 /* some random prime */ static const uint8_t gu_hash128_check[16] = { 0xFA,0x2C,0x78,0x67,0x35,0x99,0xD9,0x84,0x73,0x41,0x3F,0xA5,0xEB,0x27,0x40,0x2F }; static const uint8_t gu_hash64_check[8] = { 0xFA,0x2C,0x78,0x67,0x35,0x99,0xD9,0x84 }; static const uint8_t gu_hash32_check[4] = { 0xFA,0x2C,0x78,0x67 }; /* Tests partial hashing functions */ START_TEST (gu_hash_test) { gu_hash_t h; gu_hash_init(&h); gu_hash_append(&h, test_msg, GU_HASH_TEST_LENGTH); uint8_t res128[16]; gu_hash_get128 (&h, res128); fail_if (check (gu_hash128_check, res128, sizeof(res128)), "gu_hash_get128() failed."); uint64_t res64 = gu_hash_get64(&h); fail_if (gu_hash64(test_msg, GU_HASH_TEST_LENGTH) != res64); res64 = gu_le64(res64); fail_if (check (gu_hash64_check, &res64, sizeof(res64)), "gu_hash_get64() failed."); uint32_t res32 = gu_hash_get32(&h); fail_if (gu_hash32(test_msg, GU_HASH_TEST_LENGTH) != res32); res32 = gu_le32(res32); fail_if (check (gu_hash32_check, &res32, sizeof(res32)), "gu_hash_get32() failed."); } END_TEST static const uint8_t fast_hash128_check0 [16] = { 0xA9,0xCE,0x5A,0x56,0x0C,0x0B,0xF7,0xD6,0x63,0x4F,0x6F,0x81,0x0E,0x0B,0xF2,0x0A }; static const uint8_t fast_hash128_check511 [16] = { 0xC6,0x7F,0x4C,0xE7,0x6F,0xE0,0xDA,0x14,0xCC,0x9F,0x21,0x76,0xAF,0xB5,0x12,0x1A }; static const uint8_t fast_hash128_check512 [16] = { 0x38,0x8D,0x2B,0x90,0xC8,0x7F,0x11,0x53,0x3F,0xB4,0x32,0xC1,0xD7,0x2B,0x04,0x39 }; static const uint8_t fast_hash128_check2011[16] = { 0xB7,0xCE,0x75,0xC7,0xB4,0x31,0xBC,0xC8,0x95,0xB3,0x41,0xB8,0x5B,0x8E,0x77,0xF9 }; static const uint8_t fast_hash64_check0 [8] = { 0x6C, 0x55, 0xB8, 0xA1, 0x02, 0xC6, 0x21, 0xCA }; static const uint8_t fast_hash64_check15 [8] = { 0x28, 0x49, 0xE8, 0x34, 0x7A, 0xAB, 0x49, 0x34 }; static const uint8_t fast_hash64_check16 [8] = { 0x44, 0x40, 0x2C, 0x82, 0xD3, 0x8D, 0xAA, 0xFE }; static const uint8_t fast_hash64_check511 [8] = { 0xC6, 0x7F, 0x4C, 0xE7, 0x6F, 0xE0, 0xDA, 0x14 }; static const uint8_t fast_hash64_check512 [8] = { 0x38, 0x8D, 0x2B, 0x90, 0xC8, 0x7F, 0x11, 0x53 }; static const uint8_t fast_hash64_check2011[8] = { 0xB7, 0xCE, 0x75, 0xC7, 0xB4, 0x31, 0xBC, 0xC8 }; static const uint8_t fast_hash32_check0 [4] = { 0x0B, 0x7C, 0x3E, 0xAB }; static const uint8_t fast_hash32_check31 [4] = { 0x1E, 0xFF, 0x48, 0x38 }; static const uint8_t fast_hash32_check32 [4] = { 0x63, 0xC2, 0x53, 0x0D }; static const uint8_t fast_hash32_check511 [4] = { 0xC6, 0x7F, 0x4C, 0xE7 }; static const uint8_t fast_hash32_check512 [4] = { 0x38, 0x8D, 0x2B, 0x90 }; static const uint8_t fast_hash32_check2011[4] = { 0xB7, 0xCE, 0x75, 0xC7 }; /* Tests fast hash functions */ START_TEST (gu_fast_hash_test) { uint8_t res128[16]; gu_fast_hash128 (test_msg, 0, res128); fail_if (check (fast_hash128_check0, res128, sizeof(res128))); gu_fast_hash128 (test_msg, 511, res128); fail_if (check (fast_hash128_check511, res128, sizeof(res128))); gu_fast_hash128 (test_msg, 512, res128); fail_if (check (fast_hash128_check512, res128, sizeof(res128))); gu_fast_hash128 (test_msg, 2011, res128); fail_if (check (fast_hash128_check2011, res128, sizeof(res128))); uint64_t 
res64; res64 = gu_fast_hash64 (test_msg, 0); res64 = gu_le64(res64); fail_if (check (fast_hash64_check0, &res64, sizeof(res64))); res64 = gu_fast_hash64 (test_msg, 15); res64 = gu_le64(res64); fail_if (check (fast_hash64_check15, &res64, sizeof(res64))); res64 = gu_fast_hash64 (test_msg, 16); res64 = gu_le64(res64); fail_if (check (fast_hash64_check16, &res64, sizeof(res64))); res64 = gu_fast_hash64 (test_msg, 511); res64 = gu_le64(res64); fail_if (check (fast_hash64_check511, &res64, sizeof(res64))); res64 = gu_fast_hash64 (test_msg, 512); res64 = gu_le64(res64); fail_if (check (fast_hash64_check512, &res64, sizeof(res64))); res64 = gu_fast_hash64 (test_msg, 2011); res64 = gu_le64(res64); fail_if (check (fast_hash64_check2011, &res64, sizeof(res64))); uint32_t res32; res32 = gu_fast_hash32 (test_msg, 0); res32 = gu_le32(res32); fail_if (check (fast_hash32_check0, &res32, sizeof(res32))); res32 = gu_fast_hash32 (test_msg, 31); res32 = gu_le32(res32); fail_if (check (fast_hash32_check31, &res32, sizeof(res32))); res32 = gu_fast_hash32 (test_msg, 32); res32 = gu_le32(res32); fail_if (check (fast_hash32_check32, &res32, sizeof(res32))); res32 = gu_fast_hash32 (test_msg, 511); res32 = gu_le32(res32); fail_if (check (fast_hash32_check511, &res32, sizeof(res32))); res32 = gu_fast_hash32 (test_msg, 512); res32 = gu_le32(res32); fail_if (check (fast_hash32_check512, &res32, sizeof(res32))); res32 = gu_fast_hash32 (test_msg, 2011); res32 = gu_le32(res32); fail_if (check (fast_hash32_check2011, &res32, sizeof(res32))); } END_TEST /* Tests table hash functions: * - for 64-bit platforms table hash should be identical to fast 64-bit hash, * - for 32-bit platforms table hash is different. */ #if GU_WORDSIZE == 64 START_TEST (gu_table_hash_test) { size_t res; fail_if (sizeof(res) > 8); res = gu_table_hash (test_msg, 0); res = gu_le64(res); fail_if (check (fast_hash64_check0, &res, sizeof(res))); res = gu_table_hash (test_msg, 15); res = gu_le64(res); fail_if (check (fast_hash64_check15, &res, sizeof(res))); res = gu_table_hash (test_msg, 16); res = gu_le64(res); fail_if (check (fast_hash64_check16, &res, sizeof(res))); res = gu_table_hash (test_msg, 511); res = gu_le64(res); fail_if (check (fast_hash64_check511, &res, sizeof(res))); res = gu_table_hash (test_msg, 512); res = gu_le64(res); fail_if (check (fast_hash64_check512, &res, sizeof(res))); res = gu_table_hash (test_msg, 2011); res = gu_le64(res); fail_if (check (fast_hash64_check2011, &res, sizeof(res))); } END_TEST #elif GU_WORDSIZE == 32 static const uint8_t table_hash32_check0 [4] = { 0x0B, 0x7C, 0x3E, 0xAB }; static const uint8_t table_hash32_check32 [4] = { 0x65, 0x16, 0x17, 0x42 }; static const uint8_t table_hash32_check2011[4] = { 0xF9, 0xBC, 0xEF, 0x7A }; START_TEST (gu_table_hash_test) { size_t res; fail_if (sizeof(res) > 4); res = gu_table_hash (test_msg, 0); res = gu_le32(res); fail_if (check (table_hash32_check0, &res, sizeof(res))); res = gu_table_hash (test_msg, 32); res = gu_le32(res); fail_if (check (table_hash32_check32, &res, sizeof(res))); res = gu_table_hash (test_msg, 2011); res = gu_le32(res); fail_if (check (table_hash32_check2011, &res, sizeof(res))); } END_TEST #else /* GU_WORDSIZE == 32 */ # error "Unsupported word size" #endif Suite *gu_hash_suite(void) { Suite *s = suite_create("Galera hash"); TCase *tc = tcase_create("gu_hash"); suite_add_tcase (s, tc); tcase_add_test (tc, gu_hash_test); tcase_add_test (tc, gu_fast_hash_test); tcase_add_test (tc, gu_table_hash_test); return s; } 
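/*
 * Illustrative sketch (not a test): one-shot use of the fast hash helpers
 * exercised above, with the same call shapes as in gu_fast_hash_test();
 * 'msg' and 'len' stand for any buffer and its length:
 *
 *     uint8_t h128[16];
 *     gu_fast_hash128 (msg, len, h128);              // 128-bit digest into h128
 *     uint64_t h64 = gu_fast_hash64 (msg, len);      // 64-bit digest
 *     uint32_t h32 = gu_fast_hash32 (msg, len);      // 32-bit digest
 *
 * The checks above compare these outputs against fixed byte arrays, so any
 * change in the underlying hash definitions shows up as a unit test failure.
 */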
galera-3-25.3.20/galerautils/tests/gu_fifo_test.c0000644000015300001660000001271613042054732021444 0ustar jenkinsjenkins// Copyright (C) 2007 Codership Oy // $Id$ #include #include "gu_fifo_test.h" #include "../src/galerautils.h" #define FIFO_LENGTH 10000L START_TEST (gu_fifo_test) { gu_fifo_t* fifo; long i; size_t* item; long used; fifo = gu_fifo_create (0, 1); fail_if (fifo != NULL); fifo = gu_fifo_create (1, 0); fail_if (fifo != NULL); fifo = gu_fifo_create (1, 1); fail_if (fifo == NULL); gu_fifo_close (fifo); mark_point(); gu_fifo_destroy (fifo); mark_point(); fifo = gu_fifo_create (FIFO_LENGTH, sizeof(i)); fail_if (fifo == NULL); fail_if (gu_fifo_length(fifo) != 0, "fifo->used is %lu for an empty FIFO", gu_fifo_length(fifo)); // fill FIFO for (i = 0; i < FIFO_LENGTH; i++) { item = gu_fifo_get_tail (fifo); fail_if (item == NULL, "could not get item %ld", i); *item = i; gu_fifo_push_tail (fifo); } used = i; fail_if (gu_fifo_length(fifo) != used, "used is %zu, expected %zu", used, gu_fifo_length(fifo)); // test pop for (i = 0; i < used; i++) { int err; item = gu_fifo_get_head (fifo, &err); fail_if (item == NULL, "could not get item %ld", i); fail_if (*item != (ulong)i, "got %ld, expected %ld", *item, i); gu_fifo_pop_head (fifo); } fail_if (gu_fifo_length(fifo) != 0, "gu_fifo_length() for empty queue is %ld", gu_fifo_length(fifo)); gu_fifo_close (fifo); int err; item = gu_fifo_get_head (fifo, &err); fail_if (item != NULL); fail_if (err != -ENODATA); gu_fifo_destroy (fifo); } END_TEST static pthread_mutex_t sync_mtx = PTHREAD_MUTEX_INITIALIZER; static pthread_cond_t sync_cond = PTHREAD_COND_INITIALIZER; #define ITEM 12345 static void* cancel_thread (void* arg) { gu_fifo_t* q = arg; /* sync with parent */ pthread_mutex_lock (&sync_mtx); pthread_cond_signal (&sync_cond); pthread_mutex_unlock (&sync_mtx); size_t* item; int err; /* try to get from non-empty queue */ item = gu_fifo_get_head (q, &err); fail_if (NULL != item, "Got item %p: %zu", item, item ? 
*item : 0); fail_if (-ECANCELED != err); /* signal end of the first gu_fifo_get_head() */ pthread_mutex_lock (&sync_mtx); pthread_cond_signal (&sync_cond); /* wait until gets are resumed */ pthread_cond_wait (&sync_cond, &sync_mtx); item = gu_fifo_get_head (q, &err); fail_if (NULL == item); fail_if (ITEM != *item); gu_fifo_pop_head (q); /* signal end of the 2nd gu_fifo_get_head() */ pthread_cond_signal (&sync_cond); pthread_mutex_unlock (&sync_mtx); /* try to get from empty queue (should block) */ item = gu_fifo_get_head (q, &err); fail_if (NULL != item); fail_if (-ECANCELED != err); /* signal end of the 3rd gu_fifo_get_head() */ pthread_mutex_lock (&sync_mtx); pthread_cond_signal (&sync_cond); /* wait until fifo is closed */ pthread_cond_wait (&sync_cond, &sync_mtx); item = gu_fifo_get_head (q, &err); fail_if (NULL != item); fail_if (-ECANCELED != err); /* signal end of the 4th gu_fifo_get_head() */ pthread_cond_signal (&sync_cond); /* wait until fifo is resumed */ pthread_cond_wait (&sync_cond, &sync_mtx); pthread_mutex_unlock (&sync_mtx); item = gu_fifo_get_head (q, &err); fail_if (NULL != item); fail_if (-ENODATA != err); return NULL; } START_TEST(gu_fifo_cancel_test) { gu_fifo_t* q = gu_fifo_create (FIFO_LENGTH, sizeof(size_t)); size_t* item = gu_fifo_get_tail (q); fail_if (item == NULL); *item = ITEM; gu_fifo_push_tail (q); pthread_mutex_lock (&sync_mtx); pthread_t thread; pthread_create (&thread, NULL, cancel_thread, q); /* sync with child thread */ gu_fifo_lock (q); pthread_cond_wait (&sync_cond, &sync_mtx); int err; err = gu_fifo_cancel_gets (q); fail_if (0 != err); err = gu_fifo_cancel_gets (q); fail_if (-EBADFD != err); /* allow the first gu_fifo_get_head() */ gu_fifo_release (q); mark_point(); /* wait for the first gu_fifo_get_head() to complete */ pthread_cond_wait (&sync_cond, &sync_mtx); err = gu_fifo_resume_gets (q); fail_if (0 != err); err = gu_fifo_resume_gets (q); fail_if (-EBADFD != err); /* signal that now gets are resumed */ pthread_cond_signal (&sync_cond); /* wait for the 2nd gu_fifo_get_head() to complete */ pthread_cond_wait (&sync_cond, &sync_mtx); /* wait a bit to make sure 3rd gu_fifo_get_head() is blocked * (even if it is not - still should work)*/ usleep (100000 /* 0.1s */); err = gu_fifo_cancel_gets (q); fail_if (0 != err); /* wait for the 3rd gu_fifo_get_head() to complete */ pthread_cond_wait (&sync_cond, &sync_mtx); gu_fifo_close (q); // closes for puts, but the q still must be canceled pthread_cond_signal (&sync_cond); /* wait for the 4th gu_fifo_get_head() to complete */ pthread_cond_wait (&sync_cond, &sync_mtx); gu_fifo_resume_gets (q); // resumes gets pthread_cond_signal (&sync_cond); pthread_mutex_unlock (&sync_mtx); mark_point(); pthread_join(thread, NULL); } END_TEST Suite *gu_fifo_suite(void) { Suite *s = suite_create("Galera FIFO functions"); TCase *tc = tcase_create("gu_fifo"); suite_add_tcase (s, tc); tcase_add_test (tc, gu_fifo_test); tcase_add_test (tc, gu_fifo_cancel_test); return s; } galera-3-25.3.20/galerautils/tests/gu_config_test.cpp0000644000015300001660000000354513042054732022326 0ustar jenkinsjenkins// Copyright (C) 2013-2014 Codership Oy // $Id$ #include "../src/gu_config.hpp" #include "gu_config_test.hpp" static std::string const key("test_key"); static std::string const another_key("another_key"); static std::string const str_value("123"); static long long const int_value( 123 ); START_TEST (gu_config_test) { gu::Config cnf; std::string svalue; long long ivalue; fail_if(cnf.has(key)); try { cnf.is_set(key); fail("gu::NotFound 
expected"); } catch(gu::NotFound&) {} cnf.add(key); fail_unless(cnf.has(key)); fail_if(cnf.is_set(key)); #define SUFFIX_CHECK(_suf_,_shift_) \ svalue = str_value + _suf_; \ cnf.set(key, svalue); \ fail_unless(cnf.is_set(key)); \ fail_unless(cnf.get(key) == svalue); \ ivalue = cnf.get(key); \ fail_if(ivalue != (int_value << _shift_)); SUFFIX_CHECK('T', 40); // check overflow checks try { ivalue = cnf.get(key); fail("gu::Exception expected"); } catch (gu::Exception&) {} try { ivalue = cnf.get(key); fail("gu::Exception expected"); } catch (gu::Exception&) {} try { ivalue = cnf.get(key); fail("gu::Exception expected"); } catch (gu::Exception&) {} SUFFIX_CHECK('G', 30); SUFFIX_CHECK('M', 20); SUFFIX_CHECK('K', 10); // try { cnf.add(key, str_value); fail("gu::Exception expected"); } // catch (gu::Exception& e) {} cnf.add(another_key, str_value); fail_unless(cnf.is_set(another_key)); ivalue = cnf.get(another_key); fail_if(ivalue != int_value); } END_TEST Suite *gu_config_suite(void) { Suite *s = suite_create("gu::Config"); TCase *tc = tcase_create("gu_config_test"); suite_add_tcase (s, tc); tcase_add_test (tc, gu_config_test); return s; } galera-3-25.3.20/galerautils/tests/gu_digest_test.hpp0000644000015300001660000000032313042054732022334 0ustar jenkinsjenkins// Copyright (C) 2012 Codership Oy // $Id$ #ifndef __gu_digest_test__ #define __gu_digest_test__ #include extern Suite *gu_digest_suite(void); #endif /* __gu_digest_test__ */ galera-3-25.3.20/galerautils/tests/gu_atomic_test.hpp0000644000015300001660000000031413042054732022331 0ustar jenkinsjenkins// Copyright (C) 2014 Codership Oy // $Id$ #ifndef __gu_atomic_test__ #define __gu_atomic_test__ #include Suite *gu_atomic_suite(void); #endif /* __gu_atomic_test__ */ galera-3-25.3.20/galerautils/tests/gu_vec_test.h0000644000015300001660000000031413042054732021272 0ustar jenkinsjenkins/* Copyright (C) 2013 Codership Oy * * $Id$ */ #ifndef __gu_vec_test__ #define __gu_vec_test__ #include extern Suite *gu_vec_suite(void); #endif /* __gu_vec_test__ */ galera-3-25.3.20/galerautils/tests/gu_net_test.cpp0000644000015300001660000000403313042054732021640 0ustar jenkinsjenkins// Copyright (C) 2009 Codership Oy #include #include #include #include #include #include #include #include #include "gu_logger.hpp" #include "gu_uri.hpp" #include "gu_resolver.hpp" #include "gu_lock.hpp" #include "gu_prodcons.hpp" #include "gu_net_test.hpp" using std::vector; using std::string; using std::deque; using std::mem_fun; using std::for_each; using namespace gu; using namespace gu::net; using namespace gu::prodcons; START_TEST(test_resolver) { std::string tcp_lh4("tcp://127.0.0.1:2002"); Addrinfo tcp_lh4_ai(resolve(tcp_lh4)); fail_unless(tcp_lh4_ai.get_family() == AF_INET); fail_unless(tcp_lh4_ai.get_socktype() == SOCK_STREAM); fail_unless(tcp_lh4_ai.to_string() == tcp_lh4, "%s != %s", tcp_lh4_ai.to_string().c_str(), tcp_lh4.c_str()); std::string tcp_lh6("tcp://[::1]:2002"); Addrinfo tcp_lh6_ai(resolve(tcp_lh6)); fail_unless(tcp_lh6_ai.get_family() == AF_INET6); fail_unless(tcp_lh6_ai.get_socktype() == SOCK_STREAM); fail_unless(tcp_lh6_ai.to_string() == tcp_lh6, "%s != %s", tcp_lh6_ai.to_string().c_str(), tcp_lh6.c_str()); std::string lh("tcp://localhost:2002"); Addrinfo lh_ai(resolve(lh)); fail_unless(lh_ai.to_string() == "tcp://127.0.0.1:2002" || lh_ai.to_string() == "tcp://[::1]:2002"); } END_TEST START_TEST(trac_288) { try { string url("tcp://do-not-resolve:0"); (void)resolve(url); } catch (Exception& e) { log_debug << "exception was " << e.what(); } } END_TEST Suite* 
gu_net_suite() { Suite* s = suite_create("galerautils++ Networking"); TCase* tc; tc = tcase_create("test_resolver"); tcase_add_test(tc, test_resolver); tcase_set_timeout(tc, 30); suite_add_tcase(s, tc); tc = tcase_create("trac_288"); tcase_add_test(tc, trac_288); #if 0 /* bogus test, commenting out for now */ suite_add_tcase(s, tc); #endif return s; } galera-3-25.3.20/galerautils/tests/gu_uuid_test.h0000644000015300001660000000030013042054732021456 0ustar jenkinsjenkins/* * Copyright (C) 2008 Codership Oy * * $Id$ */ #ifndef __gu_uuid_test__ #define __gu_uuid_test__ extern Suite *gu_uuid_suite(void); #endif /* __gu_uuid_test__ */ galera-3-25.3.20/galerautils/tests/gu_string_utils_test.cpp0000644000015300001660000000533613042054732023607 0ustar jenkinsjenkins// Copyright (C) 2009-2010 Codership Oy #include "gu_string_utils.hpp" #include "gu_string_utils_test.hpp" using std::string; using std::vector; START_TEST(test_strsplit) { string str = "foo bar baz"; vector vec = gu::strsplit(str, ' '); fail_unless(vec.size() == 3); fail_unless(vec[0] == "foo"); fail_unless(vec[1] == "bar"); fail_unless(vec[2] == "baz"); } END_TEST START_TEST(test_tokenize) { vector vec = gu::tokenize("", 'a', 'b', false); fail_unless(vec.size() == 0); vec = gu::tokenize("", 'a', 'b', true); fail_unless(vec.size() == 1); fail_unless(vec[0] == ""); vec = gu::tokenize("a", 'a', 'b', false); fail_unless(vec.size() == 0); vec = gu::tokenize("a", 'a', 'b', true); fail_unless(vec.size() == 2); fail_unless(vec[0] == ""); fail_unless(vec[1] == ""); vec = gu::tokenize("foo bar baz"); fail_unless(vec.size() == 3); fail_unless(vec[0] == "foo"); fail_unless(vec[1] == "bar"); fail_unless(vec[2] == "baz"); vec = gu::tokenize("foo\\ bar baz"); fail_unless(vec.size() == 2); fail_unless(vec[0] == "foo bar", "expected 'foo bar', found '%s'", vec[0].c_str()); fail_unless(vec[1] == "baz"); vec = gu::tokenize("foo\\;;bar;;baz;", ';', '\\', false); fail_unless(vec.size() == 3); fail_unless(vec[0] == "foo;"); fail_unless(vec[1] == "bar"); fail_unless(vec[2] == "baz"); vec = gu::tokenize("foo\\;;bar;;baz;", ';', '\\', true); fail_unless(vec.size() == 5, "vetor length %zu, expected 5", vec.size()); fail_unless(vec[0] == "foo;"); fail_unless(vec[1] == "bar"); fail_unless(vec[2] == ""); fail_unless(vec[3] == "baz"); fail_unless(vec[4] == ""); } END_TEST START_TEST(test_trim) { string full1 = ".,wklerf joweji"; string full2 = full1; gu::trim (full2); fail_if (full1 != full2); string part = " part "; gu::trim (part); fail_if (part.length() != 4); fail_if (0 != part.compare("part")); string empty; gu::trim (empty); fail_if (!empty.empty()); empty += ' '; empty += '\t'; empty += '\n'; empty += '\f'; fail_if (empty.empty()); gu::trim (empty); fail_if (!empty.empty(), "string contents: '%s', expected empty", empty.c_str()); } END_TEST Suite* gu_string_utils_suite(void) { Suite* s = suite_create("String Utils"); TCase* tc; tc = tcase_create("strsplit"); tcase_add_test(tc, test_strsplit); suite_add_tcase(s, tc); tc = tcase_create("tokenize"); tcase_add_test(tc, test_tokenize); suite_add_tcase(s, tc); tc = tcase_create("trim"); tcase_add_test(tc, test_trim); suite_add_tcase(s, tc); return s; } galera-3-25.3.20/galerautils/tests/gu_spooky_test.h0000644000015300001660000000032313042054732022041 0ustar jenkinsjenkins// Copyright (C) 2012 Codership Oy // $Id$ #ifndef __gu_spooky_test__ #define __gu_spooky_test__ #include extern Suite *gu_spooky_suite(void); #endif /* __gu_spooky_test__ */ 
galera-3-25.3.20/galerautils/tests/gu_to_test.c0000644000015300001660000002203413042054732021135 0ustar jenkinsjenkins/* * Copyright (C) 2008 Codership Oy * * $Id$ */ #include #include #include // printf() #include // strerror() #include // strtol(), exit(), EXIT_SUCCESS, EXIT_FAILURE #include // errno #include // gettimeofday() #include // usleep() #include #include struct thread_ctx { pthread_t thread; long thread_id; long stat_grabs; // how many times gcs_to_grab() was successful long stat_cancels;// how many times gcs_to_cancel() was called long stat_fails; // how many times gcs_to_grab() failed long stat_self; // how many times gcs_self_cancel() was called }; /* returns a semirandom number (hash) from seqno */ static inline ulong my_rnd (uint64_t x) { x = 2654435761U * x; // http://www.concentric.net/~Ttwang/tech/inthash.htm return (ulong)(x ^ (x >> 32)); // combine upper and lower halfs for better // randomness } /* whether to cancel self */ static inline ulong self_cancel (ulong rnd) { return !(rnd & 0xf); // will return TRUE once in 16 } /* how many other seqnos to cancel */ static inline ulong cancel (ulong rnd) { #if 0 // this causes probablity of conflict 88% // and average conflicts per seqno 3.5. Reveals a lot of corner cases return (rnd & 0x70) >> 4; // returns 0..7 #else // this is more realistic. // probability of conflict 25%, conflict rate 0.375 ulong ret = (rnd & 0x70) >> 4; // returns 0,0,0,0,0,0,1,2 if (gu_likely(ret < 5)) return 0; else return (ret - 5); #endif } /* offset of seqnos to cancel */ static inline ulong cancel_offset (ulong rnd) { return ((rnd & 0x700) >> 8) + 1; // returns 1 - 8 } static gu_to_t* to = NULL; static ulong thread_max = 16; // default number of threads static gu_seqno_t seqno_max = 1<<20; // default number of seqnos to check /* mutex to synchronize threads start */ static pthread_mutex_t start = PTHREAD_MUTEX_INITIALIZER; static const unsigned int t = 10; // optimal sleep time static const struct timespec tsleep = { 0, 10000000 }; // 10 ms void* run_thread(void* ctx) { struct thread_ctx* thd = ctx; gu_seqno_t seqno = thd->thread_id; // each thread starts with own offset // to guarantee uniqueness of seqnos // without having to lock mutex pthread_mutex_lock (&start); // wait for start signal pthread_mutex_unlock (&start); while (seqno < seqno_max) { long ret; ulong rnd = my_rnd(seqno); if (gu_unlikely(self_cancel(rnd))) { // printf("Self-cancelling %8llu\n", (unsigned long long)seqno); while ((ret = gu_to_self_cancel(to, seqno)) == -EAGAIN) usleep (t); if (gu_unlikely(ret)) { fprintf (stderr, "gu_to_self_cancel(%llu) returned %ld (%s)\n", (unsigned long long)seqno, ret, strerror(-ret)); exit (EXIT_FAILURE); } else { // printf ("Self-cancel success (%llu)\n", (unsigned long long)seqno); thd->stat_self++; } } else { // printf("Grabbing %8llu\n", (unsigned long long)seqno); while ((ret = gu_to_grab (to, seqno)) == -EAGAIN) nanosleep (&tsleep, NULL); if (gu_unlikely(ret)) { if (gu_likely(-ECANCELED == ret)) { // printf ("canceled (%llu)\n", (unsigned long long)seqno); thd->stat_fails++; } else { fprintf (stderr, "gu_to_grab(%llu) returned %ld (%s)\n", (unsigned long long)seqno, ret, strerror(-ret)); exit (EXIT_FAILURE); } } else { long cancels = cancel(rnd); // printf ("success (%llu), cancels = %ld\n", (unsigned long long)seqno, cancels); if (gu_likely(cancels)) { long offset = cancel_offset (rnd); gu_seqno_t cancel_seqno = seqno + offset; while (cancels-- && (cancel_seqno < seqno_max)) { ret = gu_to_cancel(to, cancel_seqno); if 
(gu_unlikely(ret)) { fprintf (stderr, "gu_to_cancel(%llu) by %llu " "failed: %s\n", (unsigned long long)cancel_seqno, (unsigned long long)seqno, strerror (-ret)); exit (EXIT_FAILURE); } else { // printf ("%llu canceled %llu\n", // seqno, cancel_seqno); cancel_seqno += offset; thd->stat_cancels++; } } } thd->stat_grabs++; ret = gu_to_release(to, seqno); if (gu_unlikely(ret)) { fprintf (stderr, "gu_to_release(%llu) failed: %ld(%s)\n", (unsigned long long)seqno, ret, strerror(-ret)); exit (EXIT_FAILURE); } } } seqno += thread_max; // this together with unique starting point // guarantees that seqnos are unique } // printf ("Thread %ld exiting. Last seqno = %llu\n", // thd->thread_id, (unsigned long long)(seqno - thread_max)); return NULL; } int main (int argc, char* argv[]) { // minimum to length required by internal logic ulong to_len = cancel(0xffffffff) * cancel_offset(0xffffffff); errno = 0; if (argc > 1) seqno_max = (1 << atol(argv[0])); if (argc > 2) thread_max = (1 << atol(argv[1])); if (errno) { fprintf (stderr, "Usage: %s [seqno [threads]]\nBoth seqno and threads" "are exponents of 2^n.\n", argv[0]); exit(errno); } printf ("Starting with %lu threads and %llu maximum seqno.\n", thread_max, (unsigned long long)seqno_max); /* starting with 0, enough space for all threads and cancels */ // 4 is a magic number to get it working without excessive sleep on amd64 to_len = to_len > thread_max ? to_len : thread_max; to_len *= 4; to = gu_to_create (to_len, 0); if (to != NULL) { printf ("Created TO monitor of length %lu\n", to_len); } else { exit (-ENOMEM); } /* main block */ { long i, ret; clock_t start_clock, stop_clock; double time_spent; struct thread_ctx thread[thread_max]; pthread_mutex_lock (&start); { /* initialize threads */ for (i = 0; i < thread_max; i++) { thread[i].thread_id = i; thread[i].stat_grabs = 0; thread[i].stat_cancels = 0; thread[i].stat_fails = 0; thread[i].stat_self = 0; ret = pthread_create(&(thread[i].thread), NULL, run_thread, &thread[i]); if (ret) { fprintf (stderr, "Failed to create thread %ld: %s", i, strerror(ret)); exit (EXIT_FAILURE); } } start_clock = clock(); } pthread_mutex_unlock (&start); // release threads /* wait for threads to complete and accumulate statistics */ pthread_join (thread[0].thread, NULL); for (i = 1; i < thread_max; i++) { pthread_join (thread[i].thread, NULL); thread[0].stat_grabs += thread[i].stat_grabs; thread[0].stat_cancels += thread[i].stat_cancels; thread[0].stat_fails += thread[i].stat_fails; thread[0].stat_self += thread[i].stat_self; } stop_clock = clock(); time_spent = gu_clock_diff (stop_clock,start_clock); /* print statistics */ printf ("%llu seqnos in %.3f seconds (%.3f seqno/sec)\n", (unsigned long long)seqno_max, time_spent, ((double) seqno_max)/time_spent); printf ("Overhead at 10000 actions/second: %.2f%%\n", (time_spent * 10000 * 100/* for % */)/seqno_max); printf ("Grabbed: %9lu\n" "Failed: %9lu\n" "Self-cancelled: %9lu\n" "Canceled: %9lu (can exceed total number of seqnos)\n", thread[0].stat_grabs, thread[0].stat_fails, thread[0].stat_self, thread[0].stat_cancels ); if (seqno_max != (thread[0].stat_grabs+thread[0].stat_fails+thread[0].stat_self)) { fprintf (stderr, "Error: total number of grabbed, failed and " "self-cancelled waiters does not match total seqnos.\n"); exit (EXIT_FAILURE); } } return 0; } galera-3-25.3.20/galerautils/tests/gu_string_test.cpp0000644000015300001660000000532613042054732022366 0ustar jenkinsjenkins/* Copyright (C) 2013 Codership Oy * * $Id$ */ #include "../src/gu_string.hpp" #include 
"gu_string_test.hpp" START_TEST (ctor_test) { gu::String<8> str1; // default fail_if (str1.size() != 0); fail_if (strlen(str1.c_str()) != 0); const char* const test_string1("test"); gu::String<8> str2(test_string1); // from char* fail_if (str2.size() != strlen(test_string1)); fail_if (strcmp(str2.c_str(), test_string1)); gu::String<2> str3(str2); // copy ctor fail_if (str3.size() != str2.size()); fail_if (strcmp(str2.c_str(), str3.c_str())); std::string const std_string(str3.c_str()); gu::String<4> str4(std_string); // from std::string fail_if (str4.size() != strlen(test_string1)); fail_if (strcmp(str4.c_str(), test_string1)); gu::String<5> str5(test_string1, 2); fail_if (str5.size() != 2); fail_if (strncmp(str5.c_str(), test_string1, 2)); } END_TEST START_TEST (func_test) { gu::String<16> str; fail_if (str.size() != 0); fail_if (strlen(str.c_str()) != 0); const char* const buf_ptr(str.c_str()); str = "one"; str << std::string("two") << gu::String<8>("three"); fail_if (strcmp(str.c_str(), "onetwothree")); fail_if (str.c_str() != buf_ptr); str += "blast!"; // this should spill to heap fail_if (strcmp(str.c_str(), "onetwothreeblast!"), "expected 'onetwothreeblast!' got '%s'", str.c_str()); fail_if (str.c_str() == buf_ptr); str = gu::String<2>("back to stack"); fail_if (str != "back to stack"); fail_if (str != gu::String<>("back to stack")); fail_if (str != std::string("back to stack")); fail_if (str.c_str() != buf_ptr); typedef void* pointer; // conversions fail_if ((gu::String<>() << true) != "true"); fail_if ((gu::String<>() << 0.0123) != "0.012300"); if (sizeof(pointer) == 4) fail_if ((gu::String<>() << pointer(0xdeadbeef))!="0xdeadbeef"); else fail_if ((gu::String<>() << pointer(0xdeadbeef))!="0x00000000deadbeef"); fail_if ((gu::String<>() << 1234567890) != "1234567890"); fail_if ((gu::String<>() << 12345U) != "12345"); fail_if ((gu::String<>() << 'a') != "a"); fail_if ((gu::String<>() << 0xdeadbeef) != "3735928559"); fail_if ((gu::String<>() << gu::Fmt("%010x") << 0xdeadbeef) !="00deadbeef"); } END_TEST Suite* gu_string_suite(void) { Suite* s = suite_create ("gu::String"); TCase* t = tcase_create ("ctor_test"); tcase_add_test (t, ctor_test); suite_add_tcase (s, t); t = tcase_create ("func_test"); tcase_add_test (t, func_test); suite_add_tcase (s, t); return s; } galera-3-25.3.20/galerautils/tests/gu_tests.c0000644000015300001660000000345313042054732020622 0ustar jenkinsjenkins// Copyright (C) 2007-2012 Codership Oy // $Id$ #include // printf() #include // strcmp() #include // EXIT_SUCCESS | EXIT_FAILURE #include #include "../src/gu_conf.h" #include "gu_mem_test.h" #include "gu_vec_test.h" #include "gu_bswap_test.h" #include "gu_fnv_test.h" #include "gu_mmh3_test.h" #include "gu_spooky_test.h" #include "gu_crc32c_test.h" #include "gu_hash_test.h" #include "gu_dbug_test.h" #include "gu_time_test.h" #include "gu_fifo_test.h" #include "gu_uuid_test.h" #include "gu_lock_step_test.h" #include "gu_str_test.h" #include "gu_utils_test.h" typedef Suite *(*suite_creator_t)(void); static suite_creator_t suites[] = { gu_mem_suite, gu_vec_suite, gu_bswap_suite, gu_fnv_suite, gu_mmh3_suite, gu_spooky_suite, gu_crc32c_suite, gu_hash_suite, gu_dbug_suite, gu_time_suite, gu_fifo_suite, gu_uuid_suite, gu_lock_step_suite, gu_str_suite, gu_utils_suite, NULL }; int main(int argc, char* argv[]) { int no_fork = ((argc > 1) && !strcmp(argv[1], "nofork")) ? 
1 : 0; int i = 0; int failed = 0; FILE* log_file = NULL; if (!no_fork) { log_file = fopen ("gu_tests.log", "w"); if (!log_file) return EXIT_FAILURE; gu_conf_set_log_file (log_file); } gu_conf_debug_on(); while (suites[i]) { SRunner* sr = srunner_create(suites[i]()); if (no_fork) srunner_set_fork_status(sr, CK_NOFORK); srunner_run_all (sr, CK_NORMAL); failed += srunner_ntests_failed (sr); srunner_free (sr); i++; } if (log_file) { fclose (log_file); } printf ("Total tests failed: %d\n", failed); return (failed == 0) ? EXIT_SUCCESS : EXIT_FAILURE; } galera-3-25.3.20/galerautils/tests/gu_dbug_test.h0000644000015300001660000000026013042054732021436 0ustar jenkinsjenkins// Copyright (C) 2008 Codership Oy // $Id$ #ifndef __gu_dbug_test__ #define __gu_dbug_test__ Suite *gu_dbug_suite(void); #endif /* __gu_dbug_test__ */ galera-3-25.3.20/galerautils/tests/gu_tests++.cpp0000644000015300001660000000166013042054732021306 0ustar jenkinsjenkins// Copyright (C) 2009 Codership Oy #include #include #include extern "C" { #include "../src/gu_conf.h" } #include "gu_tests++.hpp" int main(int argc, char* argv[]) { bool no_fork = (argc >= 2 && std::string(argv[1]) == "nofork"); FILE* log_file = 0; if (!no_fork) { log_file = fopen (LOG_FILE, "w"); if (!log_file) return EXIT_FAILURE; gu_conf_set_log_file (log_file); } gu_conf_debug_on(); int failed = 0; for (int i = 0; suites[i] != 0; ++i) { SRunner* sr = srunner_create(suites[i]()); if (no_fork) srunner_set_fork_status(sr, CK_NOFORK); srunner_run_all(sr, CK_NORMAL); failed += srunner_ntests_failed(sr); srunner_free(sr); } if (log_file != 0) fclose(log_file); printf ("Total tests failed: %d\n", failed); return failed == 0 ? EXIT_SUCCESS : EXIT_FAILURE; } galera-3-25.3.20/galerautils/tests/gu_digest_test.cpp0000644000015300001660000002171013042054732022332 0ustar jenkinsjenkins// Copyright (C) 2012 Codership Oy /* * This unit test is mostly to check that Galera hash definitions didn't change: * correctness of hash algorithms definitions is checked in respective unit * tests. * * By convention checks are made against etalon byte arrays, so integers must be * converted to little-endian. * * $Id$ */ #include "../src/gu_digest.hpp" #include "gu_digest_test.hpp" #include "../src/gu_hexdump.hpp" #include "../src/gu_logger.hpp" /* checks equivalence of two buffers, returns true if check fails and logs * buffer contents. 
*/ static bool check (const void* const exp, const void* const got, ssize_t size) { if (memcmp (exp, got, size)) { log_info << "expected hash value:\n" << gu::Hexdump(exp, size) << "\nfound:\n" << gu::Hexdump(got, size) << "\n"; return true; } return false; } static const char test_msg[2048] = { 0, }; #define GU_HASH_TEST_LENGTH 43 /* some random prime */ static const uint8_t gu_hash128_check[16] = { 0xFA,0x2C,0x78,0x67,0x35,0x99,0xD9,0x84,0x73,0x41,0x3F,0xA5,0xEB,0x27,0x40,0x2F }; static const uint8_t gu_hash64_check[8] = { 0xFA,0x2C,0x78,0x67,0x35,0x99,0xD9,0x84 }; static const uint8_t gu_hash32_check[4] = { 0xFA,0x2C,0x78,0x67 }; /* Tests partial hashing functions */ START_TEST (gu_hash_test) { gu::Hash hash_one; hash_one.append(test_msg, GU_HASH_TEST_LENGTH); uint8_t res128_one[16]; hash_one.gather(res128_one); fail_if (check (gu_hash128_check, res128_one, sizeof(res128_one)), "gu::Hash::gather() failed in single mode."); gu::Hash::digest(test_msg, GU_HASH_TEST_LENGTH, res128_one); fail_if (check (gu_hash128_check, res128_one, sizeof(res128_one)), "gu::Hash::digest() failed."); gu::Hash hash_multi; int off = 0; hash_multi.append(test_msg, 16); off += 16; hash_multi.append(test_msg + off, 15); off += 15; hash_multi.append(test_msg + off, 7); off += 7; hash_multi.append(test_msg + off, 5); off += 5; fail_if (off != GU_HASH_TEST_LENGTH); uint8_t res128_multi[16]; hash_multi.gather(res128_multi); fail_if (check (gu_hash128_check, res128_multi, sizeof(res128_multi)), "gu::Hash::gather() failed in multi mode."); uint64_t res64; hash_multi.gather(&res64); uint64_t const res(gu_hash64(test_msg, GU_HASH_TEST_LENGTH)); fail_if (res != res64, "got 0x%0llx, expected 0x%llx", res64, res); res64 = gu_le64(res64); fail_if (check (gu_hash64_check, &res64, sizeof(res64)), "gu::Hash::gather() failed."); uint32_t res32; hash_one(res32); fail_if (gu_hash32(test_msg, GU_HASH_TEST_LENGTH) != res32); res32 = gu_le32(res32); fail_if (check (gu_hash32_check, &res32, sizeof(res32)), "gu::Hash::gather() failed."); } END_TEST static const uint8_t fast_hash128_check0 [16] = { 0xA9,0xCE,0x5A,0x56,0x0C,0x0B,0xF7,0xD6,0x63,0x4F,0x6F,0x81,0x0E,0x0B,0xF2,0x0A }; static const uint8_t fast_hash128_check511 [16] = { 0xC6,0x7F,0x4C,0xE7,0x6F,0xE0,0xDA,0x14,0xCC,0x9F,0x21,0x76,0xAF,0xB5,0x12,0x1A }; static const uint8_t fast_hash128_check512 [16] = { 0x38,0x8D,0x2B,0x90,0xC8,0x7F,0x11,0x53,0x3F,0xB4,0x32,0xC1,0xD7,0x2B,0x04,0x39 }; static const uint8_t fast_hash128_check2011[16] = { 0xB7,0xCE,0x75,0xC7,0xB4,0x31,0xBC,0xC8,0x95,0xB3,0x41,0xB8,0x5B,0x8E,0x77,0xF9 }; static const uint8_t fast_hash64_check0 [8] = { 0x6C, 0x55, 0xB8, 0xA1, 0x02, 0xC6, 0x21, 0xCA }; static const uint8_t fast_hash64_check15 [8] = { 0x28, 0x49, 0xE8, 0x34, 0x7A, 0xAB, 0x49, 0x34 }; static const uint8_t fast_hash64_check16 [8] = { 0x44, 0x40, 0x2C, 0x82, 0xD3, 0x8D, 0xAA, 0xFE }; static const uint8_t fast_hash64_check511 [8] = { 0xC6, 0x7F, 0x4C, 0xE7, 0x6F, 0xE0, 0xDA, 0x14 }; static const uint8_t fast_hash64_check512 [8] = { 0x38, 0x8D, 0x2B, 0x90, 0xC8, 0x7F, 0x11, 0x53 }; static const uint8_t fast_hash64_check2011[8] = { 0xB7, 0xCE, 0x75, 0xC7, 0xB4, 0x31, 0xBC, 0xC8 }; static const uint8_t fast_hash32_check0 [4] = { 0x0B, 0x7C, 0x3E, 0xAB }; static const uint8_t fast_hash32_check31 [4] = { 0x1E, 0xFF, 0x48, 0x38 }; static const uint8_t fast_hash32_check32 [4] = { 0x63, 0xC2, 0x53, 0x0D }; static const uint8_t fast_hash32_check511 [4] = { 0xC6, 0x7F, 0x4C, 0xE7 }; static const uint8_t fast_hash32_check512 [4] = { 0x38, 0x8D, 0x2B, 0x90 }; static 
const uint8_t fast_hash32_check2011[4] = { 0xB7, 0xCE, 0x75, 0xC7 }; /* Tests fast hash functions */ START_TEST (gu_fast_hash_test) { uint8_t res128[16]; gu::FastHash::digest (test_msg, 0, res128); fail_if (check (fast_hash128_check0, res128, sizeof(res128))); gu::FastHash::digest (test_msg, 511, res128); fail_if (check (fast_hash128_check511, res128, sizeof(res128))); gu::FastHash::digest (test_msg, 512, res128); fail_if (check (fast_hash128_check512, res128, sizeof(res128))); gu::FastHash::digest (test_msg, 2011, res128); fail_if (check (fast_hash128_check2011, res128, sizeof(res128))); uint64_t res64; res64 = gu::FastHash::digest(test_msg, 0); res64 = gu_le64(res64); fail_if (check (fast_hash64_check0, &res64, sizeof(res64))); res64 = gu::FastHash::digest(test_msg,15); res64 = gu_le64(res64); fail_if (check (fast_hash64_check15, &res64, sizeof(res64))); res64 = gu::FastHash::digest(test_msg,16); res64 = gu_le64(res64); fail_if (check (fast_hash64_check16, &res64, sizeof(res64))); res64 = gu::FastHash::digest(test_msg,511); res64 =gu_le64(res64); fail_if (check (fast_hash64_check511, &res64, sizeof(res64))); res64 = gu::FastHash::digest(test_msg,512); res64 =gu_le64(res64); fail_if (check (fast_hash64_check512, &res64, sizeof(res64))); res64 = gu::FastHash::digest(test_msg,2011);res64 =gu_le64(res64); fail_if (check (fast_hash64_check2011, &res64, sizeof(res64))); uint32_t res32; res32 = gu::FastHash::digest(test_msg, 0); res32 = gu_le32(res32); fail_if (check (fast_hash32_check0, &res32, sizeof(res32))); res32 = gu::FastHash::digest(test_msg,31); res32 = gu_le32(res32); fail_if (check (fast_hash32_check31, &res32, sizeof(res32))); res32 = gu::FastHash::digest(test_msg,32); res32 = gu_le32(res32); fail_if (check (fast_hash32_check32, &res32, sizeof(res32))); res32 = gu::FastHash::digest(test_msg,511); res32 =gu_le32(res32); fail_if (check (fast_hash32_check511, &res32, sizeof(res32))); res32 = gu::FastHash::digest(test_msg,512); res32 =gu_le32(res32); fail_if (check (fast_hash32_check512, &res32, sizeof(res32))); res32 = gu::FastHash::digest(test_msg,2011); res32=gu_le32(res32); fail_if (check (fast_hash32_check2011, &res32, sizeof(res32))); } END_TEST #if SKIP_TABLE_FUNCTIONS /* Tests table hash functions: * - for 64-bit platforms table hash should be identical to fast 64-bit hash, * - for 32-bit platforms table hash is different. 
*/ #if GU_WORDSIZE == 64 START_TEST (gu_table_hash_test) { size_t res; fail_if (sizeof(res) > 8); res = gu_table_hash (test_msg, 0); res = gu_le64(res); fail_if (check (fast_hash64_check0, &res, sizeof(res))); res = gu_table_hash (test_msg, 15); res = gu_le64(res); fail_if (check (fast_hash64_check15, &res, sizeof(res))); res = gu_table_hash (test_msg, 16); res = gu_le64(res); fail_if (check (fast_hash64_check16, &res, sizeof(res))); res = gu_table_hash (test_msg, 511); res = gu_le64(res); fail_if (check (fast_hash64_check511, &res, sizeof(res))); res = gu_table_hash (test_msg, 512); res = gu_le64(res); fail_if (check (fast_hash64_check512, &res, sizeof(res))); res = gu_table_hash (test_msg, 2011); res = gu_le64(res); fail_if (check (fast_hash64_check2011, &res, sizeof(res))); } END_TEST #elif GU_WORDSIZE == 32 static const uint8_t table_hash32_check0 [4] = { 0x0B, 0x7C, 0x3E, 0xAB }; static const uint8_t table_hash32_check32 [4] = { 0x65, 0x16, 0x17, 0x42 }; static const uint8_t table_hash32_check2011[4] = { 0xF9, 0xBC, 0xEF, 0x7A }; START_TEST (gu_table_hash_test) { size_t res; fail_if (sizeof(res) > 4); res = gu_table_hash (test_msg, 0); res = gu_le32(res); fail_if (check (table_hash32_check0, &res, sizeof(res))); res = gu_table_hash (test_msg, 32); res = gu_le32(res); fail_if (check (table_hash32_check32, &res, sizeof(res))); res = gu_table_hash (test_msg, 2011); res = gu_le32(res); fail_if (check (table_hash32_check2011, &res, sizeof(res))); } END_TEST #else /* GU_WORDSIZE == 32 */ # error "Unsupported word size" #endif #endif // SKIP_TABLE_FUNCTIONS Suite *gu_digest_suite(void) { Suite *s = suite_create("gu::Hash"); TCase *tc = tcase_create("gu_hash"); suite_add_tcase (s, tc); tcase_add_test (tc, gu_hash_test); tcase_add_test (tc, gu_fast_hash_test); // tcase_add_test (tc, gu_table_hash_test); return s; } galera-3-25.3.20/galerautils/tests/gu_vec_test.c0000644000015300001660000000171513042054732021273 0ustar jenkinsjenkins/* Copyright (C) 2013 Codership Oy * * $Id$ */ #include "gu_vec_test.h" #include "../src/gu_vec16.h" START_TEST (vec16_test) { gu_vec16_t v1 = gu_vec16_from_byte (0); gu_vec16_t v2 = gu_vec16_from_byte (0); gu_vec16_t v3 = gu_vec16_from_byte (7); fail_if (!gu_vec16_eq(v1, v2)); fail_if (gu_vec16_eq(v1, v3)); unsigned char a1[16], a2[16], a3[16]; fail_if (sizeof(v1) != sizeof(a1)); unsigned int i; for (i = 0; i < sizeof(a1); i++) { a1[i] = i; a2[i] = i * i; a3[i] = a1[i] ^ a2[i]; } v1 = gu_vec16_from_ptr (a1); v2 = gu_vec16_from_ptr (a2); fail_if (gu_vec16_eq(v1, v2)); v3 = gu_vec16_xor (v1, v2); fail_if (memcmp (&v3, a3, sizeof(a3))); } END_TEST Suite* gu_vec_suite(void) { TCase* t = tcase_create ("vec16"); tcase_add_test (t, vec16_test); Suite* s = suite_create ("Vector math"); suite_add_tcase (s, t); return s; } galera-3-25.3.20/galerautils/tests/gu_time_test.c0000644000015300001660000000157413042054732021457 0ustar jenkinsjenkins// Copyright (C) 2007 Codership Oy // $Id$ #include #include #include "gu_time_test.h" #include "../src/gu_time.h" START_TEST (gu_time_test) { struct timeval left = { 1, 900000 }; // 1.9 sec struct timeval right = { 5, 400000 }; // 5.4 sec double diff, tolerance = 1.0e-15; // double precision tolerance diff = gu_timeval_diff (&left, &right); fail_if (fabs(3.5 + diff) > tolerance, "Expected %f, got %f, delta: %e", -3.5, diff, 3.5 + diff); diff = gu_timeval_diff (&right, &left); fail_if (fabs(3.5 - diff) > tolerance, "Expected %f, got %f, delta: %e", 3.5, diff, 3.5 - diff); } END_TEST Suite *gu_time_suite(void) { Suite *s = 
suite_create("Galera time functions"); TCase *tc = tcase_create("gu_time"); suite_add_tcase (s, tc); tcase_add_test (tc, gu_time_test); return s; } galera-3-25.3.20/galerautils/tests/gu_str_test.c0000644000015300001660000000467413042054732021335 0ustar jenkinsjenkins #include "gu_str.h" #include START_TEST(test_append) { const char* strs[3] = { "t", "ttt", "tttttttt" }; char* str = NULL; size_t off = 0; size_t i; for (i = 0; i < 3; ++i) { str = gu_str_append(str, &off, strs[i], strlen(strs[i])); } free(str); } END_TEST START_TEST(test_scan) { const char* strs[5] = { "1", "234", "56789abc", "4657777777777", "345" }; char* str = NULL; size_t off = 0; size_t len = 0; size_t i; const char* ptr; for (i = 0; i < 5; ++i) { str = gu_str_append(str, &off, strs[i], strlen(strs[i])); len += strlen(strs[i]) + 1; } ptr = str; for (i = 0; i < 5; ++i) { fail_unless(strcmp(ptr, strs[i]) == 0); ptr = gu_str_next(ptr); } fail_unless(ptr == len + str); for (i = 0; i < 5; ++i) { ptr = gu_str_advance(str, i); fail_unless(strcmp(ptr, strs[i]) == 0); } free(str); } END_TEST START_TEST(test_str_table) { size_t n_cols = 5; char const* col_names[5] = { "col1", "column2", "foo", "bar", "zzz" }; size_t n_rows = 255; const char* row[5] = {"dddd", "asdfasdf", "sadfdf", "", "a"}; const char* name = "test_table"; char* str = NULL; size_t off = 0; size_t i; str = gu_str_table_set_name(str, &off, name); fail_unless(strcmp(gu_str_table_get_name(str), name) == 0); str = gu_str_table_set_n_cols(str, &off, n_cols); fail_unless(gu_str_table_get_n_cols(str) == n_cols); str = gu_str_table_set_n_rows(str, &off, n_rows); fail_unless(gu_str_table_get_n_rows(str) == n_rows); str = gu_str_table_set_cols(str, &off, n_cols, col_names); for (i = 0; i < n_rows; ++i) { str = gu_str_table_append_row(str, &off, n_cols, row); } mark_point(); FILE* tmp = fopen("/dev/null", "w"); fail_if (NULL == tmp); gu_str_table_print(tmp, str); fclose(tmp); free(str); } END_TEST Suite* gu_str_suite() { Suite* s = suite_create("Galera Str util suite"); TCase* tc; tc = tcase_create("test_append"); tcase_add_test(tc, test_append); suite_add_tcase(s, tc); tc = tcase_create("test_scan"); tcase_add_test(tc, test_scan); suite_add_tcase(s, tc); tc = tcase_create("test_str_table"); tcase_add_test(tc, test_str_table); suite_add_tcase(s, tc); return s; } galera-3-25.3.20/galerautils/tests/gu_vector_test.hpp0000644000015300001660000000033113042054732022356 0ustar jenkinsjenkins/* Copyright (C) 2013 Codership Oy * * $Id$ */ #ifndef __gu_vector_test__ #define __gu_vector_test__ #include extern Suite *gu_vector_suite(void); #endif /* __gu_vector_test__ */ galera-3-25.3.20/galerautils/tests/gu_lock_step_test.c0000644000015300001660000000703413042054732022501 0ustar jenkinsjenkins/* * Copyright (C) 2008-2010 Codership Oy * * $Id$ */ #include #include // usleep() #include // strerror() #include "../src/gu_log.h" #include "../src/gu_lock_step.h" #include "gu_lock_step_test.h" #define TEST_USLEEP 1000 // 1ms #define WAIT_FOR(cond) \ { int count = 1000; while (--count && !(cond)) { usleep (TEST_USLEEP); }} gu_lock_step_t LS; static void* lock_step_thread (void* arg) { gu_lock_step_wait (&LS); return NULL; } START_TEST (gu_lock_step_test) { const long timeout = 500; // 500 ms long ret; gu_thread_t thr1, thr2; gu_lock_step_init (&LS); fail_if (LS.wait != 0); fail_if (LS.enabled != false); // first try with lock-stepping disabled ret = gu_thread_create (&thr1, NULL, lock_step_thread, NULL); fail_if (ret != 0); WAIT_FOR(0 == LS.wait); // 10ms fail_if (LS.wait != 0); // by default 
lock-step is disabled ret = gu_thread_join (thr1, NULL); fail_if (ret != 0, "gu_thread_join() failed: %ld (%s)", ret, strerror(ret)); ret = gu_lock_step_cont (&LS, timeout); fail_if (-1 != ret); // enable lock-step gu_lock_step_enable (&LS, true); fail_if (LS.enabled != true); ret = gu_lock_step_cont (&LS, timeout); fail_if (0 != ret); // nobody's waiting ret = gu_thread_create (&thr1, NULL, lock_step_thread, NULL); fail_if (ret != 0); WAIT_FOR(1 == LS.wait); // 10ms fail_if (LS.wait != 1); ret = gu_thread_create (&thr2, NULL, lock_step_thread, NULL); fail_if (ret != 0); WAIT_FOR(2 == LS.wait); // 10ms fail_if (LS.wait != 2); ret = gu_lock_step_cont (&LS, timeout); fail_if (ret != 2); // there were 2 waiters fail_if (LS.wait != 1); // 1 waiter remains ret = gu_lock_step_cont (&LS, timeout); fail_if (ret != 1); fail_if (LS.wait != 0); // 0 waiters remain ret = gu_thread_join (thr1, NULL); fail_if (ret != 0, "gu_thread_join() failed: %ld (%s)", ret, strerror(ret)); ret = gu_thread_join (thr2, NULL); fail_if (ret != 0, "gu_thread_join() failed: %ld (%s)", ret, strerror(ret)); ret = gu_lock_step_cont (&LS, timeout); fail_if (ret != 0); // there were 0 waiters fail_if (LS.wait != 0, "Expected LS.wait to be 0, found: %ld", LS.wait); gu_lock_step_destroy (&LS); } END_TEST #define RACE_ITERATIONS 1000 static void* lock_step_race (void* arg) { long i; for (i = 0; i < RACE_ITERATIONS; i++) gu_lock_step_wait (&LS); return NULL; } START_TEST (gu_lock_step_race) { const long timeout = 500; // 500 ms long ret, i; gu_thread_t thr1; gu_lock_step_init (&LS); gu_lock_step_enable (&LS, true); fail_if (LS.enabled != true); ret = gu_thread_create (&thr1, NULL, lock_step_race, NULL); fail_if (ret != 0); for (i = 0; i < RACE_ITERATIONS; i++) { ret = gu_lock_step_cont (&LS, timeout); fail_if (ret != 1, "No waiter at iteration: %ld", i); } fail_if (LS.wait != 0); // 0 waiters remain ret = gu_thread_join (thr1, NULL); fail_if (ret != 0, "gu_thread_join() failed: %ld (%s)", ret, strerror(ret)); ret = gu_lock_step_cont (&LS, timeout); fail_if (ret != 0); } END_TEST Suite *gu_lock_step_suite(void) { Suite *suite = suite_create("Galera LOCK_STEP utils"); TCase *tcase = tcase_create("gu_lock_step"); suite_add_tcase (suite, tcase); tcase_add_test (tcase, gu_lock_step_test); tcase_add_test (tcase, gu_lock_step_race); return suite; } galera-3-25.3.20/galerautils/tests/gu_spooky_test.c0000644000015300001660000001743013042054732022043 0ustar jenkinsjenkins// Copyright (C) 2012 Codership Oy /*! 
* Original Bob Jenkins' test implementation: * http://www.burtleburtle.net/bob/c/testspooky.cpp * * $Id$ */ #include "gu_spooky_test.h" #include "../src/gu_spooky.h" #include "../src/gu_hexdump.h" #define BUFSIZE 512 static uint64_t const expected[BUFSIZE] = { 0xa24295ec, 0xfe3a05ce, 0x257fd8ef, 0x3acd5217, 0xfdccf85c, 0xc7b5f143, 0x3b0c3ff0, 0x5220f13c, 0xa6426724, 0x4d5426b4, 0x43e76b26, 0x051bc437, 0xd8f28a02, 0x23ccc30e, 0x811d1a2d, 0x039128d4, 0x9cd96a73, 0x216e6a8d, 0x97293fe8, 0xe4fc6d09, 0x1ad34423, 0x9722d7e4, 0x5a6fdeca, 0x3c94a7e1, 0x81a9a876, 0xae3f7c0e, 0x624b50ee, 0x875e5771, 0x0095ab74, 0x1a7333fb, 0x056a4221, 0xa38351fa, 0x73f575f1, 0x8fded05b, 0x9097138f, 0xbd74620c, 0x62d3f5f2, 0x07b78bd0, 0xbafdd81e, 0x0638f2ff, 0x1f6e3aeb, 0xa7786473, 0x71700e1d, 0x6b4625ab, 0xf02867e1, 0xb2b2408f, 0x9ce21ce5, 0xa62baaaf, 0x26720461, 0x434813ee, 0x33bc0f14, 0xaaab098a, 0x750af488, 0xc31bf476, 0x9cecbf26, 0x94793cf3, 0xe1a27584, 0xe80c4880, 0x1299f748, 0x25e55ed2, 0x405e3feb, 0x109e2412, 0x3e55f94f, 0x59575864, 0x365c869d, 0xc9852e6a, 0x12c30c62, 0x47f5b286, 0xb47e488d, 0xa6667571, 0x78220d67, 0xa49e30b9, 0x2005ef88, 0xf6d3816d, 0x6926834b, 0xe6116805, 0x694777aa, 0x464af25b, 0x0e0e2d27, 0x0ea92eae, 0x602c2ca9, 0x1d1d79c5, 0x6364f280, 0x939ee1a4, 0x3b851bd8, 0x5bb6f19f, 0x80b9ed54, 0x3496a9f1, 0xdf815033, 0x91612339, 0x14c516d6, 0xa3f0a804, 0x5e78e975, 0xf408bcd9, 0x63d525ed, 0xa1e459c3, 0xfde303af, 0x049fc17f, 0xe7ed4489, 0xfaeefdb6, 0x2b1b2fa8, 0xc67579a6, 0x5505882e, 0xe3e1c7cb, 0xed53bf30, 0x9e628351, 0x8fa12113, 0x7500c30f, 0xde1bee00, 0xf1fefe06, 0xdc759c00, 0x4c75e5ab, 0xf889b069, 0x695bf8ae, 0x47d6600f, 0xd2a84f87, 0xa0ca82a9, 0x8d2b750c, 0xe03d8cd7, 0x581fea33, 0x969b0460, 0x36c7b7de, 0x74b3fd20, 0x2bb8bde6, 0x13b20dec, 0xa2dcee89, 0xca36229d, 0x06fdb74e, 0x6d9a982d, 0x02503496, 0xbdb4e0d9, 0xbd1f94cf, 0x6d26f82d, 0xcf5e41cd, 0x88b67b65, 0x3e1b3ee4, 0xb20e5e53, 0x1d9be438, 0xcef9c692, 0x299bd1b2, 0xb1279627, 0x210b5f3d, 0x5569bd88, 0x9652ed43, 0x7e8e0f8c, 0xdfa01085, 0xcd6d6343, 0xb8739826, 0xa52ce9a0, 0xd33ef231, 0x1b4d92c2, 0xabfa116d, 0xcdf47800, 0x3a4eefdc, 0xd01f3bcf, 0x30a32f46, 0xfb54d851, 0x06a98f67, 0xbdcd0a71, 0x21a00949, 0xfe7049c9, 0x67ef46d2, 0xa1fabcbc, 0xa4c72db4, 0x4a8a910d, 0x85a890ad, 0xc37e9454, 0xfc3d034a, 0x6f46cc52, 0x742be7a8, 0xe94ecbc5, 0x5f993659, 0x98270309, 0x8d1adae9, 0xea6e035e, 0x293d5fae, 0x669955b3, 0x5afe23b5, 0x4c74efbf, 0x98106505, 0xfbe09627, 0x3c00e8df, 0x5b03975d, 0x78edc83c, 0x117c49c6, 0x66cdfc73, 0xfa55c94f, 0x5bf285fe, 0x2db49b7d, 0xfbfeb8f0, 0xb7631bab, 0x837849f3, 0xf77f3ae5, 0x6e5db9bc, 0xfdd76f15, 0x545abf92, 0x8b538102, 0xdd5c9b65, 0xa5adfd55, 0xecbd7bc5, 0x9f99ebdd, 0x67500dcb, 0xf5246d1f, 0x2b0c061c, 0x927a3747, 0xc77ba267, 0x6da9f855, 0x6240d41a, 0xe9d1701d, 0xc69f0c55, 0x2c2c37cf, 0x12d82191, 0x47be40d3, 0x165b35cd, 0xb7db42e1, 0x358786e4, 0x84b8fc4e, 0x92f57c28, 0xf9c8bbd7, 0xab95a33d, 0x11009238, 0xe9770420, 0xd6967e2a, 0x97c1589f, 0x2ee7e7d3, 0x32cc86da, 0xe47767d1, 0x73e9b61e, 0xd35bac45, 0x835a62bb, 0x5d9217b0, 0x43f3f0ed, 0x8a97911e, 0x4ec7eb55, 0x4b5a988c, 0xb9056683, 0x45456f97, 0x1669fe44, 0xafb861b8, 0x8e83a19c, 0x0bab08d6, 0xe6a145a9, 0xc31e5fc2, 0x27621f4c, 0x795692fa, 0xb5e33ab9, 0x1bc786b6, 0x45d1c106, 0x986531c9, 0x40c9a0ec, 0xff0fdf84, 0xa7359a42, 0xfd1c2091, 0xf73463d4, 0x51b0d635, 0x1d602fb4, 0xc56b69b7, 0x6909d3f7, 0xa04d68f4, 0x8d1001a7, 0x8ecace50, 0x21ec4765, 0x3530f6b0, 0x645f3644, 0x9963ef1e, 0x2b3c70d5, 0xa20c823b, 0x8d26dcae, 0x05214e0c, 0x1993896d, 0x62085a35, 0x7b620b67, 0x1dd85da2, 0x09ce9b1d, 
0xd7873326, 0x063ff730, 0xf4ff3c14, 0x09a49d69, 0x532062ba, 0x03ba7729, 0xbd9a86cc, 0xe26d02a7, 0x7ccbe5d3, 0x4f662214, 0x8b999a66, 0x3d0b92b4, 0x70b210f0, 0xf5b8f16f, 0x32146d34, 0x430b92bf, 0x8ab6204c, 0x35e6e1ff, 0xc2f6c2fa, 0xa2df8a1a, 0x887413ec, 0x7cb7a69f, 0x7ac6dbe6, 0x9102d1cb, 0x8892a590, 0xc804fe3a, 0xdfc4920a, 0xfc829840, 0x8910d2eb, 0x38a210fd, 0x9d840cc9, 0x7b9c827f, 0x3444ca0c, 0x071735ab, 0x5e9088e4, 0xc995d60e, 0xbe0bb942, 0x17b089ae, 0x050e1054, 0xcf4324f7, 0x1e3e64dd, 0x436414bb, 0xc48fc2e3, 0x6b6b83d4, 0x9f6558ac, 0x781b22c5, 0x7147cfe2, 0x3c221b4d, 0xa5602765, 0x8f01a4f0, 0x2a9f14ae, 0x12158cb8, 0x28177c50, 0x1091a165, 0x39e4e4be, 0x3e451b7a, 0xd965419c, 0x52053005, 0x0798aa53, 0xe6773e13, 0x1207f671, 0xd2ef998b, 0xab88a38f, 0xc77a8482, 0xa88fb031, 0x5199e0cd, 0x01b30536, 0x46eeb0ef, 0x814259ff, 0x9789a8cf, 0x376ec5ac, 0x7087034a, 0x948b6bdd, 0x4281e628, 0x2c848370, 0xd76ce66a, 0xe9b6959e, 0x24321a8e, 0xdeddd622, 0xb890f960, 0xea26c00a, 0x55e7d8b2, 0xeab67f09, 0x9227fb08, 0xeebbed06, 0xcac1b0d1, 0xb6412083, 0x05d2b0e7, 0x9037624a, 0xc9702198, 0x2c8d1a86, 0x3e7d416e, 0xc3f1a39f, 0xf04bdce4, 0xc88cdb61, 0xbdc89587, 0x4d29b63b, 0x6f24c267, 0x4b529c87, 0x573f5a53, 0xdb3316e9, 0x288eb53b, 0xd2c074bd, 0xef44a99a, 0x2b404d2d, 0xf6706464, 0xfe824f4c, 0xc3debaf8, 0x12f44f98, 0x03135e76, 0xb4888e7f, 0xb6b2325d, 0x3a138259, 0x513c83ec, 0x2386d214, 0x94555500, 0xfbd1522d, 0xda2af018, 0x15b054c0, 0x5ad654e6, 0xb6ed00aa, 0xa2f2180e, 0x5f662825, 0xecd11366, 0x1de5e99d, 0x07afd2ad, 0xcf457b04, 0xe631e10b, 0x83ae8a21, 0x709f0d59, 0x3e278bf9, 0x246816db, 0x9f5e8fd3, 0xc5b5b5a2, 0xd54a9d5c, 0x4b6f2856, 0x2eb5a666, 0xfc68bdd4, 0x1ed1a7f8, 0x98a34b75, 0xc895ada9, 0x2907cc69, 0x87b0b455, 0xddaf96d9, 0xe7da15a6, 0x9298c82a, 0x72bd5cab, 0x2e2a6ad4, 0x7f4b6bb8, 0x525225fe, 0x985abe90, 0xac1fd6e1, 0xb8340f23, 0x92985159, 0x7d29501d, 0xe75dc744, 0x687501b4, 0x92077dc3, 0x58281a67, 0xe7e8e9be, 0xd0e64fd1, 0xb2eb0a30, 0x0e1feccd, 0xc0dc4a9e, 0x5c4aeace, 0x2ca5b93c, 0xee0ec34f, 0xad78467b, 0x0830e76e, 0x0df63f8b, 0x2c2dfd95, 0x9b41ed31, 0x9ff4cddc, 0x1590c412, 0x2366fc82, 0x7a83294f, 0x9336c4de, 0x2343823c, 0x5b681096, 0xf320e4c2, 0xc22b70e2, 0xb5fbfb2a, 0x3ebc2fed, 0x11af07bd, 0x429a08c5, 0x42bee387, 0x58629e33, 0xfb63b486, 0x52135fbe, 0xf1380e60, 0x6355de87, 0x2f0bb19a, 0x167f63ac, 0x507224cf, 0xf7c99d00, 0x71646f50, 0x74feb1ca, 0x5f9abfdd, 0x278f7d68, 0x70120cd7, 0x4281b0f2, 0xdc8ebe5c, 0x36c32163, 0x2da1e884, 0x61877598, 0xbef04402, 0x304db695, 0xfa8e9add, 0x503bac31, 0x0fe04722, 0xf0d59f47, 0xcdc5c595, 0x918c39dd, 0x0cad8d05, 0x6b3ed1eb, 0x4d43e089, 0x7ab051f8, 0xdeec371f, 0x0f4816ae, 0xf8a1a240, 0xd15317f6, 0xb8efbf0b, 0xcdd05df8, 0x4fd5633e, 0x7cf19668, 0x25d8f422, 0x72d156f2, 0x2a778502, 0xda7aefb9, 0x4f4f66e8, 0x19db6bff, 0x74e468da, 0xa754f358, 0x7339ec50, 0x139006f6, 0xefbd0b91, 0x217e9a73, 0x939bd79c }; START_TEST (gu_spooky_test) { uint8_t buf[BUFSIZE]; size_t i; for (i = 0; i < BUFSIZE; ++i) { uint32_t res; buf[i] = i+128; /* It looks like values for messages under bufSize are for the "short" * algorithm, incompatible with the real one. 
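 * Hence the branch in the loop below: inputs shorter than _spooky_bufSize are
 * hashed with gu_spooky_short() and the low 32 bits of the little-endian h[0]
 * are compared against expected[], while longer inputs go through gu_spooky32().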
*/ if (i < _spooky_bufSize) { /* using 128-bit version */ uint64_t h[2]; gu_spooky_short (buf, i, h); res = (uint32_t)gu_le64(h[0]); } else { /* using 32-bit version */ res = gu_spooky32 (buf, i); } if (res != expected[i]) { fail ("%d: expected: 0x%.8lX, found: 0x%.8lX", i, expected[i], res); } } } END_TEST Suite *gu_spooky_suite(void) { Suite *s = suite_create("Spooky hash"); TCase *tc = tcase_create("gu_spooky"); suite_add_tcase (s, tc); tcase_add_test (tc, gu_spooky_test); return s; } galera-3-25.3.20/galerautils/tests/gu_rset_test.hpp0000644000015300001660000000031213042054732022030 0ustar jenkinsjenkins/* Copyright (C) 2013 Codership Oy * * $Id$ */ #ifndef __gu_rset_test__ #define __gu_rset_test__ #include Suite *gu_rset_suite(void); #endif /* __gu_rset_test__ */ galera-3-25.3.20/galerautils/tests/gu_lock_step_test.h0000644000015300001660000000032413042054732022501 0ustar jenkinsjenkins/* * Copyright (C) 2008 Codership Oy * * $Id$ */ #ifndef __gu_lock_step_test__ #define __gu_lock_step_test__ extern Suite *gu_lock_step_suite(void); #endif /* __gu_lock_step_test__ */ galera-3-25.3.20/gcs/0000755000015300001660000000000013042054732013712 5ustar jenkinsjenkinsgalera-3-25.3.20/gcs/src/0000755000015300001660000000000013042054732014501 5ustar jenkinsjenkinsgalera-3-25.3.20/gcs/src/gcs_state_msg.cpp0000644000015300001660000007077013042054732020042 0ustar jenkinsjenkins/* * Copyright (C) 2008-2016 Codership Oy * * $Id$ */ /* * Interface to state messages - implementation * */ #define __STDC_LIMIT_MACROS #include #include #include #define GCS_STATE_MSG_VER 4 #define GCS_STATE_MSG_ACCESS #include "gcs_state_msg.hpp" #include "gcs_node.hpp" gcs_state_msg_t* gcs_state_msg_create (const gu_uuid_t* state_uuid, const gu_uuid_t* group_uuid, const gu_uuid_t* prim_uuid, gcs_seqno_t prim_seqno, gcs_seqno_t received, gcs_seqno_t cached, int prim_joined, gcs_node_state_t prim_state, gcs_node_state_t current_state, const char* name, const char* inc_addr, int gcs_proto_ver, int repl_proto_ver, int appl_proto_ver, int desync_count, uint8_t flags) { #define CHECK_PROTO_RANGE(LEVEL) \ if (LEVEL < (int)0 || LEVEL > (int)UINT8_MAX) { \ gu_error ("#LEVEL value %d is out of range [0, %d]", LEVEL,UINT8_MAX); \ return NULL; \ } CHECK_PROTO_RANGE(gcs_proto_ver); CHECK_PROTO_RANGE(repl_proto_ver); CHECK_PROTO_RANGE(appl_proto_ver); size_t name_len = strlen(name) + 1; size_t addr_len = strlen(inc_addr) + 1; gcs_state_msg_t* ret = static_cast( gu_calloc (1, sizeof (gcs_state_msg_t) + name_len + addr_len)); if (ret) { ret->state_uuid = *state_uuid; ret->group_uuid = *group_uuid; ret->prim_uuid = *prim_uuid; ret->prim_joined = prim_joined; ret->prim_seqno = prim_seqno; ret->received = received; ret->cached = cached; ret->prim_state = prim_state; ret->current_state = current_state; ret->version = GCS_STATE_MSG_VER; ret->gcs_proto_ver = gcs_proto_ver; ret->repl_proto_ver= repl_proto_ver; ret->appl_proto_ver= appl_proto_ver; ret->desync_count = desync_count; ret->name = (char*)(ret + 1); ret->inc_addr = ret->name + name_len; ret->flags = flags; // tmp is a workaround for some combination of GCC flags which don't // allow passing ret->name and ret->inc_addr directly even with casting // char* tmp = (char*)ret->name; strcpy ((char*)ret->name, name); // tmp = (char*)ret->inc_addr; strcpy ((char*)ret->inc_addr, inc_addr); } return ret; } void gcs_state_msg_destroy (gcs_state_msg_t* state) { gu_free (state); } /* Returns length needed to serialize gcs_state_msg_t for sending */ size_t gcs_state_msg_len (gcs_state_msg_t* state) { 
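    /* The sum below mirrors the layout produced by gcs_state_msg_write():
     * six 1-byte header fields, prim_joined, three UUIDs, the received and
     * prim_seqno counters, the two NUL-terminated strings, plus the
     * version-gated tail (appl_proto_ver since V1, cached seqno since V3,
     * desync count since V4) which gcs_state_msg_read() only consumes when
     * the sender's message version is high enough. */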
return ( sizeof (int8_t) + // version (reserved) sizeof (int8_t) + // flags sizeof (int8_t) + // gcs_proto_ver sizeof (int8_t) + // repl_proto_ver sizeof (int8_t) + // prim_state sizeof (int8_t) + // curr_state sizeof (int16_t) + // prim_joined sizeof (gu_uuid_t) + // state_uuid sizeof (gu_uuid_t) + // group_uuid sizeof (gu_uuid_t) + // conf_uuid sizeof (int64_t) + // received sizeof (int64_t) + // prim_seqno strlen (state->name) + 1 + strlen (state->inc_addr) + 1 + // V1-2 stuff sizeof (uint8_t) + // appl_proto_ver (in preparation for V1) // V3 stuff sizeof (int64_t) + // cached // V4 stuff sizeof (int32_t) // desync count ); } #define STATE_MSG_FIELDS_V0(buf) \ int8_t* version = (int8_t*)buf; \ int8_t* flags = version + 1; \ int8_t* gcs_proto_ver = flags + 1; \ int8_t* repl_proto_ver = gcs_proto_ver + 1; \ int8_t* prim_state = repl_proto_ver + 1; \ int8_t* curr_state = prim_state + 1; \ int16_t* prim_joined = (int16_t*)(curr_state + 1); \ gu_uuid_t* state_uuid = (gu_uuid_t*)(prim_joined + 1); \ gu_uuid_t* group_uuid = state_uuid + 1; \ gu_uuid_t* prim_uuid = group_uuid + 1; \ int64_t* received = (int64_t*)(prim_uuid + 1); \ int64_t* prim_seqno = received + 1; \ char* name = (char*)(prim_seqno + 1); #define CONST_STATE_MSG_FIELDS_V0(buf) \ const int8_t* version = (int8_t*)buf; \ const int8_t* flags = version + 1; \ const int8_t* gcs_proto_ver = flags + 1; \ const int8_t* repl_proto_ver = gcs_proto_ver + 1; \ const int8_t* prim_state = repl_proto_ver + 1; \ const int8_t* curr_state = prim_state + 1; \ const int16_t* prim_joined = (int16_t*)(curr_state + 1); \ const gu_uuid_t* state_uuid = (gu_uuid_t*)(prim_joined + 1); \ const gu_uuid_t* group_uuid = state_uuid + 1; \ const gu_uuid_t* prim_uuid = group_uuid + 1; \ const int64_t* received = (int64_t*)(prim_uuid + 1); \ const int64_t* prim_seqno = received + 1; \ const char* name = (char*)(prim_seqno + 1); /* Serialize gcs_state_msg_t into buf */ ssize_t gcs_state_msg_write (void* buf, const gcs_state_msg_t* state) { STATE_MSG_FIELDS_V0(buf); char* inc_addr = name + strlen (state->name) + 1; uint8_t* appl_proto_ver = (uint8_t*)(inc_addr + strlen(state->inc_addr) + 1); int64_t* cached = (int64_t*)(appl_proto_ver + 1); int32_t* desync_count = (int32_t*)(cached + 1); *version = GCS_STATE_MSG_VER; *flags = state->flags; *gcs_proto_ver = state->gcs_proto_ver; *repl_proto_ver = state->repl_proto_ver; *prim_state = state->prim_state; *curr_state = state->current_state; *prim_joined = htog16(((int16_t)state->prim_joined)); *state_uuid = state->state_uuid; *group_uuid = state->group_uuid; *prim_uuid = state->prim_uuid; *received = htog64(state->received); *prim_seqno = htog64(state->prim_seqno); strcpy (name, state->name); strcpy (inc_addr, state->inc_addr); *appl_proto_ver = state->appl_proto_ver; // in preparation for V1 *cached = htog64(state->cached); *desync_count = htog32(state->desync_count); return ((uint8_t*)(desync_count + 1) - (uint8_t*)buf); } /* De-serialize gcs_state_msg_t from buf */ gcs_state_msg_t* gcs_state_msg_read (const void* const buf, ssize_t const buf_len) { assert (buf_len > 0); /* beginning of the message is always version 0 */ CONST_STATE_MSG_FIELDS_V0(buf); const char* inc_addr = name + strlen (name) + 1; int appl_proto_ver = 0; uint8_t* appl_ptr = (uint8_t*)(inc_addr + strlen(inc_addr) + 1); if (*version >= 1) { assert(buf_len >= (uint8_t*)(appl_ptr + 1) - (uint8_t*)buf); appl_proto_ver = *appl_ptr; } int64_t cached = GCS_SEQNO_ILL; int64_t* cached_ptr = (int64_t*)(appl_ptr + 1); if (*version >= 3) { assert(buf_len >= 
(uint8_t*)(cached_ptr + 1) - (uint8_t*)buf); cached = gtoh64(*cached_ptr); } int32_t desync_count = 0; int32_t* desync_count_ptr = (int32_t*)(cached_ptr + 1); if (*version >= 4) { assert(buf_len >= (uint8_t*)(desync_count_ptr + 1) - (uint8_t*)buf); desync_count = gtoh32(*desync_count_ptr); } gcs_state_msg_t* ret = gcs_state_msg_create ( state_uuid, group_uuid, prim_uuid, gtoh64(*prim_seqno), gtoh64(*received), cached, gtoh16(*prim_joined), (gcs_node_state_t)*prim_state, (gcs_node_state_t)*curr_state, name, inc_addr, *gcs_proto_ver, *repl_proto_ver, appl_proto_ver, desync_count, *flags ); if (ret) ret->version = *version; // dirty hack return ret; } /* Print state message contents to buffer */ int gcs_state_msg_snprintf (char* str, size_t size, const gcs_state_msg_t* state) { str[size - 1] = '\0'; // preventive termination return snprintf (str, size - 1, "\n\tVersion : %d" "\n\tFlags : %#02hhx" "\n\tProtocols : %d / %d / %d" "\n\tState : %s" "\n\tDesync count : %d" "\n\tPrim state : %s" "\n\tPrim UUID : " GU_UUID_FORMAT "\n\tPrim seqno : %lld" "\n\tFirst seqno : %lld" "\n\tLast seqno : %lld" "\n\tPrim JOINED : %d" "\n\tState UUID : " GU_UUID_FORMAT "\n\tGroup UUID : " GU_UUID_FORMAT "\n\tName : '%s'" "\n\tIncoming addr: '%s'\n", state->version, state->flags, state->gcs_proto_ver, state->repl_proto_ver, state->appl_proto_ver, gcs_node_state_to_str(state->current_state), state->desync_count, gcs_node_state_to_str(state->prim_state), GU_UUID_ARGS(&state->prim_uuid), (long long)state->prim_seqno, (long long)state->cached, (long long)state->received, state->prim_joined, GU_UUID_ARGS(&state->state_uuid), GU_UUID_ARGS(&state->group_uuid), state->name, state->inc_addr ); } /* Get state uuid */ const gu_uuid_t* gcs_state_msg_uuid (const gcs_state_msg_t* state) { return &state->state_uuid; } /* Get group uuid */ const gu_uuid_t* gcs_state_msg_group_uuid (const gcs_state_msg_t* state) { return &state->group_uuid; } /* Get action seqno */ gcs_seqno_t gcs_state_msg_received (const gcs_state_msg_t* state) { return state->received; } /* Get first cached action seqno */ gcs_seqno_t gcs_state_msg_cached (const gcs_state_msg_t* state) { return state->cached; } /* Get current node state */ gcs_node_state_t gcs_state_msg_current_state (const gcs_state_msg_t* state) { return state->current_state; } /* Get node state */ gcs_node_state_t gcs_state_msg_prim_state (const gcs_state_msg_t* state) { return state->prim_state; } /* Get node name */ const char* gcs_state_msg_name (const gcs_state_msg_t* state) { return state->name; } /* Get node incoming address */ const char* gcs_state_msg_inc_addr (const gcs_state_msg_t* state) { return state->inc_addr; } /* Get supported protocols */ void gcs_state_msg_get_proto_ver (const gcs_state_msg_t* state, int* gcs_proto_ver, int* repl_proto_ver, int* appl_proto_ver) { *gcs_proto_ver = state->gcs_proto_ver; *repl_proto_ver = state->repl_proto_ver; *appl_proto_ver = state->appl_proto_ver; } int gcs_state_msg_get_desync_count (const gcs_state_msg_t* state) { return state->desync_count; } /* Get state message flags */ uint8_t gcs_state_msg_flags (const gcs_state_msg_t* state) { return state->flags; } /* Returns the node which is most representative of a group */ static const gcs_state_msg_t* state_nodes_compare (const gcs_state_msg_t* left, const gcs_state_msg_t* right) { assert (0 == gu_uuid_compare(&left->group_uuid, &right->group_uuid)); /* Allow GCS_SEQNO_ILL seqnos if bootstrapping from non-prim */ assert ((gcs_state_msg_flags(left) & GCS_STATE_FBOOTSTRAP) || left->prim_seqno 
!= GCS_SEQNO_ILL); assert ((gcs_state_msg_flags(right) & GCS_STATE_FBOOTSTRAP) || right->prim_seqno != GCS_SEQNO_ILL); if (left->received < right->received) { assert (left->prim_seqno <= right->prim_seqno); return right; } else if (left->received > right->received) { assert (left->prim_seqno >= right->prim_seqno); return left; } else { // act_id's are equal, choose the one with higher prim_seqno. if (left->prim_seqno < right->prim_seqno) { return right; } else { return left; } } } /* Helper - just prints out all significant (JOINED) nodes */ static void state_report_uuids (char* buf, size_t buf_len, const gcs_state_msg_t* states[], long states_num, gcs_node_state_t min_state) { long j; for (j = 0; j < states_num; j++) { if (states[j]->current_state >= min_state) { int written = gcs_state_msg_snprintf (buf, buf_len, states[j]); buf += written; buf_len -= written; } } } #define GCS_STATE_MAX_LEN 722 #define GCS_STATE_BAD_REP ((gcs_state_msg_t*)-1) /*! checks for inherited primary configuration, returns representative * @retval (void*)-1 in case of fatal error */ static const gcs_state_msg_t* state_quorum_inherit (const gcs_state_msg_t* states[], long states_num, gcs_state_quorum_t* quorum) { /* They all must have the same group_uuid or otherwise quorum is impossible. * Of those we need to find at least one that has complete state - * status >= GCS_STATE_JOINED. If we find none - configuration is * non-primary. * Of those with the status >= GCS_STATE_JOINED we choose the most * representative: with the highest act_seqno and prim_seqno. */ long i, j; const gcs_state_msg_t* rep = NULL; // find at least one JOINED/DONOR (donor was once joined) for (i = 0; i < states_num; i++) { if (gcs_node_is_joined(states[i]->current_state)) { rep = states[i]; break; } } if (!rep) { size_t buf_len = states_num * GCS_STATE_MAX_LEN; char* buf = static_cast(gu_malloc (buf_len)); if (buf) { state_report_uuids (buf, buf_len, states, states_num, GCS_NODE_STATE_NON_PRIM); #ifdef GCS_CORE_TESTING gu_warn ("Quorum: No node with complete state:\n%s", buf); #else /* Print buf into stderr in order to message truncation * of application logger. 
*/ gu_warn ("Quorum: No node with complete state:\n"); fprintf(stderr, "%s\n", buf); #endif /* GCS_CORE_TESTING */ gu_free (buf); } return NULL; } // Check that all JOINED/DONOR have the same group UUID // and find most updated for (j = i + 1; j < states_num; j++) { if (gcs_node_is_joined(states[j]->current_state)) { if (gu_uuid_compare (&rep->group_uuid, &states[j]->group_uuid)) { // for now just freak out and print all conflicting nodes size_t buf_len = states_num * GCS_STATE_MAX_LEN; char* buf = static_cast(gu_malloc (buf_len)); if (buf) { state_report_uuids (buf, buf_len, states, states_num, GCS_NODE_STATE_DONOR); gu_fatal("Quorum impossible: conflicting group UUIDs:\n%s"); gu_free (buf); } else { gu_fatal("Quorum impossible: conflicting group UUIDs"); } return GCS_STATE_BAD_REP; } rep = state_nodes_compare (rep, states[j]); } } quorum->act_id = rep->received; quorum->conf_id = rep->prim_seqno; quorum->group_uuid = rep->group_uuid; quorum->primary = true; return rep; } struct candidate /* remerge candidate */ { gu_uuid_t prim_uuid; // V0 compatibility (0.8.1) gu_uuid_t state_uuid; gcs_seqno_t state_seqno; const gcs_state_msg_t* rep; int prim_joined; int found; gcs_seqno_t prim_seqno; }; static bool state_match_candidate (const gcs_state_msg_t* const s, struct candidate* const c, int const state_exchange_version) { switch (state_exchange_version) { case 0: // V0 compatibility (0.8.1) return (0 == gu_uuid_compare(&s->prim_uuid, &c->prim_uuid)); default: return ((0 == gu_uuid_compare(&s->group_uuid, &c->state_uuid)) && (s->received == c->state_seqno) && // what if they are different components. // but have same group uuid and received(0) // see gh24. (s->prim_seqno == c->prim_seqno)); } } /* try to find representative remerge candidate */ static const struct candidate* state_rep_candidate (const struct candidate* const c, int const c_num) { assert (c_num > 0); const struct candidate* rep = &c[0]; gu_uuid_t const state_uuid = rep->state_uuid; gcs_seqno_t state_seqno = rep->state_seqno; gcs_seqno_t prim_seqno = rep->prim_seqno; int i; for (i = 1; i < c_num; i++) { if (!gu_uuid_compare(&c[i].state_uuid, &GU_UUID_NIL)) { /* Ignore nodes with undefined state uuid, they have been * added to group before remerge and have clean state. */ continue; } else if (gu_uuid_compare(&state_uuid, &GU_UUID_NIL) && gu_uuid_compare(&state_uuid, &c[i].state_uuid)) { /* There are candidates from different groups */ return NULL; } assert (prim_seqno != c[i].prim_seqno || state_seqno != c[i].state_seqno); if (prim_seqno < c[i].prim_seqno) { rep = &c[i]; prim_seqno = rep->prim_seqno; } else if (prim_seqno == c[i].prim_seqno && state_seqno < c[i].state_seqno) { rep = &c[i]; state_seqno = rep->state_seqno; } } return rep; } /*! checks for full prim remerge after non-prim */ static const gcs_state_msg_t* state_quorum_remerge (const gcs_state_msg_t* const states[], long const states_num, bool const bootstrap, gcs_state_quorum_t* const quorum) { struct candidate* candidates = GU_CALLOC(states_num, struct candidate); if (!candidates) { gu_error ("Quorum: could not allocate %zd bytes for re-merge check.", states_num * sizeof(struct candidate)); return NULL; } int i, j; int candidates_found = 0; /* 1. 
Sort and count all nodes who have ever been JOINED by primary * component UUID */ for (i = 0; i < states_num; i++) { bool cond; if (bootstrap) { cond = gcs_state_msg_flags(states[i]) & GCS_STATE_FBOOTSTRAP; if (cond) gu_debug("found node %s with bootstrap flag", gcs_state_msg_name(states[i])); } else { cond = gcs_node_is_joined(states[i]->prim_state); } if (cond) { if (!bootstrap && GCS_NODE_STATE_JOINER == states[i]->current_state) { /* Joiner always has an undefined state * (and it should be its prim_state!) */ gu_warn ("Inconsistent state message from %d (%s): current " "state is %s, but the primary state was %s.", i, states[i]->name, gcs_node_state_to_str(states[i]->current_state), gcs_node_state_to_str(states[i]->prim_state)); continue; } assert(bootstrap || gu_uuid_compare(&states[i]->prim_uuid, &GU_UUID_NIL)); for (j = 0; j < candidates_found; j++) { if (state_match_candidate (states[i], &candidates[j], quorum->version)) { assert(states[i]->prim_joined == candidates[j].prim_joined); // comment out following two lines for pc recovery // when nodes recoveried from state files, if their states // match, so candidates[j].found > 0. // However their prim_joined == 0. // assert(candidates[j].found < candidates[j].prim_joined); // assert(candidates[j].found > 0); candidates[j].found++; candidates[j].rep = state_nodes_compare (candidates[j].rep, states[i]); break; } } if (j == candidates_found) { // we don't have this candidate in the list yet candidates[j].prim_uuid = states[i]->prim_uuid; candidates[j].state_uuid = states[i]->group_uuid; candidates[j].state_seqno = states[i]->received; candidates[j].prim_joined = states[i]->prim_joined; candidates[j].rep = states[i]; candidates[j].found = 1; candidates[j].prim_seqno = states[i]->prim_seqno; candidates_found++; assert(candidates_found <= states_num); } } } const gcs_state_msg_t* rep = NULL; if (candidates_found) { assert (candidates_found > 0); const struct candidate* const rc = state_rep_candidate (candidates, candidates_found); if (!rc) { gu_error ("Found more than one %s primary component candidate.", bootstrap ? "bootstrap" : "re-merged"); rep = NULL; } else { if (bootstrap) { gu_info ("Bootstrapped primary " GU_UUID_FORMAT " found: %d.", GU_UUID_ARGS(&rc->prim_uuid), rc->found); } else { gu_info ("%s re-merge of primary " GU_UUID_FORMAT " found: " "%d of %d.", rc->found == rc->prim_joined ? "Full" : "Partial", GU_UUID_ARGS(&rc->prim_uuid), rc->found, rc->prim_joined); } rep = rc->rep; assert (NULL != rep); assert (bootstrap || gcs_node_is_joined(rep->prim_state)); quorum->act_id = rep->received; quorum->conf_id = rep->prim_seqno; quorum->group_uuid = rep->group_uuid; quorum->primary = true; } } else { assert (0 == candidates_found); gu_warn ("No %s primary component found.", bootstrap ? "bootstrapped" : "re-merged"); } gu_free (candidates); return rep; } #if 0 // REMOVE WHEN NO LONGER NEEDED FOR REFERENCE /*! Checks for prim comp bootstrap */ static const gcs_state_msg_t* state_quorum_bootstrap (const gcs_state_msg_t* const states[], long const states_num, gcs_state_quorum_t* const quorum) { struct candidate* candidates = GU_CALLOC(states_num, struct candidate); if (!candidates) { gu_error ("Quorum: could not allocate %zd bytes for re-merge check.", states_num * sizeof(struct candidate)); return NULL; } int i, j; int candidates_found = 0; /* 1. 
Sort and count all nodes which have bootstrap flag set */ for (i = 0; i < states_num; i++) { if (gcs_state_msg_flags(states[i]) & GCS_STATE_FBOOTSTRAP) { gu_debug("found node %s with bootstrap flag", gcs_state_msg_name(states[i])); for (j = 0; j < candidates_found; j++) { if (state_match_candidate (states[i], &candidates[j], quorum->version)) { assert(states[i]->prim_joined == candidates[j].prim_joined); assert(candidates[j].found > 0); candidates[j].found++; candidates[j].rep = state_nodes_compare (candidates[j].rep, states[i]); break; } } if (j == candidates_found) { // we don't have this candidate in the list yet candidates[j].prim_uuid = states[i]->prim_uuid; candidates[j].state_uuid = states[i]->group_uuid; candidates[j].state_seqno = states[i]->received; candidates[j].prim_joined = states[i]->prim_joined; candidates[j].rep = states[i]; candidates[j].found = 1; candidates_found++; assert(candidates_found <= states_num); } } } const gcs_state_msg_t* rep = NULL; if (candidates_found) { assert (candidates_found > 0); const struct candidate* const rc = state_rep_candidate (candidates, candidates_found); if (!rc) { gu_error ("Found more than one bootstrap primary component " "candidate."); rep = NULL; } else { gu_info ("Bootstrapped primary " GU_UUID_FORMAT " found: %d.", GU_UUID_ARGS(&rc->prim_uuid), rc->found); rep = rc->rep; assert (NULL != rep); quorum->act_id = rep->received; quorum->conf_id = rep->prim_seqno; quorum->group_uuid = rep->group_uuid; quorum->primary = true; } } else { assert (0 == candidates_found); gu_warn ("No bootstrapped primary component found."); } gu_free (candidates); return rep; } #endif // 0 /* Get quorum decision from state messages */ long gcs_state_msg_get_quorum (const gcs_state_msg_t* states[], long states_num, gcs_state_quorum_t* quorum) { assert (states_num > 0); assert (NULL != states); long i; const gcs_state_msg_t* rep = NULL; *quorum = GCS_QUORUM_NON_PRIMARY; // pessimistic assumption /* find lowest commonly supported state exchange version */ quorum->version = states[0]->version; for (i = 1; i < states_num; i++) { if (quorum->version > states[i]->version) { quorum->version = states[i]->version; } } rep = state_quorum_inherit (states, states_num, quorum); if (!quorum->primary && rep != GCS_STATE_BAD_REP) { rep = state_quorum_remerge (states, states_num, false, quorum); } if (!quorum->primary && rep != GCS_STATE_BAD_REP) { rep = state_quorum_remerge (states, states_num, true, quorum); } if (!quorum->primary) { gu_error ("Failed to establish quorum."); return 0; } assert (rep != NULL); // select the highest commonly supported protocol: min(proto_max) #define INIT_PROTO_VER(LEVEL) quorum->LEVEL = rep->LEVEL INIT_PROTO_VER(gcs_proto_ver); INIT_PROTO_VER(repl_proto_ver); INIT_PROTO_VER(appl_proto_ver); for (i = 0; i < states_num; i++) { #define CHECK_MIN_PROTO_VER(LEVEL) \ if (states[i]->LEVEL < quorum->LEVEL) { \ quorum->LEVEL = states[i]->LEVEL; \ } // if (!gu_uuid_compare(&states[i]->group_uuid, &quorum->group_uuid)) { CHECK_MIN_PROTO_VER(gcs_proto_ver); CHECK_MIN_PROTO_VER(repl_proto_ver); CHECK_MIN_PROTO_VER(appl_proto_ver); // } } if (quorum->version < 2) {;} // for future generations if (quorum->version < 1) { // appl_proto_ver is not supported by all members assert (quorum->repl_proto_ver <= 1); if (1 == quorum->repl_proto_ver) quorum->appl_proto_ver = 1; else quorum->appl_proto_ver = 0; } return 0; } galera-3-25.3.20/gcs/src/gcs_group.cpp0000644000015300001660000014702513042054732017206 0ustar jenkinsjenkins/* * Copyright (C) 2008-2016 Codership 
Oy * * $Id$ */ #include "gcs_group.hpp" #include "gcs_gcache.hpp" #include "gcs_priv.hpp" #include const char* gcs_group_state_str[GCS_GROUP_STATE_MAX] = { "NON_PRIMARY", "WAIT_STATE_UUID", "WAIT_STATE_MSG", "PRIMARY" }; int gcs_group_init (gcs_group_t* group, gcache_t* const cache, const char* node_name, const char* inc_addr, gcs_proto_t const gcs_proto_ver, int const repl_proto_ver, int const appl_proto_ver) { // here we also create default node instance. group->cache = cache; group->act_id_ = GCS_SEQNO_ILL; group->conf_id = GCS_SEQNO_ILL; group->state_uuid = GU_UUID_NIL; group->group_uuid = GU_UUID_NIL; group->num = 1; // this must be removed (#474) group->my_idx = 0; // this must be -1 (#474) group->my_name = strdup(node_name ? node_name : NODE_NO_NAME); group->my_address = strdup(inc_addr ? inc_addr : NODE_NO_ADDR); group->state = GCS_GROUP_NON_PRIMARY; group->last_applied = GCS_SEQNO_ILL; // mark for recalculation group->last_node = -1; group->frag_reset = true; // just in case group->nodes = GU_CALLOC(group->num, gcs_node_t); // this must be removed (#474) if (!group->nodes) return -ENOMEM; // this should be removed (#474) /// this should be removed (#474) gcs_node_init (&group->nodes[group->my_idx], group->cache, NODE_NO_ID, group->my_name, group->my_address, gcs_proto_ver, repl_proto_ver, appl_proto_ver, 0); group->prim_uuid = GU_UUID_NIL; group->prim_seqno = GCS_SEQNO_ILL; group->prim_num = 0; group->prim_state = GCS_NODE_STATE_NON_PRIM; *(gcs_proto_t*)&group->gcs_proto_ver = gcs_proto_ver; *(int*)&group->repl_proto_ver = repl_proto_ver; *(int*)&group->appl_proto_ver = appl_proto_ver; group->quorum = GCS_QUORUM_NON_PRIMARY; group->last_applied_proto_ver = -1; return 0; } int gcs_group_init_history (gcs_group_t* group, gcs_seqno_t seqno, const gu_uuid_t* uuid) { bool const negative_seqno(seqno < 0); bool const nil_uuid(!gu_uuid_compare (uuid, &GU_UUID_NIL)); if (negative_seqno && !nil_uuid) { gu_error ("Non-nil history UUID with negative seqno (%lld) makes " "no sense.", (long long) seqno); return -EINVAL; } else if (!negative_seqno && nil_uuid) { gu_error ("Non-negative state seqno requires non-nil history UUID."); return -EINVAL; } group->act_id_ = seqno; group->group_uuid = *uuid; return 0; } /* Initialize nodes array from component message */ static inline gcs_node_t* group_nodes_init (const gcs_group_t* group, const gcs_comp_msg_t* comp) { const long my_idx = gcs_comp_msg_self (comp); const long nodes_num = gcs_comp_msg_num (comp); gcs_node_t* ret = GU_CALLOC (nodes_num, gcs_node_t); long i; if (ret) { for (i = 0; i < nodes_num; i++) { const gcs_comp_memb_t* memb = gcs_comp_msg_member(comp, i); assert(NULL != memb); if (my_idx != i) { gcs_node_init (&ret[i], group->cache, memb->id, NULL, NULL, -1, -1, -1, memb->segment); } else { // this node gcs_node_init (&ret[i], group->cache, memb->id, group->my_name, group->my_address, group->gcs_proto_ver, group->repl_proto_ver, group->appl_proto_ver, memb->segment); } } } else { gu_error ("Could not allocate %ld x %z bytes", nodes_num, sizeof(gcs_node_t)); } return ret; } /* Free nodes array */ static void group_nodes_free (gcs_group_t* group) { int i; /* cleanup after disappeared members */ for (i = 0; i < group->num; i++) { gcs_node_free (&group->nodes[i]); } if (group->nodes) gu_free (group->nodes); group->nodes = NULL; group->num = 0; group->my_idx = -1; } void gcs_group_free (gcs_group_t* group) { if (group->my_name) free ((char*)group->my_name); if (group->my_address) free ((char*)group->my_address); group_nodes_free (group); } 
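/* Reference-only sketch (illustration, guarded by "#if 0") of the gcs_group_t
 * lifecycle implied by the functions above; the node name, address, protocol
 * versions and the wrapper function itself are arbitrary placeholders. */
#if 0
static int group_lifecycle_sketch (gcache_t* cache)
{
    gcs_group_t group;

    /* gcs_group_init() sets up a single-member NON_PRIMARY component with
     * GCS_SEQNO_ILL history and a nil group UUID. */
    int ret = gcs_group_init (&group, cache, "example_node", "127.0.0.1:4567",
                              0 /* gcs */, 1 /* repl */, 1 /* appl */);
    if (ret) return ret; /* -ENOMEM when the nodes array cannot be allocated */

    /* A recovered position must be consistent: a negative seqno requires a
     * nil history UUID and vice versa, otherwise -EINVAL is returned. */
    gu_uuid_t uuid = GU_UUID_NIL;
    ret = gcs_group_init_history (&group, GCS_SEQNO_ILL, &uuid);

    gcs_group_free (&group);
    return ret;
}
#endif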
/* Reset nodes array without breaking the statistics */ static inline void group_nodes_reset (gcs_group_t* group) { int i; /* reset recv_acts at the nodes */ for (i = 0; i < group->num; i++) { if (i != group->my_idx) { gcs_node_reset (&group->nodes[i]); } else { gcs_node_reset_local (&group->nodes[i]); } } group->frag_reset = true; } /* Find node with the smallest last_applied */ static inline void group_redo_last_applied (gcs_group_t* group) { long n; long last_node = -1; gu_seqno_t last_applied = GU_LLONG_MAX; for (n = 0; n < group->num; n++) { const gcs_node_t* const node = &group->nodes[n]; gcs_seqno_t const seqno = node->last_applied; bool count = node->count_last_applied; if (gu_unlikely (0 == group->last_applied_proto_ver)) { /* @note: this may be removed after quorum v1 is phased out */ count = (GCS_NODE_STATE_SYNCED == node->status || GCS_NODE_STATE_DONOR == node->status); } // gu_debug ("last_applied[%ld]: %lld", n, seqno); /* NOTE: It is crucial for consistency that last_applied algorithm * is absolutely identical on all nodes. Therefore for the * generality sake and future compatibility we have to assume * non-blocking donor. * GCS_BLOCKING_DONOR should never be defined unless in some * very custom builds. Commenting it out for safety sake. */ //#ifndef GCS_BLOCKING_DONOR if (count //#else // if ((GCS_NODE_STATE_SYNCED == node->status) /* ignore donor */ //#endif && (seqno < last_applied)) { assert (seqno >= 0); last_applied = seqno; last_node = n; } // extra diagnostic, ignore //else if (!count) { gu_warn("not counting %d", n); } } if (gu_likely (last_node >= 0)) { group->last_applied = last_applied; group->last_node = last_node; } } static void group_go_non_primary (gcs_group_t* group) { if (group->my_idx >= 0) { assert(group->num > 0); assert(group->nodes); group->nodes[group->my_idx].status = GCS_NODE_STATE_NON_PRIM; //@todo: Perhaps the same has to be applied to the rest of the nodes[]? } else { assert(-1 == group->my_idx); assert(0 == group->num); assert(NULL == group->nodes); } group->state = GCS_GROUP_NON_PRIMARY; group->conf_id = GCS_SEQNO_ILL; // what else? Do we want to change anything about the node here? } static const char group_empty_id[GCS_COMP_MEMB_ID_MAX_LEN + 1] = { 0, }; static void group_check_donor (gcs_group_t* group) { gcs_node_state_t const my_state = group->nodes[group->my_idx].status; const char* const donor_id = group->nodes[group->my_idx].donor; if (GCS_NODE_STATE_JOINER == my_state && memcmp (donor_id, group_empty_id, sizeof(group_empty_id))) { long i; for (i = 0; i < group->num; i++) { if (i != group->my_idx && !memcmp (donor_id, group->nodes[i].id, sizeof (group->nodes[i].id))) return; } gu_warn ("Donor %s is no longer in the group. State transfer cannot " "be completed, need to abort. Aborting...", donor_id); gu_abort(); } return; } /*! Processes state messages and sets group parameters accordingly */ static void group_post_state_exchange (gcs_group_t* group) { const gcs_state_msg_t* states[group->num]; gcs_state_quorum_t* quorum = &group->quorum; bool new_exchange = gu_uuid_compare (&group->state_uuid, &GU_UUID_NIL); long i; /* Collect state messages from nodes. */ /* Looping here every time is suboptimal, but simply counting state messages * is not straightforward too: nodes may disappear, so the final count may * include messages from the disappeared nodes. 
* Let's put it this way: looping here is reliable and not that expensive.*/ for (i = 0; i < group->num; i++) { states[i] = group->nodes[i].state_msg; if (NULL == states[i] || (new_exchange && gu_uuid_compare (&group->state_uuid, gcs_state_msg_uuid(states[i])))) return; // not all states from THIS state exch. received, wait } gu_debug ("STATE EXCHANGE: " GU_UUID_FORMAT " complete.", GU_UUID_ARGS(&group->state_uuid)); gcs_state_msg_get_quorum (states, group->num, quorum); if (quorum->version >= 0) { if (quorum->version < 2) { group->last_applied_proto_ver = 0; } else { group->last_applied_proto_ver = 1; } } else { gu_fatal ("Negative quorum version: %d", quorum->version); gu_abort(); } // Update each node state based on quorum outcome: // is it up to date, does it need SST and stuff for (i = 0; i < group->num; i++) { gcs_node_update_status (&group->nodes[i], quorum); } if (quorum->primary) { // primary configuration if (new_exchange) { // new state exchange happened if (!gu_uuid_compare(&group->group_uuid, &quorum->group_uuid) && group->act_id_ > quorum->act_id) { gu_fatal("Reversing history: %lld -> %lld, this member has " "applied %lld more events than the primary component." "Data loss is possible. Aborting.", (long long)group->act_id_, (long long)quorum->act_id, (long long)(group->act_id_ - quorum->act_id)); gu_abort(); } group->state = GCS_GROUP_PRIMARY; group->act_id_ = quorum->act_id; group->conf_id = quorum->conf_id + 1; group->group_uuid = quorum->group_uuid; group->prim_uuid = group->state_uuid; group->state_uuid = GU_UUID_NIL; } else { // no state exchange happend, processing old state messages assert (GCS_GROUP_PRIMARY == group->state); group->conf_id++; } group->prim_seqno = group->conf_id; group->prim_num = 0; for (i = 0; i < group->num; i++) { group->prim_num += gcs_node_is_joined (group->nodes[i].status); } assert (group->prim_num > 0); } else { // non-primary configuration group_go_non_primary (group); } gu_info ("Quorum results:" "\n\tversion = %u," "\n\tcomponent = %s," "\n\tconf_id = %lld," "\n\tmembers = %d/%d (joined/total)," "\n\tact_id = %lld," "\n\tlast_appl. = %lld," "\n\tprotocols = %d/%d/%d (gcs/repl/appl)," "\n\tgroup UUID = " GU_UUID_FORMAT, quorum->version, quorum->primary ? "PRIMARY" : "NON-PRIMARY", quorum->conf_id, group->prim_num, group->num, quorum->act_id, group->last_applied, quorum->gcs_proto_ver, quorum->repl_proto_ver, quorum->appl_proto_ver, GU_UUID_ARGS(&quorum->group_uuid)); group_check_donor(group); } // does basic sanity check of the component message (in response to #145) static void group_check_comp_msg (bool prim, long my_idx, long members) { if (my_idx >= 0) { if (my_idx < members) return; } else { if (!prim && (0 == members)) return; } gu_fatal ("Malformed component message from backend: " "%s, idx = %ld, members = %ld", prim ? "PRIMARY" : "NON-PRIMARY", my_idx, members); assert (0); gu_abort (); } gcs_group_state_t gcs_group_handle_comp_msg (gcs_group_t* group, const gcs_comp_msg_t* comp) { long new_idx, old_idx; gcs_node_t* new_nodes = NULL; ulong new_memb = 0; const bool prim_comp = gcs_comp_msg_primary (comp); const bool bootstrap = gcs_comp_msg_bootstrap(comp); const long new_my_idx = gcs_comp_msg_self (comp); const long new_nodes_num = gcs_comp_msg_num (comp); group_check_comp_msg (prim_comp, new_my_idx, new_nodes_num); if (new_my_idx >= 0) { gu_info ("New COMPONENT: primary = %s, bootstrap = %s, my_idx = %ld, " "memb_num = %ld", prim_comp ? "yes" : "no", bootstrap ? 
"yes" : "no", new_my_idx, new_nodes_num); new_nodes = group_nodes_init (group, comp); if (!new_nodes) { gu_fatal ("Could not allocate memory for %ld-node component.", gcs_comp_msg_num (comp)); assert(0); return (gcs_group_state_t)-ENOMEM; } if (GCS_GROUP_PRIMARY == group->state) { gu_debug ("#281: Saving %s over %s", gcs_node_state_to_str(group->nodes[group->my_idx].status), gcs_node_state_to_str(group->prim_state)); group->prim_state = group->nodes[group->my_idx].status; } } else { // Self-leave message gu_info ("Received self-leave message."); assert (0 == new_nodes_num); assert (!prim_comp); } if (prim_comp) { /* Got PRIMARY COMPONENT - Hooray! */ assert (new_my_idx >= 0); if (group->state == GCS_GROUP_PRIMARY) { /* we come from previous primary configuration, relax */ } else if (bootstrap) { /* Is there need to initialize something else in this case? */ group->nodes[group->my_idx].bootstrap = true; } else { const bool first_component = #ifndef GCS_CORE_TESTING (1 == group->num) && !strcmp (NODE_NO_ID, group->nodes[0].id); #else (1 == group->num); #endif if (1 == new_nodes_num && first_component) { /* bootstrap new configuration */ assert (GCS_GROUP_NON_PRIMARY == group->state); assert (1 == group->num); assert (0 == group->my_idx); // This bootstraps initial primary component for state exchange gu_uuid_generate (&group->prim_uuid, NULL, 0); group->prim_seqno = 0; group->prim_num = 1; group->state = GCS_GROUP_PRIMARY; if (group->act_id_ < 0) { // no history provided: start a new one group->act_id_ = GCS_SEQNO_NIL; gu_uuid_generate (&group->group_uuid, NULL, 0); gu_info ("Starting new group from scratch: " GU_UUID_FORMAT, GU_UUID_ARGS(&group->group_uuid)); } // the following should be removed under #474 group->nodes[0].status = GCS_NODE_STATE_JOINED; /* initialize node ID to the one given by the backend - this way * we'll be recognized as coming from prev. conf. in node array * remap below */ strncpy ((char*)group->nodes[0].id, new_nodes[0].id, sizeof (new_nodes[0].id) - 1); group->nodes[0].segment = new_nodes[0].segment; } } } else { group_go_non_primary (group); } /* Remap old node array to new one to preserve action continuity */ for (new_idx = 0; new_idx < new_nodes_num; new_idx++) { /* find member index in old component by unique member id */ for (old_idx = 0; old_idx < group->num; old_idx++) { // just scan through old group if (!strcmp(group->nodes[old_idx].id, new_nodes[new_idx].id)) { /* the node was in previous configuration with us */ /* move node context to new node array */ gcs_node_move (&new_nodes[new_idx], &group->nodes[old_idx]); break; } } /* if wasn't found in new configuration, new member - * need to do state exchange */ new_memb |= (old_idx == group->num); } /* free old nodes array */ group_nodes_free (group); group->my_idx = new_my_idx; group->num = new_nodes_num; group->nodes = new_nodes; if (gcs_comp_msg_primary(comp) || bootstrap) { /* TODO: for now pretend that we always have new nodes and perform * state exchange because old states can carry outdated node status. * (also protocol voting needs to be redone) * However this means aborting ongoing actions. Find a way to avoid * this extra state exchange. Generate new state messages on behalf * of other nodes? 
see #238 */ new_memb = true; /* if new nodes joined, reset ongoing actions and state messages */ if (new_memb) { group_nodes_reset (group); group->state = GCS_GROUP_WAIT_STATE_UUID; group->state_uuid = GU_UUID_NIL; // prepare for state exchange } else { if (GCS_GROUP_PRIMARY == group->state) { /* since we don't have any new nodes since last PRIMARY, we skip state exchange */ group_post_state_exchange (group); } } group_redo_last_applied (group); } return group->state; } gcs_group_state_t gcs_group_handle_uuid_msg (gcs_group_t* group, const gcs_recv_msg_t* msg) { assert (msg->size == sizeof(gu_uuid_t)); if (GCS_GROUP_WAIT_STATE_UUID == group->state && 0 == msg->sender_idx /* check that it is from the representative */) { group->state_uuid = *(gu_uuid_t*)msg->buf; group->state = GCS_GROUP_WAIT_STATE_MSG; } else { gu_warn ("Stray state UUID msg: " GU_UUID_FORMAT " from node %ld (%s), current group state %s", GU_UUID_ARGS((gu_uuid_t*)msg->buf), msg->sender_idx, group->nodes[msg->sender_idx].name, gcs_group_state_str[group->state]); } return group->state; } static void group_print_state_debug(gcs_state_msg_t* state) { size_t str_len = 1024; char state_str[str_len]; gcs_state_msg_snprintf (state_str, str_len, state); gu_info ("%s", state_str); } gcs_group_state_t gcs_group_handle_state_msg (gcs_group_t* group, const gcs_recv_msg_t* msg) { if (GCS_GROUP_WAIT_STATE_MSG == group->state) { gcs_state_msg_t* state = gcs_state_msg_read (msg->buf, msg->size); if (state) { const gu_uuid_t* state_uuid = gcs_state_msg_uuid (state); if (!gu_uuid_compare(&group->state_uuid, state_uuid)) { gu_info ("STATE EXCHANGE: got state msg: " GU_UUID_FORMAT " from %d (%s)", GU_UUID_ARGS(state_uuid), msg->sender_idx, gcs_state_msg_name(state)); if (gu_log_debug) group_print_state_debug(state); gcs_node_record_state (&group->nodes[msg->sender_idx], state); group_post_state_exchange (group); } else { gu_debug ("STATE EXCHANGE: stray state msg: " GU_UUID_FORMAT " from node %ld (%s), current state UUID: " GU_UUID_FORMAT, GU_UUID_ARGS(state_uuid), msg->sender_idx, gcs_state_msg_name(state), GU_UUID_ARGS(&group->state_uuid)); if (gu_log_debug) group_print_state_debug(state); gcs_state_msg_destroy (state); } } else { gu_warn ("Could not parse state message from node %d", msg->sender_idx, group->nodes[msg->sender_idx].name); } } return group->state; } /*! Returns new last applied value if it has changes, 0 otherwise */ gcs_seqno_t gcs_group_handle_last_msg (gcs_group_t* group, const gcs_recv_msg_t* msg) { gcs_seqno_t seqno; assert (GCS_MSG_LAST == msg->type); assert (sizeof(gcs_seqno_t) == msg->size); seqno = gcs_seqno_gtoh(*(gcs_seqno_t*)(msg->buf)); // This assert is too restrictive. It requires application to send // last applied messages while holding TO, otherwise there's a race // between threads. // assert (seqno >= group->last_applied); gcs_node_set_last_applied (&group->nodes[msg->sender_idx], seqno); if (msg->sender_idx == group->last_node && seqno > group->last_applied) { /* node that was responsible for the last value, has changed it. * need to recompute it */ gcs_seqno_t old_val = group->last_applied; group_redo_last_applied (group); if (old_val < group->last_applied) { gu_debug ("New COMMIT CUT %lld after %lld from %d", (long long)group->last_applied, (long long)seqno, msg->sender_idx); return group->last_applied; } } return 0; } /*! 
return true if this node is the sender to notify the calling thread of * success */ int gcs_group_handle_join_msg (gcs_group_t* group, const gcs_recv_msg_t* msg) { int const sender_idx = msg->sender_idx; gcs_node_t* sender = &group->nodes[sender_idx]; assert (GCS_MSG_JOIN == msg->type); // TODO: define an explicit type for the join message, like gcs_join_msg_t assert (msg->size == sizeof(gcs_seqno_t)); if (GCS_NODE_STATE_DONOR == sender->status || GCS_NODE_STATE_JOINER == sender->status) { int j; gcs_seqno_t seqno = gcs_seqno_gtoh(*(gcs_seqno_t*)msg->buf); gcs_node_t* peer = NULL; const char* peer_id = NULL; const char* peer_name = "left the group"; int peer_idx = -1; bool from_donor = false; const char* st_dir = NULL; // state transfer direction symbol if (GCS_NODE_STATE_DONOR == sender->status) { peer_id = sender->joiner; from_donor = true; st_dir = "to"; assert (group->last_applied_proto_ver >= 0); if (0 == group->last_applied_proto_ver) { /* #454 - we don't switch to JOINED here, * instead going straignt to SYNCED */ } else { assert(sender->count_last_applied); assert(sender->desync_count > 0); sender->desync_count -= 1; if (0 == sender->desync_count) sender->status = GCS_NODE_STATE_JOINED; } } else { peer_id = sender->donor; st_dir = "from"; if (group->quorum.version < 2) { // #591 remove after quorum v1 is phased out sender->status = GCS_NODE_STATE_JOINED; group->prim_num++; } else { if (seqno >= 0) { sender->status = GCS_NODE_STATE_JOINED; group->prim_num++; } else { sender->status = GCS_NODE_STATE_PRIM; } } } // Try to find peer. for (j = 0; j < group->num; j++) { // #483 if (j == sender_idx) continue; if (!memcmp(peer_id, group->nodes[j].id, sizeof (group->nodes[j].id))) { peer_idx = j; peer = &group->nodes[peer_idx]; peer_name = peer->name; break; } } if (j == group->num) { gu_warn ("Could not find peer: %s", peer_id); } if (seqno < 0) { gu_warn ("%d.%d (%s): State transfer %s %d.%d (%s) failed: %d (%s)", sender_idx, sender->segment, sender->name, st_dir, peer_idx, peer ? peer->segment : -1, peer_name, (int)seqno, strerror((int)-seqno)); if (from_donor && peer_idx == group->my_idx && GCS_NODE_STATE_JOINER == group->nodes[peer_idx].status) { // this node will be waiting for SST forever. If it has only // one recv thread there is no (generic) way to wake it up. gu_fatal ("Will never receive state. Need to abort."); // return to core to shutdown the backend before aborting return -ENOTRECOVERABLE; } if (group->quorum.version < 2 && !from_donor && // #591 sender_idx == group->my_idx) { // remove after quorum v1 is phased out gu_fatal ("Faield to receive state. Need to abort."); return -ENOTRECOVERABLE; } } else { if (sender_idx == peer_idx) { if (GCS_NODE_STATE_JOINED == sender->status) { gu_info ("Member %d.%d (%s) resyncs itself to group", sender_idx, sender->segment, sender->name); } else { assert(sender->desync_count > 0); return 0; // don't deliver up } } else { gu_info ("%d.%d (%s): State transfer %s %d.%d (%s) complete.", sender_idx, sender->segment, sender->name, st_dir, peer_idx, peer ? peer->segment : -1, peer_name); } } } else { if (GCS_NODE_STATE_PRIM == sender->status) { gu_warn("Rejecting JOIN message from %d.%d (%s): new State Transfer" " required.", sender_idx, sender->segment, sender->name); } else { // should we freak out and throw an error? gu_warn("Protocol violation. JOIN message sender %d.%d (%s) is not " "in state transfer (%s). 
Message ignored.", sender_idx, sender->segment, sender->name, gcs_node_state_to_str(sender->status)); } return 0; } return (sender_idx == group->my_idx); } int gcs_group_handle_sync_msg (gcs_group_t* group, const gcs_recv_msg_t* msg) { int const sender_idx = msg->sender_idx; gcs_node_t* sender = &group->nodes[sender_idx]; assert (GCS_MSG_SYNC == msg->type); if (GCS_NODE_STATE_JOINED == sender->status || /* #454 - at this layer we jump directly from DONOR to SYNCED */ (0 == group->last_applied_proto_ver && GCS_NODE_STATE_DONOR == sender->status)) { sender->status = GCS_NODE_STATE_SYNCED; sender->count_last_applied = true; group_redo_last_applied (group);//from now on this node must be counted gu_info ("Member %d.%d (%s) synced with group.", sender_idx, sender->segment, sender->name); return (sender_idx == group->my_idx); } else { if (GCS_NODE_STATE_SYNCED == sender->status) { gu_debug ("Redundant SYNC message from %d.%d (%s).", sender_idx, sender->segment, sender->name); } else if (GCS_NODE_STATE_DONOR == sender->status) { // this is possible with quick succession of desync()/resync() calls gu_debug ("SYNC message from %d.%d (%s, DONOR). Ignored.", sender_idx, sender->segment, sender->name); } else { gu_warn ("SYNC message from non-JOINED %d.%d (%s, %s). Ignored.", sender_idx, sender->segment, sender->name, gcs_node_state_to_str(sender->status)); } /* signal sender that it didn't work */ return -ERESTART * (sender_idx == group->my_idx); } } static inline bool group_node_is_stateful (const gcs_group_t* group, const gcs_node_t* node) { if (group->quorum.version < 3) { return strcmp (node->name, GCS_ARBITRATOR_NAME); } else { return ((gcs_node_flags(node) & GCS_STATE_ARBITRATOR) == 0); } } static int group_find_node_by_state (const gcs_group_t* const group, int const joiner_idx, gcs_node_state_t const status) { gcs_segment_t const segment = group->nodes[joiner_idx].segment; int idx; int donor = -1; bool hnss = false; /* have nodes in the same segment */ for (idx = 0; idx < group->num; idx++) { if (joiner_idx == idx) continue; /* skip joiner */ gcs_node_t* node = &group->nodes[idx]; if (node->status >= status && group_node_is_stateful (group, node)) donor = idx; /* potential donor */ if (segment == node->segment) { if (donor == idx) return donor; /* found suitable donor in the * same segment */ if (node->status >= GCS_NODE_STATE_JOINER) hnss = true;; } } /* Have not found suitable donor in the same segment. */ if (!hnss && donor >= 0) { if (joiner_idx == group->my_idx) { gu_warn ("There are no nodes in the same segment that will ever " "be able to become donors, yet there is a suitable donor " "outside. Will use that one."); } return donor; } else { /* wait for a suitable donor to appear in the same segment */ return -EAGAIN; } } static int group_find_node_by_name (const gcs_group_t* const group, int const joiner_idx, const char* const name, int const name_len, gcs_node_state_t const status) { int idx; for (idx = 0; idx < group->num; idx++) { gcs_node_t* node = &group->nodes[idx]; if (!strncmp(node->name, name, name_len)) { if (joiner_idx == idx) { return -EHOSTDOWN; } else if (node->status >= status) { return idx; } else if (node->status >= GCS_NODE_STATE_JOINER) { /* will eventually become SYNCED */ return -EAGAIN; } else { /* technically we could return -EDEADLK here, but as long as * it is not -EAGAIN, it does not matter. If the node is in a * PRIMARY state, it is as good as not found. 
*/ break; } } } return -EHOSTUNREACH; } /* Calls group_find_node_by_name() for each name in comma-separated list, * falls back to group_find_node_by_state() if name (or list) is empty. */ static int group_for_each_donor_in_string (const gcs_group_t* const group, int const str_version, int const joiner_idx, const char* const str, int const str_len, gcs_node_state_t const status) { assert (str != NULL); const char* begin = str; const char* end; int err = -EHOSTDOWN; /* worst error */ /* dangling comma */ bool const dcomma = (str_len && str[str_len-1] == ',' && str_version >= 2); do { end = strchr(begin, ','); int len; if (NULL == end) { len = str_len - (begin - str); } else { len = end - begin; } assert (len >= 0); int idx; if (len > 0) { idx = group_find_node_by_name (group, joiner_idx, begin, len, status); } else { if (err == -EAGAIN && !dcomma) { /* -EAGAIN here means that at least one of the nodes in the * list will be available later, so don't try others. * (Proto 1 UPDATE: unless there is a dangling comma) */ idx = err; } else { idx = group_find_node_by_state(group, joiner_idx, status); } } if (idx >= 0) return idx; /* once we hit -EAGAIN, don't try to change error code: this means * that at least one of the nodes in the list will become available. */ if (-EAGAIN != err) err = idx; begin = end + 1; /* skip comma */ } while (end != NULL); return err; } static gcs_seqno_t group_lowest_cached_seqno(const gcs_group_t* const group) { gcs_seqno_t ret = GCS_SEQNO_ILL; int idx = 0; for (idx = 0; idx < group->num; idx++) { gcs_seqno_t seq = gcs_node_cached(&group->nodes[idx]); if (seq != GCS_SEQNO_ILL) { if (ret == GCS_SEQNO_ILL || seq < ret) { ret = seq; } } } return ret; } static int group_find_ist_donor_by_name (const gcs_group_t* const group, int joiner_idx, const char* name, int name_len, gcs_seqno_t ist_seqno, gcs_node_state_t status) { int idx = 0; for (idx = 0; idx < group->num; idx++) { gcs_node_t* node = &group->nodes[idx]; gcs_seqno_t cached = gcs_node_cached(node); if (strncmp(node->name, name, name_len) == 0 && joiner_idx != idx && node->status >= status && cached != GCS_SEQNO_ILL && // ist potentially possible (ist_seqno + 1) >= cached) { return idx; } } return -1; } static int group_find_ist_donor_by_name_in_string ( const gcs_group_t* const group, int joiner_idx, const char* str, int str_len, gcs_seqno_t ist_seqno, gcs_node_state_t status) { assert (str != NULL); const char* begin = str; const char* end; gu_debug("ist_seqno[%lld]", (long long)ist_seqno); // return the highest cached seqno node. int ret = -1; do { end = strchr(begin, ','); int len = 0; if (end == NULL) { len = str_len - (begin - str); } else { len = end - begin; } assert (len >= 0); if (len == 0) break; int idx = group_find_ist_donor_by_name( group, joiner_idx, begin, len, ist_seqno, status); if (idx >= 0) { if (ret == -1 || gcs_node_cached(&group->nodes[idx]) >= gcs_node_cached(&group->nodes[ret])) { ret = idx; } } begin = end + 1; } while (end != NULL); if (ret == -1) { gu_debug("not found"); } else { gu_debug("found. name[%s], seqno[%lld]", group->nodes[ret].name, (long long)gcs_node_cached(&group->nodes[ret])); } return ret; } static int group_find_ist_donor_by_state (const gcs_group_t* const group, int joiner_idx, gcs_seqno_t ist_seqno, gcs_node_state_t status) { gcs_node_t* joiner = &group->nodes[joiner_idx]; gcs_segment_t joiner_segment = joiner->segment; // find node who is ist potentially possible. // first highest cached seqno local node. // then highest cached seqno remote node. 
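    /* Editor's note: a node qualifies as an IST donor candidate here when
     *
     *   node->status >= status                    (donor-eligible state)
     *   && group_node_is_stateful(group, node)    (not an arbitrator)
     *   && node_cached != GCS_SEQNO_ILL           (has a cached range at all)
     *   && node_cached <= ist_seqno + 1           (cache reaches back far enough)
     *
     * and among the qualifying nodes the one with the highest cached seqno
     * wins, with same-segment candidates preferred over remote ones. */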
int idx = 0; int local_idx = -1; int remote_idx = -1; for (idx = 0; idx < group->num; idx++) { if (joiner_idx == idx) continue; gcs_node_t* const node = &group->nodes[idx]; gcs_seqno_t const node_cached = gcs_node_cached(node); if (node->status >= status && group_node_is_stateful(group, node) && node_cached != GCS_SEQNO_ILL && node_cached <= (ist_seqno + 1)) { int* const idx_ptr = (joiner_segment == node->segment) ? &local_idx : &remote_idx; if (*idx_ptr == -1 || node_cached >= gcs_node_cached(&group->nodes[*idx_ptr])) { *idx_ptr = idx; } } } if (local_idx >= 0) { gu_debug("local found. name[%s], seqno[%lld]", group->nodes[local_idx].name, (long long)gcs_node_cached(&group->nodes[local_idx])); return local_idx; } if (remote_idx >= 0) { gu_debug("remote found. name[%s], seqno[%lld]", group->nodes[remote_idx].name, (long long)gcs_node_cached(&group->nodes[remote_idx])); return remote_idx; } gu_debug("not found."); return -1; } static int group_find_ist_donor (const gcs_group_t* const group, int str_version, int joiner_idx, const char* str, int str_len, gcs_seqno_t ist_seqno, gcs_node_state_t status) { int idx = -1; gcs_seqno_t conf_seqno = group->quorum.act_id; gcs_seqno_t lowest_cached_seqno = group_lowest_cached_seqno(group); if (lowest_cached_seqno == GCS_SEQNO_ILL) { gu_debug("fallback to sst. lowest_cached_seqno == GCS_SEQNO_ILL"); return -1; } gcs_seqno_t const max_cached_range = conf_seqno - lowest_cached_seqno; gcs_seqno_t safety_gap = max_cached_range >> 7; /* 1.0 / 128 ~= 0.008 */ safety_gap = safety_gap < (1 << 20) ? safety_gap : (1 << 20); /* Be sensible and don't reserve more than 1M */ gcs_seqno_t safe_ist_seqno = lowest_cached_seqno + safety_gap; gu_debug("ist_seqno[%lld], lowest_cached_seqno[%lld]," "conf_seqno[%lld], safe_ist_seqno[%lld]", (long long)ist_seqno, (long long)lowest_cached_seqno, (long long)conf_seqno, (long long)safe_ist_seqno); if (ist_seqno < safe_ist_seqno) { // unsafe to perform ist. gu_debug("fallback to sst. ist_seqno < safe_ist_seqno"); return -1; } if (str_len) { // find ist donor by name. idx = group_find_ist_donor_by_name_in_string( group, joiner_idx, str, str_len, ist_seqno, status); if (idx >= 0) return idx; } // find ist donor by status. idx = group_find_ist_donor_by_state( group, joiner_idx, ist_seqno, status); if (idx >= 0) return idx; return -1; } int gcs_group_find_donor(const gcs_group_t* group, int const str_version, int const joiner_idx, const char* const donor_string, int const donor_len, const gu_uuid_t* ist_uuid, gcs_seqno_t ist_seqno) { static gcs_node_state_t const min_donor_state = GCS_NODE_STATE_SYNCED; /* try to find ist donor first. if it fails, fallbacks to find sst donor*/ int donor_idx = -1; if (str_version >= 2 && gu_uuid_compare(&group->group_uuid, ist_uuid) == 0) { assert (ist_seqno != GCS_SEQNO_ILL); donor_idx = group_find_ist_donor(group, str_version, joiner_idx, donor_string, donor_len, ist_seqno, min_donor_state); } if (donor_idx < 0) { /* if donor_string is empty, it will fallback to find_node_by_state() */ donor_idx = group_for_each_donor_in_string (group, str_version, joiner_idx, donor_string, donor_len, min_donor_state); } return donor_idx; } /*! * Selects and returns the index of state transfer donor, if available. * Updates donor and joiner status if state transfer is possible * * @return * donor index or negative error code: * -EHOSTUNREACH if reqiested donor is not available * -EAGAIN if there were no nodes in the proper state. 
*/ static int group_select_donor (gcs_group_t* group, int const str_version, int const joiner_idx, const char* const donor_string, const gu_uuid_t* ist_uuid, gcs_seqno_t ist_seqno, bool const desync) { static gcs_node_state_t const min_donor_state = GCS_NODE_STATE_SYNCED; int donor_idx; int const donor_len = strlen(donor_string); bool const required_donor = (donor_len > 0); if (desync) { /* sender wants to become "donor" itself */ assert(donor_len > 0); gcs_node_state_t const st(group->nodes[joiner_idx].status); if (st >= min_donor_state || (st >= GCS_NODE_STATE_DONOR && group->quorum.version >= 4)) { donor_idx = joiner_idx; gcs_node_t& donor(group->nodes[donor_idx]); assert(donor.desync_count == 0 || group->quorum.version >= 4); assert(donor.desync_count == 0 || st == GCS_NODE_STATE_DONOR); (void)donor; // keep optimised build happy } else donor_idx = -EAGAIN; } else { donor_idx = gcs_group_find_donor(group, str_version, joiner_idx, donor_string, donor_len, ist_uuid, ist_seqno); } if (donor_idx >= 0) { assert(donor_idx != joiner_idx || desync); gcs_node_t* const joiner = &group->nodes[joiner_idx]; gcs_node_t* const donor = &group->nodes[donor_idx]; donor->desync_count += 1; if (desync && 1 == donor->desync_count) { gu_info ("Member %d.%d (%s) desyncs itself from group", donor_idx, donor->segment, donor->name); } else if (!desync) { gu_info ("Member %d.%d (%s) requested state transfer from '%s'. " "Selected %d.%d (%s)(%s) as donor.", joiner_idx, joiner->segment, joiner->name, required_donor ? donor_string : "*any*", donor_idx, donor->segment, donor->name, gcs_node_state_to_str(donor->status)); } // reserve donor, confirm joiner (! assignment order is significant !) joiner->status = GCS_NODE_STATE_JOINER; donor->status = GCS_NODE_STATE_DONOR; if (1 == donor->desync_count) { /* SST or first desync */ memcpy (donor->joiner, joiner->id, GCS_COMP_MEMB_ID_MAX_LEN+1); memcpy (joiner->donor, donor->id, GCS_COMP_MEMB_ID_MAX_LEN+1); } else { assert(true == desync); } } else { gu_warn ("Member %d.%d (%s) requested state transfer from '%s', " "but it is impossible to select State Transfer donor: %s", joiner_idx, group->nodes[joiner_idx].segment, group->nodes[joiner_idx].name, required_donor ? donor_string : "*any*", strerror (-donor_idx)); } return donor_idx; } /* Cleanup ignored state request */ void gcs_group_ignore_action (gcs_group_t* group, struct gcs_act_rcvd* act) { if (act->act.type <= GCS_ACT_STATE_REQ) { gcs_gcache_free (group->cache, act->act.buf); } act->act.buf = NULL; act->act.buf_len = 0; act->act.type = GCS_ACT_ERROR; act->sender_idx = -1; assert (GCS_SEQNO_ILL == act->id); } static bool group_desync_request (const char* const donor) { return (strlen (GCS_DESYNC_REQ) == strlen(donor) && !strcmp(GCS_DESYNC_REQ, donor)); } /* NOTE: check gcs_request_state_transfer() for sender part. */ /*! Returns 0 if request is ignored, request size if it should be passed up */ int gcs_group_handle_state_request (gcs_group_t* group, struct gcs_act_rcvd* act) { // pass only to sender and to one potential donor const char* donor_name = (const char*)act->act.buf; size_t donor_name_len = strlen(donor_name); int donor_idx = -1; int const joiner_idx = act->sender_idx; const char* joiner_name = group->nodes[joiner_idx].name; gcs_node_state_t joiner_status = group->nodes[joiner_idx].status; bool const desync = group_desync_request (donor_name); gu_uuid_t ist_uuid = {{0, }}; gcs_seqno_t ist_seqno = GCS_SEQNO_ILL; int str_version = 1; // actually it's 0 or 1. 
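    /* Editor's note: for str_version >= 2 the joiner's request buffer, as
     * parsed just below, is assumed to be laid out roughly as
     *
     *   | donor_name | '\0' | 'V' | version byte | ist_uuid (sizeof(gu_uuid_t)) |
     *   | ist_seqno (sizeof(gcs_seqno_t)) | application request ... |
     *
     * The uuid/seqno block is read out here and then memmove()'d away, so the
     * rest of the function sees the original "name\0request" layout again. */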
if (act->act.buf_len != (ssize_t)(donor_name_len + 1) && donor_name[donor_name_len + 1] == 'V') { str_version = (int)donor_name[donor_name_len + 2]; } if (str_version >= 2) { const char* ist_buf = donor_name + donor_name_len + 3; memcpy(&ist_uuid, ist_buf, sizeof(ist_uuid)); ist_seqno = gcs_seqno_gtoh(*(gcs_seqno_t*)(ist_buf + sizeof(ist_uuid))); // change act.buf's content to original version. // and it's safe to change act.buf_len size_t head = donor_name_len + 3 + sizeof(ist_uuid) + sizeof(ist_seqno); memmove((char*)act->act.buf + donor_name_len + 1, (char*)act->act.buf + head, act->act.buf_len - head); act->act.buf_len -= sizeof(ist_uuid) + sizeof(ist_seqno) + 2; } assert (GCS_ACT_STATE_REQ == act->act.type); if (joiner_status != GCS_NODE_STATE_PRIM && !desync) { const char* joiner_status_string = gcs_node_state_to_str(joiner_status); if (group->my_idx == joiner_idx) { gu_error ("Requesting state transfer while in %s. " "Ignoring.", joiner_status_string); act->id = -ECANCELED; return act->act.buf_len; } else { gu_error ("Member %d.%d (%s) requested state transfer, " "but its state is %s. Ignoring.", joiner_idx, group->nodes[joiner_idx].segment, joiner_name, joiner_status_string); gcs_group_ignore_action (group, act); return 0; } } donor_idx = group_select_donor(group, str_version, joiner_idx, donor_name, &ist_uuid, ist_seqno, desync); assert (donor_idx != joiner_idx || desync || donor_idx < 0); assert (donor_idx == joiner_idx || !desync || donor_idx < 0); if (group->my_idx != joiner_idx && group->my_idx != donor_idx) { // if neither DONOR nor JOINER, ignore request gcs_group_ignore_action (group, act); return 0; } else if (group->my_idx == donor_idx) { act->act.buf_len -= donor_name_len + 1; memmove (*(void**)&act->act.buf, ((char*)act->act.buf) + donor_name_len + 1, act->act.buf_len); // now action starts with request, like it was supplied by application, // see gcs_request_state_transfer() } // Return index of donor (or error) in the seqno field to sender. // It will be used to detect error conditions (no availabale donor, // donor crashed and the like). // This may be ugly, well, any ideas? act->id = donor_idx; return act->act.buf_len; } static ssize_t group_memb_record_size (gcs_group_t* group) { ssize_t ret = 0; long idx; for (idx = 0; idx < group->num; idx++) { ret += strlen(group->nodes[idx].id) + 1; ret += strlen(group->nodes[idx].name) + 1; ret += strlen(group->nodes[idx].inc_addr) + 1; ret += sizeof(gcs_seqno_t); // cached seqno } return ret; } /* Creates new configuration action */ ssize_t gcs_group_act_conf (gcs_group_t* group, struct gcs_act* act, int* gcs_proto_ver) { // if (*gcs_proto_ver < group->quorum.gcs_proto_ver) // *gcs_proto_ver = group->quorum.gcs_proto_ver; // only go up, see #482 // else if (group->quorum.gcs_proto_ver >= 0 && // group->quorum.gcs_proto_ver < *gcs_proto_ver) { // gu_warn ("Refusing GCS protocol version downgrade from %d to %d", // *gcs_proto_ver, group->quorum.gcs_proto_ver); // } // actually we allow gcs protocol version downgrade. // because if message version is inconsistent with gcs protocol version // gcs requires resending message with correct gcs protocol version. 
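    /* Editor's note: the configuration action assembled below packs one
     * variable-length record per member into conf->data, back to back, in
     * exactly the form group_memb_record_size() above accounts for:
     *
     *   | id '\0' | name '\0' | inc_addr '\0' | cached seqno (sizeof(gcs_seqno_t)) |
     *
     * so a receiver has to walk the three strings of each record to reach
     * its cached-seqno field. */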
*gcs_proto_ver = group->quorum.gcs_proto_ver; ssize_t conf_size = sizeof(gcs_act_conf_t) + group_memb_record_size(group); gcs_act_conf_t* conf = static_cast(malloc (conf_size)); if (conf) { long idx; conf->seqno = group->act_id_; conf->conf_id = group->conf_id; conf->memb_num = group->num; conf->my_idx = group->my_idx; conf->repl_proto_ver = group->quorum.repl_proto_ver; conf->appl_proto_ver = group->quorum.appl_proto_ver; memcpy (conf->uuid, &group->group_uuid, sizeof (gu_uuid_t)); if (group->num) { assert (conf->my_idx >= 0); conf->my_state = group->nodes[group->my_idx].status; char* ptr = &conf->data[0]; for (idx = 0; idx < group->num; idx++) { strcpy (ptr, group->nodes[idx].id); ptr += strlen(ptr) + 1; strcpy (ptr, group->nodes[idx].name); ptr += strlen(ptr) + 1; strcpy (ptr, group->nodes[idx].inc_addr); ptr += strlen(ptr) + 1; gcs_seqno_t cached = gcs_node_cached(&group->nodes[idx]); memcpy(ptr, &cached, sizeof(cached)); ptr += sizeof(cached); } } else { // self leave message assert (conf->conf_id < 0); assert (conf->my_idx < 0); conf->my_state = GCS_NODE_STATE_NON_PRIM; } act->buf = conf; act->buf_len = conf_size; act->type = GCS_ACT_CONF; return conf_size; } else { return -ENOMEM; } } // for future use in fake state exchange (in unit tests et.al. See #237, #238) static gcs_state_msg_t* group_get_node_state (const gcs_group_t* const group, long const node_idx) { const gcs_node_t* const node = &group->nodes[node_idx]; uint8_t flags = 0; if (0 == node_idx) flags |= GCS_STATE_FREP; if (node->count_last_applied) flags |= GCS_STATE_FCLA; if (node->bootstrap) flags |= GCS_STATE_FBOOTSTRAP; #ifdef GCS_FOR_GARB flags |= GCS_STATE_ARBITRATOR; int64_t const cached = GCS_SEQNO_ILL; #else int64_t const cached = /* group->cache check is needed for unit tests */ group->cache ? gcache_seqno_min(group->cache) : GCS_SEQNO_ILL; #endif /* GCS_FOR_GARB */ return gcs_state_msg_create ( &group->state_uuid, &group->group_uuid, &group->prim_uuid, group->prim_seqno, group->act_id_, cached, group->prim_num, group->prim_state, node->status, node->name, node->inc_addr, node->gcs_proto_ver, node->repl_proto_ver, node->appl_proto_ver, node->desync_count, flags ); } /*! 
Returns state message object for this node */ gcs_state_msg_t* gcs_group_get_state (const gcs_group_t* group) { return group_get_node_state (group, group->my_idx); } void gcs_group_get_status (const gcs_group_t* group, gu::Status& status) { int desync_count; // make sure it is not initialized if (gu_likely(group->my_idx >= 0)) { const gcs_node_t& this_node(group->nodes[group->my_idx]); desync_count = this_node.desync_count; } else { desync_count = 0; } status.insert("desync_count", gu::to_string(desync_count)); } galera-3-25.3.20/gcs/src/gcs_state_msg.hpp0000644000015300001660000001333713042054732020043 0ustar jenkinsjenkins/* * Copyright (C) 2008-2016 Codership Oy * * $Id$ */ /* * Interface to state messages * */ #ifndef _gcs_state_msg_h_ #define _gcs_state_msg_h_ #include "gcs.hpp" #include "gcs_seqno.hpp" #include "gcs_act_proto.hpp" #include #include /* State flags */ #define GCS_STATE_FREP 0x01 // group representative #define GCS_STATE_FCLA 0x02 // count last applied (for JOINED node) #define GCS_STATE_FBOOTSTRAP 0x04 // part of prim bootstrap process #define GCS_STATE_ARBITRATOR 0x08 // arbitrator or otherwise incomplete node #ifdef GCS_STATE_MSG_ACCESS typedef struct gcs_state_msg { gu_uuid_t state_uuid; // UUID of the current state exchange gu_uuid_t group_uuid; // UUID of the group gu_uuid_t prim_uuid; // last PC state UUID gcs_seqno_t prim_seqno; // last PC state seqno gcs_seqno_t received; // last action seqno (received up to) gcs_seqno_t cached; // earliest action cached const char* name; // human assigned node name const char* inc_addr; // incoming address string int version; // version of state message int gcs_proto_ver; int repl_proto_ver; int appl_proto_ver; int prim_joined; // number of joined nodes in its last PC int desync_count; gcs_node_state_t prim_state; // state of the node in its last PC gcs_node_state_t current_state; // current state of the node uint8_t flags; } gcs_state_msg_t; #else typedef struct gcs_state_msg gcs_state_msg_t; #endif /*! Quorum decisions */ typedef struct gcs_state_quorum { gu_uuid_t group_uuid; //! group UUID gcs_seqno_t act_id; //! next global seqno gcs_seqno_t conf_id; //! configuration id bool primary; //! primary configuration or not int version; //! 
state excahnge version (max understood by all) int gcs_proto_ver; int repl_proto_ver; int appl_proto_ver; } gcs_state_quorum_t; #define GCS_QUORUM_NON_PRIMARY (gcs_state_quorum_t){ \ GU_UUID_NIL, \ GCS_SEQNO_ILL, \ GCS_SEQNO_ILL, \ false, \ -1, -1, -1, -1 \ } extern gcs_state_msg_t* gcs_state_msg_create (const gu_uuid_t* state_uuid, const gu_uuid_t* group_uuid, const gu_uuid_t* prim_uuid, gcs_seqno_t prim_seqno, gcs_seqno_t received, gcs_seqno_t cached, int prim_joined, gcs_node_state_t prim_state, gcs_node_state_t current_state, const char* name, const char* inc_addr, int gcs_proto_ver, int repl_proto_ver, int appl_proto_ver, int desync_count, uint8_t flags); extern void gcs_state_msg_destroy (gcs_state_msg_t* state); /* Returns length needed to serialize gcs_state_msg_t for sending */ extern size_t gcs_state_msg_len (gcs_state_msg_t* state); /* Serialize gcs_state_msg_t into message */ extern ssize_t gcs_state_msg_write (void* msg, const gcs_state_msg_t* state); /* De-serialize gcs_state_msg_t from message */ extern gcs_state_msg_t* gcs_state_msg_read (const void* msg, ssize_t msg_len); /* Get state uuid */ extern const gu_uuid_t* gcs_state_msg_uuid (const gcs_state_msg_t* state); /* Get group uuid */ extern const gu_uuid_t* gcs_state_msg_group_uuid (const gcs_state_msg_t* state); /* Get last PC uuid */ //extern const gu_uuid_t* //gcs_state_prim_uuid (const gcs_state_msg_t* state); /* Get last received action seqno */ extern gcs_seqno_t gcs_state_msg_received (const gcs_state_msg_t* state); /* Get lowest cached action seqno */ extern gcs_seqno_t gcs_state_msg_cached (const gcs_state_msg_t* state); /* Get current node state */ extern gcs_node_state_t gcs_state_msg_current_state (const gcs_state_msg_t* state); /* Get last prim node state */ extern gcs_node_state_t gcs_state_msg_prim_state (const gcs_state_msg_t* state); /* Get node name */ extern const char* gcs_state_msg_name (const gcs_state_msg_t* state); /* Get node incoming address */ extern const char* gcs_state_msg_inc_addr (const gcs_state_msg_t* state); /* Get supported protocols */ extern void gcs_state_msg_get_proto_ver (const gcs_state_msg_t* state, int* gcs_proto_ver, int* repl_proto_ver, int* appl_proto_ver); /* Get desync count */ extern int gcs_state_msg_get_desync_count(const gcs_state_msg_t* state); /* Get state message flags */ extern uint8_t gcs_state_msg_flags (const gcs_state_msg_t* state); /*! Get quorum decision from state messages * * @param[in] states array of state message pointers * @param[in] states_num length of array * @param[out] quorum quorum calculations result * @retval 0 if there were no errors during processing. Quorum results are in * quorum parameter */ extern long gcs_state_msg_get_quorum (const gcs_state_msg_t* states[], long states_num, gcs_state_quorum_t* quorum); /* Print state message contents to buffer */ extern int gcs_state_msg_snprintf (char* str, size_t size, const gcs_state_msg_t* msg); #endif /* _gcs_state_msg_h_ */ galera-3-25.3.20/gcs/src/gcs_act_proto.cpp0000644000015300001660000000772713042054732020050 0ustar jenkinsjenkins/* * Copyright (C) 2008-2013 Codership Oy * * $Id$ */ /* * Interface to action protocol * (to be extended to support protocol versions, currently supports only v0) */ #include #include "gcs_act_proto.hpp" /* Version 0 header structure bytes: 00 01 07 08 11 12 15 16 19 20 +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--- |PV| act_id | act_size | frag_no |AT|reserved| data... 
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--- PV - protocol version AT - action type */ static const size_t PROTO_PV_OFFSET = 0; static const size_t PROTO_AT_OFFSET = 16; static const size_t PROTO_DATA_OFFSET = 20; // static const size_t PROTO_ACT_ID_OFFSET = 0; // static const size_t PROTO_ACT_SIZE_OFFSET = 8; // static const size_t PROTO_FRAG_NO_OFFSET = 12; // static const gcs_seqno_t PROTO_ACT_ID_MAX = 0x00FFFFFFFFFFFFLL; // static const unsigned int PROTO_FRAG_NO_MAX = 0xFFFFFFFF; // static const unsigned char PROTO_AT_MAX = 0xFF; static const int PROTO_VERSION = GCS_ACT_PROTO_MAX; #define PROTO_MAX_HDR_SIZE PROTO_DATA_OFFSET // for now /*! Writes header data into actual header of the message. * Remainig fragment buf and length is in frag->frag and frag->frag_len * * @return 0 on success */ long gcs_act_proto_write (gcs_act_frag_t* frag, void* buf, size_t buf_len) { #ifdef GCS_DEBUG_PROTO if ((frag->act_id > PROTO_ACT_ID_MAX) || (frag->act_size > GCS_MAX_ACT_SIZE) || (frag->frag_no > PROTO_FRAG_NO_MAX) || (frag->act_type > PROTO_AT_MAX)) { gu_error ("Exceeded protocol limits: %d(%d), %d(%d), %d(%d), %d(%d)", frag->act_id, PROTO_ACT_ID_MAX, frag->act_size, GCS_MAX_ACT_SIZE, frag->frag_no, PROTO_FRAG_NO_MAX, frag->act_type, PROTO_AT_MAX); return -EOVERFLOW; } if (frag->proto_ver != PROTO_VERSION) return -EPROTO; if (buf_len < PROTO_DATA_OFFSET) return -EMSGSIZE; #endif // assert (frag->act_size <= PROTO_ACT_SIZE_MAX); ((uint64_t*)buf)[0] = gu_be64(frag->act_id); ((uint32_t*)buf)[2] = htogl ((uint32_t)frag->act_size); ((uint32_t*)buf)[3] = htogl (frag->frag_no); ((uint8_t *)buf)[PROTO_PV_OFFSET] = frag->proto_ver; ((uint8_t *)buf)[PROTO_AT_OFFSET] = frag->act_type; frag->frag = (uint8_t*)buf + PROTO_DATA_OFFSET; frag->frag_len = buf_len - PROTO_DATA_OFFSET; return 0; } /*! Reads header data from the actual header of the message * Remainig fragment buf and length is in frag->frag and frag->frag_len * * @return 0 on success */ long gcs_act_proto_read (gcs_act_frag_t* frag, const void* buf, size_t buf_len) { frag->proto_ver = ((uint8_t*)buf)[PROTO_PV_OFFSET]; if (gu_unlikely(buf_len < PROTO_DATA_OFFSET)) { gu_error ("Action message too short: %zu, expected at least %d", buf_len, PROTO_DATA_OFFSET); return -EBADMSG; } if (gu_unlikely(frag->proto_ver > PROTO_VERSION)) { gu_error ("Bad protocol version %d, expected %d", frag->proto_ver, PROTO_VERSION); return -EPROTO; // this fragment should be dropped } ((uint8_t*)buf)[PROTO_PV_OFFSET] = 0x0; frag->act_id = gu_be64(*(uint64_t*)buf); frag->act_size = gtohl (((uint32_t*)buf)[2]); frag->frag_no = gtohl (((uint32_t*)buf)[3]); frag->act_type = static_cast( ((uint8_t*)buf)[PROTO_AT_OFFSET]); frag->frag = ((uint8_t*)buf) + PROTO_DATA_OFFSET; frag->frag_len = buf_len - PROTO_DATA_OFFSET; /* return 0 or -EMSGSIZE */ return ((frag->act_size > GCS_MAX_ACT_SIZE) * -EMSGSIZE); } /*! Returns protocol header size */ long gcs_act_proto_hdr_size (long version) { if (gu_unlikely(GCS_ACT_PROTO_MAX < version)) return -EPROTONOSUPPORT; if (gu_unlikely(version < 0)) return PROTO_MAX_HDR_SIZE; // safe return PROTO_DATA_OFFSET; } galera-3-25.3.20/gcs/src/gcs_node.cpp0000644000015300001660000001655713042054732017004 0ustar jenkinsjenkins/* * Copyright (C) 2008-2016 Codership Oy * * $Id$ */ #include #include "gcs_node.hpp" /*! 
Initialize node context */ void gcs_node_init (gcs_node_t* const node, gcache_t* cache, const char* const id, const char* const name, const char* const inc_addr, int const gcs_proto_ver, int const repl_proto_ver, int const appl_proto_ver, gcs_segment_t const segment) { assert(strlen(id) > 0); assert(strlen(id) < sizeof(node->id)); memset (node, 0, sizeof (gcs_node_t)); strncpy ((char*)node->id, id, sizeof(node->id) - 1); node->bootstrap = false; node->status = GCS_NODE_STATE_NON_PRIM; node->name = strdup (name ? name : NODE_NO_NAME); node->inc_addr = strdup (inc_addr ? inc_addr : NODE_NO_ADDR); gcs_defrag_init (&node->app, cache); // GCS_ACT_TORDERED goes only here gcs_defrag_init (&node->oob, NULL); node->gcs_proto_ver = gcs_proto_ver; node->repl_proto_ver = repl_proto_ver; node->appl_proto_ver = appl_proto_ver; node->segment = segment; } /*! Move data from one node object to another */ void gcs_node_move (gcs_node_t* dst, gcs_node_t* src) { if (dst->name) free ((char*)dst->name); if (dst->inc_addr) free ((char*)dst->inc_addr); if (dst->state_msg) gcs_state_msg_destroy ((gcs_state_msg_t*)dst->state_msg); memcpy (dst, src, sizeof (gcs_node_t)); gcs_defrag_forget (&src->app); gcs_defrag_forget (&src->oob); src->name = NULL; src->inc_addr = NULL; src->state_msg = NULL; } /*! Mark node's buffers as reset (local node only) */ void gcs_node_reset_local (gcs_node_t* node) { gcs_defrag_reset (&node->app); gcs_defrag_reset (&node->oob); } /*! Reset node's receive buffers */ void gcs_node_reset (gcs_node_t* node) { gcs_defrag_free (&node->app); gcs_defrag_free (&node->oob); gcs_node_reset_local (node); } /*! Deallocate resources associated with the node object */ void gcs_node_free (gcs_node_t* node) { gcs_node_reset (node); if (node->name) { free ((char*)node->name); // was strdup'ed node->name = NULL; } if (node->inc_addr) { free ((char*)node->inc_addr); // was strdup'ed node->inc_addr = NULL; } if (node->state_msg) { gcs_state_msg_destroy ((gcs_state_msg_t*)node->state_msg); node->state_msg = NULL; } } /*! Record state message from the node */ void gcs_node_record_state (gcs_node_t* node, gcs_state_msg_t* state_msg) { if (node->state_msg) { gcs_state_msg_destroy ((gcs_state_msg_t*)node->state_msg); } node->state_msg = state_msg; // copy relevant stuff from state msg into node node->status = gcs_state_msg_current_state (state_msg); gcs_state_msg_get_proto_ver (state_msg, &node->gcs_proto_ver, &node->repl_proto_ver, &node->appl_proto_ver); if (node->name) free ((char*)node->name); node->name = strdup (gcs_state_msg_name (state_msg)); if (node->inc_addr) free ((char*)node->inc_addr); node->inc_addr = strdup (gcs_state_msg_inc_addr (state_msg)); } /*! Update node status according to quorum decisions */ void gcs_node_update_status (gcs_node_t* node, const gcs_state_quorum_t* quorum) { if (quorum->primary) { const gu_uuid_t* node_group_uuid = gcs_state_msg_group_uuid ( node->state_msg); const gu_uuid_t* quorum_group_uuid = &quorum->group_uuid; // TODO: what to do when quorum.proto is not supported by this node? 
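        /* Editor's note: for a primary quorum the branches below implement
         * this decision table:
         *
         *   same group UUID, act_id == quorum->act_id -> carry over the last
         *                                                prim state (JOINED
         *                                                if the node had none)
         *   same group UUID, act_id differs           -> demote to PRIM
         *                                                (state transfer needed)
         *   different group UUID                      -> demote to PRIM
         */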
if (!gu_uuid_compare (node_group_uuid, quorum_group_uuid)) { // node was a part of this group gcs_seqno_t node_act_id = gcs_state_msg_received (node->state_msg); if (node_act_id == quorum->act_id) { const gcs_node_state_t last_prim_state = gcs_state_msg_prim_state (node->state_msg); if (GCS_NODE_STATE_NON_PRIM == last_prim_state) { // the node just joined, but already is up to date: node->status = GCS_NODE_STATE_JOINED; gu_debug ("#281 Setting %s state to %s", node->name, gcs_node_state_to_str(node->status)); } else { // Keep node state from the previous primary comp. node->status = last_prim_state; gu_debug ("#281,#298 Carry over last prim state for %s: %s", node->name, gcs_node_state_to_str(node->status)); } } else { // gap in sequence numbers, needs a snapshot, demote status if (node->status > GCS_NODE_STATE_PRIM) { gu_info ("'%s' demoted %s->PRIMARY due to gap in history: " "%lld - %lld", node->name, gcs_node_state_to_str(node->status), node_act_id, quorum->act_id); } node->status = GCS_NODE_STATE_PRIM; } } else { // node joins completely different group, clear all status if (node->status > GCS_NODE_STATE_PRIM) { gu_info ("'%s' has a different history, demoted %s->PRIMARY", node->name, gcs_node_state_to_str(node->status)); } node->status = GCS_NODE_STATE_PRIM; } switch (node->status) { case GCS_NODE_STATE_DONOR: if (quorum->version >= 4) { node->desync_count = gcs_state_msg_get_desync_count(node->state_msg); assert(node->desync_count > 0); } else { node->desync_count = 1; } case GCS_NODE_STATE_SYNCED: node->count_last_applied = true; break; case GCS_NODE_STATE_JOINED: node->count_last_applied =(gcs_state_msg_flags (node->state_msg) & GCS_STATE_FCLA); break; case GCS_NODE_STATE_JOINER: case GCS_NODE_STATE_PRIM: node->count_last_applied = false; break; case GCS_NODE_STATE_NON_PRIM: case GCS_NODE_STATE_MAX: gu_fatal ("Internal logic error: state %d in " "primary configuration. Aborting.", node->status); abort(); break; } if (GCS_NODE_STATE_DONOR != node->status) { assert(0 ==node->desync_count || GCS_NODE_STATE_PRIM==node->status); node->desync_count = 0; } else { assert(node->desync_count > 0); } } else { /* Probably don't want to change anything here, quorum was a failure * anyway. This could be due to this being transient component, lacking * joined nodes from the configuraiton. May be next component will be * better. * * UPDATE (28.06.2011): as #477 shows, we need some consistency here: */ node->status = GCS_NODE_STATE_NON_PRIM; } /* Clear bootstrap flag so that it does not get carried to * subsequent configuration changes. */ node->bootstrap = false; } galera-3-25.3.20/gcs/src/gcs_fc.cpp0000644000015300001660000001516413042054732016440 0ustar jenkinsjenkins/* * Copyright (C) 2010 Codership Oy * * $Id$ */ /*! @file This unit contains Flow Control parts deemed worthy to be * taken out of gcs.c */ #include "gcs_fc.hpp" #include #include double const gcs_fc_hard_limit_fix = 0.9; //! allow for some overhead static double const min_sleep = 0.001; //! minimum sleep period (s) /*! 
Initializes operational constants before opening connection to group * @return -EINVAL if wrong values are submitted */ int gcs_fc_init (gcs_fc_t* fc, ssize_t hard_limit, // slave queue hard limit double soft_limit, // soft limit as a fraction of hard limit double max_throttle) { assert (fc); if (hard_limit < 0) { gu_error ("Bad value for slave queue hard limit: %zd (should be > 0)", hard_limit); return -EINVAL; } if (soft_limit < 0.0 || soft_limit >= 1.0) { gu_error ("Bad value for slave queue soft limit: %f " "(should belong to [0.0,1.0) )", soft_limit); return -EINVAL; } if (max_throttle < 0.0 || max_throttle >= 1.0) { gu_error ("Bad value for max throttle: %f " "(should belong to [0.0,1.0) )", max_throttle); return -EINVAL; } memset (fc, 0, sizeof(*fc)); fc->hard_limit = hard_limit; fc->soft_limit = fc->hard_limit * soft_limit; fc->max_throttle = max_throttle; return 0; } /*! Reinitializes object at the beginning of state transfer */ void gcs_fc_reset (gcs_fc_t* const fc, ssize_t const queue_size) { assert (fc != NULL); assert (queue_size >= 0); fc->init_size = queue_size; fc->size = fc->init_size; fc->start = gu_time_monotonic(); fc->last_sleep = 0; fc->act_count = 0; fc->max_rate = -1.0; fc->scale = 0.0; fc->offset = 0.0; fc->sleep_count= 0; fc->sleeps = 0.0; } /* * The idea here is that there is no flow control up until slave queue size * reaches soft limit. * After that flow control gradually slows down replication rate by emitting FC * events in order to buy more time for state transfer. * Replication rate goes linearly from normal rate at soft limit to max_throttle * fraction at hard limit, at which point -ENOMEM is returned as replication * becomes prohibitively slow. * * replication * speed * ^ * |--------. <- normal replication rate * | .\ * | . \ * | . \ * | . \ speed = fc->size * fc->scale + fc->offset * | . \ * | . \ * | . \ | * | . \ | * | . \| * | . + <- throttle limit * | . | * | . | * +--------+---------+----> slave queue size * soft hard * limit limit */ /*! Processes a new action added to a slave queue. * @return length of sleep in nanoseconds or negative error code * or GU_TIME_ETERNITY for complete stop */ long long gcs_fc_process (gcs_fc_t* fc, ssize_t act_size) { fc->size += act_size; fc->act_count++; if (fc->size <= fc->soft_limit) { /* normal operation */ if (gu_unlikely(fc->debug > 0 && !(fc->act_count % fc->debug))) { gu_info ("FC: queue size: %zdb (%4.1f%% of soft limit)", fc->size, ((double)fc->size)/fc->soft_limit*100.0); } return 0; } else if (fc->size >= fc->hard_limit) { if (0.0 == fc->max_throttle) { /* we can accept total service outage */ return GU_TIME_ETERNITY; } else { gu_error ("Recv queue hard limit exceeded. 
Can't continue."); return -ENOMEM; } } // else if (!(fc->act_count & 7)) { // do this for every 8th action else { long long end = gu_time_monotonic(); double interval = ((end - fc->start) * 1.0e-9); if (gu_unlikely (0 == fc->last_sleep)) { /* just tripped the soft limit, preparing constants for throttle */ fc->max_rate = (double)(fc->size - fc->init_size) / interval; double s = (1.0 - fc->max_throttle)/(fc->soft_limit-fc->hard_limit); assert (s < 0.0); fc->scale = s * fc->max_rate; fc->offset = (1.0 - s*fc->soft_limit) * fc->max_rate; // calculate time interval from the soft limit interval = interval * (double)(fc->size - fc->soft_limit) / (fc->size - fc->init_size); assert (interval >= 0.0); // Move reference point to soft limit fc->last_sleep = fc->soft_limit; fc->start = end - interval * 1000000000; gu_warn("Soft recv queue limit exceeded, starting replication " "throttle. Measured avg. rate: %f bytes/sec; " "Throttle parameters: scale=%f, offset=%f", fc->max_rate, fc->scale, fc->offset); } /* throttling operation */ double desired_rate = fc->size * fc->scale + fc->offset; // linear decay //double desired_rate = fc->max_rate * fc->max_throttle; // square wave assert (desired_rate <= fc->max_rate); double sleep = (double)(fc->size - fc->last_sleep) / desired_rate - interval; if (gu_unlikely(fc->debug > 0 && !(fc->act_count % fc->debug))) { gu_info ("FC: queue size: %zdb, length: %zd, " "measured rate: %fb/s, desired rate: %fb/s, " "interval: %5.3fs, sleep: %5.4fs. " "Sleeps initiated: %zd, for a total of %6.3fs", fc->size, fc->act_count, ((double)(fc->size - fc->last_sleep))/interval, desired_rate, interval, sleep, fc->sleep_count, fc->sleeps); fc->sleep_count = 0; fc->sleeps = 0.0; } if (gu_likely(sleep < min_sleep)) { #if 0 gu_info ("Skipping sleep: desired_rate = %f, sleep = %f (%f), " "interval = %f, fc->scale = %f, fc->offset = %f, " "fc->size = %zd", desired_rate, sleep, min_sleep, interval, fc->scale, fc->offset, fc->size); #endif return 0; } fc->last_sleep = fc->size; fc->start = end; fc->sleep_count++; fc->sleeps += sleep; return (1000000000LL * sleep); } return 0; } void gcs_fc_debug (gcs_fc_t* fc, long debug_level) { fc->debug = debug_level; } galera-3-25.3.20/gcs/src/gcs_sm.hpp0000644000015300001660000003123013042054732016464 0ustar jenkinsjenkins/* * Copyright (C) 2010-2013 Codership Oy * * $Id$ */ /*! * @file GCS Send Monitor. To ensure fair (FIFO) access to gcs_core_send() */ #ifndef _gcs_sm_h_ #define _gcs_sm_h_ #include "gu_datetime.hpp" #include #include #ifdef GCS_SM_CONCURRENCY #define GCS_SM_CC sm->cc #else #define GCS_SM_CC 1 #endif /* GCS_SM_CONCURRENCY */ typedef struct gcs_sm_user { gu_cond_t* cond; bool wait; } gcs_sm_user_t; typedef struct gcs_sm_stats { long long sample_start;// beginning of the sample period long long pause_start; // start of the pause long long paused_ns; // total nanoseconds paused long long paused_sample; // paused_ns at the beginning of the sample long long send_q_samples; long long send_q_len; long long send_q_len_max; long long send_q_len_min; } gcs_sm_stats_t; typedef struct gcs_sm { gcs_sm_stats_t stats; gu_mutex_t lock; #ifdef GCS_SM_GRAB_RELEASE gu_cond_t cond; long cond_wait; #endif /* GCS_SM_GRAB_RELEASE */ unsigned long wait_q_len; unsigned long wait_q_mask; unsigned long wait_q_head; unsigned long wait_q_tail; long users; long users_min; long users_max; long entered; long ret; #ifdef GCS_SM_CONCURRENCY long cc; #endif /* GCS_SM_CONCURRENCY */ bool pause; gu::datetime::Period wait_time; gcs_sm_user_t wait_q[]; } gcs_sm_t; /*! 
* Creates send monitor * * @param len size of the monitor, should be a power of 2 * @param n concurrency parameter (how many users can enter at the same time) */ extern gcs_sm_t* gcs_sm_create (long len, long n); /*! * Closes monitor for entering and makes all users to exit with error. * (entered users are not affected). Blocks until everybody exits */ extern long gcs_sm_close (gcs_sm_t* sm); /*! * (Re)opens monitor for entering. */ extern long gcs_sm_open (gcs_sm_t* sm); /*! * Deallocates resources associated with the monitor */ extern void gcs_sm_destroy (gcs_sm_t* sm); #define GCS_SM_INCREMENT(cursor) (cursor = ((cursor + 1) & sm->wait_q_mask)) static inline void _gcs_sm_wake_up_next (gcs_sm_t* sm) { long woken = sm->entered; assert (woken >= 0); assert (woken <= GCS_SM_CC); while (woken < GCS_SM_CC && sm->users > 0) { if (gu_likely(sm->wait_q[sm->wait_q_head].wait)) { assert (NULL != sm->wait_q[sm->wait_q_head].cond); // gu_debug ("Waking up: %lu", sm->wait_q_head); gu_cond_signal (sm->wait_q[sm->wait_q_head].cond); woken++; } else { /* skip interrupted */ assert (NULL == sm->wait_q[sm->wait_q_head].cond); gu_debug ("Skipping interrupted: %lu", sm->wait_q_head); sm->users--; if (gu_unlikely(sm->users < sm->users_min)) { sm->users_min = sm->users; } GCS_SM_INCREMENT(sm->wait_q_head); } } assert (woken <= GCS_SM_CC); assert (sm->users >= 0); } /* wake up whoever might be waiting there */ static inline void _gcs_sm_wake_up_waiters (gcs_sm_t* sm) { #ifdef GCS_SM_GRAB_RELEASE if (gu_unlikely(sm->cond_wait)) { assert (sm->cond_wait > 0); sm->cond_wait--; gu_cond_signal (&sm->cond); } else #endif /* GCS_SM_GRAB_RELEASE */ if (!sm->pause) { _gcs_sm_wake_up_next(sm); } else { /* gcs_sm_continue() will do the rest */ } } static inline void _gcs_sm_leave_common (gcs_sm_t* sm) { assert (sm->entered < GCS_SM_CC); assert (sm->users > 0); sm->users--; if (gu_unlikely(sm->users < sm->users_min)) { sm->users_min = sm->users; } assert (false == sm->wait_q[sm->wait_q_head].wait); assert (NULL == sm->wait_q[sm->wait_q_head].cond); GCS_SM_INCREMENT(sm->wait_q_head); _gcs_sm_wake_up_waiters (sm); } static inline bool _gcs_sm_enqueue_common (gcs_sm_t* sm, gu_cond_t* cond, bool block) { unsigned long tail = sm->wait_q_tail; sm->wait_q[tail].cond = cond; sm->wait_q[tail].wait = true; bool ret; if (block == true) { gu_cond_wait (cond, &sm->lock); assert(tail == sm->wait_q_head || false == sm->wait_q[tail].wait); assert(sm->wait_q[tail].cond == cond || false == sm->wait_q[tail].wait); sm->wait_q[tail].cond = NULL; ret = sm->wait_q[tail].wait; sm->wait_q[tail].wait = false; } else { gu::datetime::Date abstime(gu::datetime::Date::calendar()); abstime = abstime + sm->wait_time; struct timespec ts; abstime._timespec(ts); int waitret = gu_cond_timedwait(cond, &sm->lock, &ts); sm->wait_q[tail].cond = NULL; // sm->wait_time is incremented by second each time cond wait // times out, reset back to one second when cond wait // succeeds. 
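        // (More precisely: on success wait_time decays by a factor of 2/3 but
        // never drops below one second, while on timeout it grows by one second.)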
if (waitret == 0) { ret = sm->wait_q[tail].wait; sm->wait_time = std::max(sm->wait_time*2/3, gu::datetime::Period(gu::datetime::Sec)); } else if (waitret == ETIMEDOUT) { if (sm->wait_time < 10 * gu::datetime::Sec) { gu_debug("send monitor wait timed out, waited for %s", to_string(sm->wait_time).c_str()); } else { gu_warn("send monitor wait timed out, waited for %s", to_string(sm->wait_time).c_str()); } ret = false; sm->wait_time = sm->wait_time + gu::datetime::Sec; } else { gu_error("send monitor timedwait failed with %d: %s", waitret, strerror(waitret)); ret = false; } sm->wait_q[tail].wait = false; } return ret; } #ifdef GCS_SM_CONCURRENCY #define GCS_SM_HAS_TO_WAIT \ (sm->users > (sm->entered + 1) || sm->entered >= GCS_SM_CC || sm->pause) #else #define GCS_SM_HAS_TO_WAIT (sm->users > 1 || sm->pause) #endif /* GCS_SM_CONCURRENCY */ /*! * Synchronize with entry order to the monitor. Must be always followed by * gcs_sm_enter(sm, cond, true) * * @retval -EAGAIN - out of space * @retval -EBADFD - monitor closed * @retval >= 0 queue handle */ static inline long gcs_sm_schedule (gcs_sm_t* sm) { if (gu_unlikely(gu_mutex_lock (&sm->lock))) abort(); long ret = sm->ret; if (gu_likely((sm->users < (long)sm->wait_q_len) && (0 == ret))) { sm->users++; if (gu_unlikely(sm->users > sm->users_max)) { sm->users_max = sm->users; } GCS_SM_INCREMENT(sm->wait_q_tail); /* even if we don't queue, cursor * needs to be advanced */ sm->stats.send_q_samples++; if (GCS_SM_HAS_TO_WAIT) { ret = sm->wait_q_tail + 1; // waiter handle /* here we want to distinguish between FC pause and real queue */ sm->stats.send_q_len += sm->users - 1; } return ret; // success } else if (0 == ret) { assert (sm->users == (long)sm->wait_q_len); ret = -EAGAIN; } assert(ret < 0); gu_mutex_unlock (&sm->lock); return ret; } /*! 
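 * Note: pass 'scheduled' == true if and only if a preceding gcs_sm_schedule()
 * call succeeded (the monitor lock is then already held); with 'scheduled' ==
 * false this call performs the scheduling step itself.
 *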
* Enter send monitor critical section * * @param sm send monitor object * @param cond condition to signal to wake up thread in case of wait * @param block if true block until entered or send monitor is closed, * if false enter wait times out eventually * * @retval -EAGAIN - out of space * @retval -EBADFD - monitor closed * @retval -EINTR - was interrupted by another thread * @retval 0 - successfully entered */ static inline long gcs_sm_enter (gcs_sm_t* sm, gu_cond_t* cond, bool scheduled, bool block) { long ret = 0; /* if scheduled and no queue */ if (gu_likely (scheduled || (ret = gcs_sm_schedule(sm)) >= 0)) { if (GCS_SM_HAS_TO_WAIT) { if (gu_likely(_gcs_sm_enqueue_common (sm, cond, block))) { ret = sm->ret; } else { ret = -EINTR; } } assert (ret <= 0); if (gu_likely(0 == ret)) { assert(sm->users > 0); assert(sm->entered < GCS_SM_CC); sm->entered++; } else { if (gu_likely(-EINTR == ret)) { /* was interrupted, will be handled by someone else */ } else { /* monitor is closed, wake up others */ assert(sm->users > 0); _gcs_sm_leave_common(sm); } } gu_mutex_unlock (&sm->lock); } return ret; } static inline void gcs_sm_leave (gcs_sm_t* sm) { if (gu_unlikely(gu_mutex_lock (&sm->lock))) abort(); sm->entered--; assert(sm->entered >= 0); _gcs_sm_leave_common(sm); gu_mutex_unlock (&sm->lock); } static inline void gcs_sm_pause (gcs_sm_t* sm) { if (gu_unlikely(gu_mutex_lock (&sm->lock))) abort(); /* don't pause closed monitor */ if (gu_likely(0 == sm->ret) && !sm->pause) { sm->stats.pause_start = gu_time_monotonic(); sm->pause = true; } gu_mutex_unlock (&sm->lock); } static inline void _gcs_sm_continue_common (gcs_sm_t* sm) { sm->pause = false; _gcs_sm_wake_up_next(sm); /* wake up next waiter if any */ } static inline void gcs_sm_continue (gcs_sm_t* sm) { if (gu_unlikely(gu_mutex_lock (&sm->lock))) abort(); if (gu_likely(sm->pause)) { _gcs_sm_continue_common (sm); sm->stats.paused_ns += gu_time_monotonic() - sm->stats.pause_start; } else { gu_debug ("Trying to continue unpaused monitor"); } gu_mutex_unlock (&sm->lock); } /*! * Interrupts waiter identified by handle (returned by gcs_sm_schedule()) * * @retval 0 - success * @retval -ESRCH - waiter is not in the queue. For practical purposes * it is impossible to discern already interrupted waiter and * the waiter that has entered the monitor */ static inline long gcs_sm_interrupt (gcs_sm_t* sm, long handle) { assert (handle > 0); long ret; if (gu_unlikely(gu_mutex_lock (&sm->lock))) abort(); handle--; if (gu_likely(sm->wait_q[handle].wait)) { assert (sm->wait_q[handle].cond != NULL); sm->wait_q[handle].wait = false; gu_cond_signal (sm->wait_q[handle].cond); sm->wait_q[handle].cond = NULL; ret = 0; if (!sm->pause && handle == (long)sm->wait_q_head) { /* gcs_sm_interrupt() was called right after the waiter was * signaled by gcs_sm_continue() or gcs_sm_leave() but before * the waiter has woken up. Wake up the next waiter */ _gcs_sm_wake_up_next(sm); } } else { ret = -ESRCH; } gu_mutex_unlock (&sm->lock); return ret; } /*! 
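 * Illustrative call (sketch only):
 * @code
 *   int       q_len, q_len_max, q_len_min;
 *   double    q_len_avg, paused_avg;
 *   long long paused_ns;
 *   gcs_sm_stats_get (sm, &q_len, &q_len_max, &q_len_min, &q_len_avg,
 *                     &paused_ns, &paused_avg);
 * @endcode
 *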
* Each call to this function resets stats and starts new sampling interval * * @param q_len current send queue length * @param q_len_avg set to an average number of preceding users seen by each * new one (not including itself) (-1 if stats overflown) * @param q_len_max maximum send queue length since last call * @param q_len_min minimum send queue length since last call * @param paused_ns total time paused (nanoseconds) * @param paused_avg set to a fraction of time which monitor spent in a paused * state (-1 if stats overflown) */ extern void gcs_sm_stats_get (gcs_sm_t* sm, int* q_len, int* q_len_max, int* q_len_min, double* q_len_avg, long long* paused_ns, double* paused_avg); /*! resets average/max/min stats calculation */ extern void gcs_sm_stats_flush(gcs_sm_t* sm); #ifdef GCS_SM_GRAB_RELEASE /*! Grabs sm object for out-of-order access * @return 0 or negative error code */ static inline long gcs_sm_grab (gcs_sm_t* sm) { long ret; if (gu_unlikely(gu_mutex_lock (&sm->lock))) abort(); while (!(ret = sm->ret) && sm->entered >= GCS_SM_CC) { sm->cond_wait++; gu_cond_wait (&sm->cond, &sm->lock); } if (ret) { assert (ret < 0); _gcs_sm_wake_up_waiters (sm); } else { assert (sm->entered < GCS_SM_CC); sm->entered++; } gu_mutex_unlock (&sm->lock); return ret; } /*! Releases sm object after gcs_sm_grab() */ static inline void gcs_sm_release (gcs_sm_t* sm) { if (gu_unlikely(gu_mutex_lock (&sm->lock))) abort(); sm->entered--; _gcs_sm_wake_up_waiters (sm); gu_mutex_unlock (&sm->lock); } #endif /* GCS_SM_GRAB_RELEASE */ #endif /* _gcs_sm_h_ */ galera-3-25.3.20/gcs/src/gcs_priv.hpp0000644000015300001660000000041213042054732017023 0ustar jenkinsjenkins/* * Copyright (C) 2011 Codership Oy * * $Id$ */ /*! * @file gcs_priv.h Global declarations private to GCS */ #ifndef _gcs_priv_h_ #define _gcs_priv_h_ #include "gcs.hpp" #define GCS_DESYNC_REQ "self-desync" #endif /* _gcs_priv_h_ */ galera-3-25.3.20/gcs/src/gcs_comp_msg.cpp0000644000015300001660000000732513042054732017654 0ustar jenkinsjenkins/* * Copyright (C) 2008-2013 Codership Oy * * $Id$ */ /* * Interface to membership messages - implementation * */ #include #include #include #include #define GCS_COMP_MSG_ACCESS #include "gcs_comp_msg.hpp" static inline int comp_msg_size (int memb_num) { return (sizeof(gcs_comp_msg_t) + memb_num * sizeof(gcs_comp_memb_t)); } /*! Allocates membership object and zeroes it */ gcs_comp_msg_t* gcs_comp_msg_new (bool prim, bool bootstrap, int my_idx, int memb_num, int error) { gcs_comp_msg_t* ret; assert ((memb_num > 0 && my_idx >= 0) || (memb_num == 0 && my_idx == -1)); ret = static_cast(gu_calloc (1, comp_msg_size(memb_num))); if (NULL != ret) { ret->primary = prim; ret->bootstrap = bootstrap; ret->my_idx = my_idx; ret->memb_num = memb_num; ret->error = error; } return ret; } gcs_comp_msg_t* gcs_comp_msg_leave (int error) { return gcs_comp_msg_new (false, false, -1, 0, error); } /*! Destroys component message */ void gcs_comp_msg_delete (gcs_comp_msg_t* comp) { gu_free (comp); } /*! Returns total size of the component message */ int gcs_comp_msg_size (const gcs_comp_msg_t* comp) { assert (comp); return comp_msg_size (comp->memb_num); } /*! 
Adds a member to the component message * Returns an index of the member or negative error code */ int gcs_comp_msg_add (gcs_comp_msg_t* comp, const char* id, gcs_segment_t const segment) { size_t id_len; int i; assert (comp); assert (id); /* check id length */ id_len = strlen (id); if (!id_len) return -EINVAL; if (id_len > GCS_COMP_MEMB_ID_MAX_LEN) return -ENAMETOOLONG; int free_slot = -1; /* find the free id slot and check for id uniqueness */ for (i = 0; i < comp->memb_num; i++) { if (0 == comp->memb[i].id[0] && free_slot < 0) free_slot = i; if (0 == strcmp (comp->memb[i].id, id)) return -ENOTUNIQ; } if (free_slot < 0) return -1; memcpy (comp->memb[free_slot].id, id, id_len); comp->memb[free_slot].segment = segment; return free_slot; } /*! Creates a copy of the component message */ gcs_comp_msg_t* gcs_comp_msg_copy (const gcs_comp_msg_t* comp) { size_t size = gcs_comp_msg_size(comp); gcs_comp_msg_t* ret = static_cast(gu_malloc (size)); if (ret) memcpy (ret, comp, size); return ret; } /*! Returns member ID by index, NULL if none */ const gcs_comp_memb_t* gcs_comp_msg_member (const gcs_comp_msg_t* comp, int idx) { if (0 <= idx && idx < comp->memb_num) return &comp->memb[idx]; else return NULL; } /*! Returns member index by ID, -1 if none */ int gcs_comp_msg_idx (const gcs_comp_msg_t* comp, const char* id) { size_t id_len = strlen(id); int idx = comp->memb_num; if (id_len > 0 && id_len <= GCS_COMP_MEMB_ID_MAX_LEN) for (idx = 0; idx < comp->memb_num; idx++) if (0 == strcmp (comp->memb[idx].id, id)) break; if (comp->memb_num == idx) return -1; else return idx; } /*! Returns primary status of the component */ bool gcs_comp_msg_primary (const gcs_comp_msg_t* comp) { return comp->primary; } /*! Retruns bootstrap flag of the component */ bool gcs_comp_msg_bootstrap(const gcs_comp_msg_t* comp) { return comp->bootstrap; } /*! Returns our own index in the membership */ int gcs_comp_msg_self (const gcs_comp_msg_t* comp) { return comp->my_idx; } /*! Returns number of members in the component */ int gcs_comp_msg_num (const gcs_comp_msg_t* comp) { return comp->memb_num; } int gcs_comp_msg_error(const gcs_comp_msg_t* comp) { return comp->error; } galera-3-25.3.20/gcs/src/gcs_comp_msg.hpp0000644000015300001660000000616113042054732017656 0ustar jenkinsjenkins/* * Copyright (C) 2008-2013 Codership Oy * * $Id$ */ /* * Interface to component messages * */ #ifndef _gcs_component_h_ #define _gcs_component_h_ #include #include // should accommodate human readable UUID (without trailing \0) #define GCS_COMP_MEMB_ID_MAX_LEN GU_UUID_STR_LEN /*! members of the same segment are physically closer than the others */ typedef uint8_t gcs_segment_t; typedef struct gcs_comp_memb { char id[GCS_COMP_MEMB_ID_MAX_LEN + 1]; /// ID assigned by the backend gcs_segment_t segment; } gcs_comp_memb_t; #ifdef GCS_COMP_MSG_ACCESS typedef struct gcs_comp_msg { int my_idx; /// this node's index in membership int memb_num; /// number of members in configuration bool primary; /// 1 if we have a quorum, 0 if not bool bootstrap; /// 1 if primary was bootstrapped int error; /// error code gcs_comp_memb_t memb[1]; /// member array } gcs_comp_msg_t; #else typedef struct gcs_comp_msg gcs_comp_msg_t; #endif /*! 
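 * A minimal sketch of composing and inspecting a component message
 * (illustrative only, error checking omitted):
 * @code
 *   gcs_comp_msg_t* comp = gcs_comp_msg_new (true, false, 0, 2, 0);
 *   gcs_comp_msg_add (comp, "node-one", 0);        // -> index 0
 *   gcs_comp_msg_add (comp, "node-two", 1);        // -> index 1
 *   int idx = gcs_comp_msg_idx (comp, "node-two"); // -> 1
 *   gcs_comp_msg_delete (comp);
 * @endcode
 *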
Allocates new component message * @param prim whether component is primary or not * @param bootstrap whether prim was bootstrapped * @param my_idx this node index in the membership * @param memb_num number of members in component * @param error error code * @return * allocated message buffer */ extern gcs_comp_msg_t* gcs_comp_msg_new (bool prim, bool bootstrap, int my_idx, int memb_num, int error); /*! Standard empty "leave" component message (to be returned on shutdown) */ extern gcs_comp_msg_t* gcs_comp_msg_leave (int error); /*! Destroys component message */ extern void gcs_comp_msg_delete (gcs_comp_msg_t* comp); /*! Adds a member to the component message * Returns an index of the member or negative error code: * -1 when membership is full * -ENOTUNIQ when name collides with one that is in membership already * -ENAMETOOLONG wnen memory allocation for new name fails */ extern int gcs_comp_msg_add (gcs_comp_msg_t* comp, const char* id, gcs_segment_t segment); /*! Returns total size of the component message */ extern int gcs_comp_msg_size (const gcs_comp_msg_t* comp); /*! Creates a copy of the component message */ extern gcs_comp_msg_t* gcs_comp_msg_copy (const gcs_comp_msg_t* comp); /*! Returns member ID by index, NULL if none */ extern const gcs_comp_memb_t* gcs_comp_msg_member (const gcs_comp_msg_t* comp, int idx); /*! Returns member index by ID, -1 if none */ extern int gcs_comp_msg_idx (const gcs_comp_msg_t* comp, const char* id); /*! Returns primary status of the component */ extern bool gcs_comp_msg_primary (const gcs_comp_msg_t* comp); /*! Returns bootstrap flag */ extern bool gcs_comp_msg_bootstrap(const gcs_comp_msg_t* comp); /*! Returns our own idx */ extern int gcs_comp_msg_self (const gcs_comp_msg_t* comp); /*! Returns number of members in the component */ extern int gcs_comp_msg_num (const gcs_comp_msg_t* comp); /*! 
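 * (For example, the gcomm backend composes a leave message carrying
 * ECONNABORTED via gcs_comp_msg_leave() when its connection is aborted.)
 *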
Returns error code of the component message */ extern int gcs_comp_msg_error(const gcs_comp_msg_t* comp); #endif /* _gcs_component_h_ */ galera-3-25.3.20/gcs/src/gcs_params.cpp0000644000015300001660000001523113042054732017326 0ustar jenkinsjenkins/* * Copyright (C) 2010-2014 Codership Oy * * $Id$ */ #include "gcs_params.hpp" #define __STDC_FORMAT_MACROS #include #include const char* const GCS_PARAMS_FC_FACTOR = "gcs.fc_factor"; const char* const GCS_PARAMS_FC_LIMIT = "gcs.fc_limit"; const char* const GCS_PARAMS_FC_MASTER_SLAVE = "gcs.fc_master_slave"; const char* const GCS_PARAMS_FC_DEBUG = "gcs.fc_debug"; const char* const GCS_PARAMS_SYNC_DONOR = "gcs.sync_donor"; const char* const GCS_PARAMS_MAX_PKT_SIZE = "gcs.max_packet_size"; const char* const GCS_PARAMS_RECV_Q_HARD_LIMIT = "gcs.recv_q_hard_limit"; const char* const GCS_PARAMS_RECV_Q_SOFT_LIMIT = "gcs.recv_q_soft_limit"; const char* const GCS_PARAMS_MAX_THROTTLE = "gcs.max_throttle"; static const char* const GCS_PARAMS_FC_FACTOR_DEFAULT = "1.0"; static const char* const GCS_PARAMS_FC_LIMIT_DEFAULT = "16"; static const char* const GCS_PARAMS_FC_MASTER_SLAVE_DEFAULT = "no"; static const char* const GCS_PARAMS_FC_DEBUG_DEFAULT = "0"; static const char* const GCS_PARAMS_SYNC_DONOR_DEFAULT = "no"; static const char* const GCS_PARAMS_MAX_PKT_SIZE_DEFAULT = "64500"; static ssize_t const GCS_PARAMS_RECV_Q_HARD_LIMIT_DEFAULT = SSIZE_MAX; static const char* const GCS_PARAMS_RECV_Q_SOFT_LIMIT_DEFAULT = "0.25"; static const char* const GCS_PARAMS_MAX_THROTTLE_DEFAULT = "0.25"; bool gcs_params_register(gu_config_t* conf) { bool ret = 0; ret |= gu_config_add (conf, GCS_PARAMS_FC_FACTOR, GCS_PARAMS_FC_FACTOR_DEFAULT); ret |= gu_config_add (conf, GCS_PARAMS_FC_LIMIT, GCS_PARAMS_FC_LIMIT_DEFAULT); ret |= gu_config_add (conf, GCS_PARAMS_FC_MASTER_SLAVE, GCS_PARAMS_FC_MASTER_SLAVE_DEFAULT); ret |= gu_config_add (conf, GCS_PARAMS_FC_DEBUG, GCS_PARAMS_FC_DEBUG_DEFAULT); ret |= gu_config_add (conf, GCS_PARAMS_SYNC_DONOR, GCS_PARAMS_SYNC_DONOR_DEFAULT); ret |= gu_config_add (conf, GCS_PARAMS_MAX_PKT_SIZE, GCS_PARAMS_MAX_PKT_SIZE_DEFAULT); char tmp[32] = { 0, }; snprintf (tmp, sizeof(tmp) - 1, "%lld", (long long)GCS_PARAMS_RECV_Q_HARD_LIMIT_DEFAULT); ret |= gu_config_add (conf, GCS_PARAMS_RECV_Q_HARD_LIMIT, tmp); ret |= gu_config_add (conf, GCS_PARAMS_RECV_Q_SOFT_LIMIT, GCS_PARAMS_RECV_Q_SOFT_LIMIT_DEFAULT); ret |= gu_config_add (conf, GCS_PARAMS_MAX_THROTTLE, GCS_PARAMS_MAX_THROTTLE_DEFAULT); return ret; } static long params_init_bool (gu_config_t* conf, const char* const name, bool* const var) { bool val; long rc = gu_config_get_bool(conf, name, &val); if (rc < 0) { /* Cannot parse parameter value */ gu_error ("Bad %s value", name); return rc; } else if (rc > 0) { assert(0); val = false; rc = -EINVAL; } *var = val; return rc; } static long params_init_long (gu_config_t* conf, const char* const name, long min_val, long max_val, long* const var) { int64_t val; long rc = gu_config_get_int64(conf, name, &val); if (rc < 0) { /* Cannot parse parameter value */ gu_error ("Bad %s value", name); return rc; } else { /* Found parameter value */ if (max_val == min_val) { max_val = LONG_MAX; min_val = LONG_MIN; } if (val < min_val || val > max_val) { gu_error ("%s value out of range [%ld, %ld]: %" PRIi64, name, min_val, max_val, val); return -EINVAL; } } *var = val; return 0; } static long params_init_int64 (gu_config_t* conf, const char* const name, int64_t const min_val, int64_t const max_val, int64_t* const var) { int64_t val; long rc = gu_config_get_int64(conf, 
name, &val); if (rc < 0) { /* Cannot parse parameter value */ gu_error ("Bad %s value", name); return rc; } else { /* Found parameter value */ if ((min_val != max_val) && (val < min_val || val > max_val)) { gu_error ("%s value out of range [%" PRIi64 ", %" PRIi64 "]: %" PRIi64, name, min_val, max_val, val); return -EINVAL; } } *var = val; return 0; } static long params_init_double (gu_config_t* conf, const char* const name, double const min_val, double const max_val, double* const var) { double val; long rc = gu_config_get_double(conf, name, &val); if (rc < 0) { /* Cannot parse parameter value */ gu_error ("Bad %s value", name); return rc; } else { /* Found parameter value */ if ((min_val != max_val) && (val < min_val || val > max_val)) { gu_error ("%s value out of range [%f, %f]: %f", name, min_val, max_val, val); return -EINVAL; } } *var = val; return 0; } long gcs_params_init (struct gcs_params* params, gu_config_t* config) { long ret; if ((ret = params_init_long (config, GCS_PARAMS_FC_LIMIT, 0, LONG_MAX, ¶ms->fc_base_limit))) return ret; if ((ret = params_init_long (config, GCS_PARAMS_FC_DEBUG, 0, LONG_MAX, ¶ms->fc_debug))) return ret; if ((ret = params_init_long (config, GCS_PARAMS_MAX_PKT_SIZE, 0,LONG_MAX, ¶ms->max_packet_size))) return ret; if ((ret = params_init_double (config, GCS_PARAMS_FC_FACTOR, 0.0, 1.0, ¶ms->fc_resume_factor))) return ret; if ((ret = params_init_double (config, GCS_PARAMS_RECV_Q_SOFT_LIMIT, 0.0, 1.0 - 1.e-9, ¶ms->recv_q_soft_limit))) return ret; if ((ret = params_init_double (config, GCS_PARAMS_MAX_THROTTLE, 0.0, 1.0 - 1.e-9, ¶ms->max_throttle))) return ret; int64_t tmp; if ((ret = params_init_int64 (config, GCS_PARAMS_RECV_Q_HARD_LIMIT, 0, 0, &tmp))) return ret; params->recv_q_hard_limit = tmp * 0.9; // allow for some meta overhead if ((ret = params_init_bool (config, GCS_PARAMS_FC_MASTER_SLAVE, ¶ms->fc_master_slave))) return ret; if ((ret = params_init_bool (config, GCS_PARAMS_SYNC_DONOR, ¶ms->sync_donor))) return ret; return 0; } galera-3-25.3.20/gcs/src/gcs_test.hpp0000644000015300001660000001047413042054732017033 0ustar jenkinsjenkins/* * Copyright (C) 2008 Codership Oy * * $Id$ */ #ifndef _gcs_test_h_ #define _gcs_test_h_ // some data to test bugger packets static char gcs_test_data[] = "001 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "002 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "003 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "004 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "005 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "006 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "007 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "008 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "009 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "010 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "011 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "012 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "013 
456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "014 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "015 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "016 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "017 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "018 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "019 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "020 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "021 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "022 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "023 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "024 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "025 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "026 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "027 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "028 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "029 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "030 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "031 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "032 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "033 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "034 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "035 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "036 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "037 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "038 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "039 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "040 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "041 4567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234" ; #endif galera-3-25.3.20/gcs/src/gcs_fifo_lite.cpp0000644000015300001660000001022513042054732020001 0ustar jenkinsjenkins/* * Copyright (C) 2008-2011 Codership Oy * * $Id$ * * FIFO "class" customized for particular purpose * (here I decided to sacrifice generality for efficiency). * Implements simple fixed size "mallocless" FIFO. * Except gcs_fifo_create() there are two types of fifo * access methods - protected and unprotected. Unprotected * methods assume that calling routines implement their own * protection, and thus are simplified for speed. 
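 * Typical lifecycle (sketch): gcs_fifo_lite_create() -> gcs_fifo_lite_open()
 * -> put/get traffic (the "safe" variants, e.g. gcs_fifo_lite_safe_get(),
 * block on the FIFO and are woken up by gcs_fifo_lite_close()) ->
 * gcs_fifo_lite_close() -> gcs_fifo_lite_destroy().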
*/ #include #include "gcs_fifo_lite.hpp" /* Creates FIFO object. Since it practically consists of array of (void*), * the length can be chosen arbitrarily high - to minimize the risk * of overflow situation. */ gcs_fifo_lite_t* gcs_fifo_lite_create (size_t length, size_t item_size) { gcs_fifo_lite_t* ret = NULL; uint64_t l = 1; /* check limits */ if (length < 1 || item_size < 1) return NULL; /* Find real length. It must be power of 2*/ while (l < length) l = l << 1; if (l * item_size > (uint64_t)GU_LONG_MAX) { gu_error ("Resulting FIFO size %lld exceeds signed limit: %lld", (long long)(l*item_size), (long long)GU_LONG_MAX); return NULL; } ret = GU_CALLOC (1, gcs_fifo_lite_t); if (ret) { ret->length = l; ret->item_size = item_size; ret->mask = ret->length - 1; ret->closed = true; ret->queue = gu_malloc (ret->length * item_size); if (ret->queue) { gu_mutex_init (&ret->lock, NULL); gu_cond_init (&ret->put_cond, NULL); gu_cond_init (&ret->get_cond, NULL); /* everything else must be initialized to 0 by calloc */ } else { gu_free (ret); ret = NULL; } } return ret; } void gcs_fifo_lite_close (gcs_fifo_lite_t* fifo) { GCS_FIFO_LITE_LOCK; if (fifo->closed) { gu_error ("Trying to close a closed FIFO"); assert(0); } else { fifo->closed = true; // wake whoever is waiting fifo->put_wait = 0; gu_cond_broadcast (&fifo->put_cond); fifo->get_wait = 0; gu_cond_broadcast (&fifo->get_cond); } gu_mutex_unlock (&fifo->lock); } void gcs_fifo_lite_open (gcs_fifo_lite_t* fifo) { GCS_FIFO_LITE_LOCK; if (!fifo->closed) { gu_error ("Trying to open an open FIFO."); assert(0); } else { fifo->closed = false; } gu_mutex_unlock(&fifo->lock); } long gcs_fifo_lite_destroy (gcs_fifo_lite_t* f) { if (f) { if (gu_mutex_lock (&f->lock)) { abort(); } if (f->destroyed) { gu_mutex_unlock (&f->lock); return -EALREADY; } f->closed = true; f->destroyed = true; /* get rid of "put" threads waiting for lock or signal */ while (pthread_cond_destroy (&f->put_cond)) { if (f->put_wait <= 0) { gu_fatal ("Can't destroy condition while nobody's waiting"); abort(); } f->put_wait = 0; gu_cond_broadcast (&f->put_cond); } while (f->used) { /* there are some items in FIFO - and that means * no gcs_fifo_lite_safe_get() is waiting on condition */ gu_mutex_unlock (&f->lock); /* let them get remaining items from FIFO, * we don't know how to deallocate them ourselves. * unfortunately this may take some time */ usleep (10000); /* sleep a bit to avoid busy loop */ gu_mutex_lock (&f->lock); } f->length = 0; /* now all we have - "get" threads waiting for lock or signal */ while (pthread_cond_destroy (&f->get_cond)) { if (f->get_wait <= 0) { gu_fatal ("Can't destroy condition while nobody's waiting"); abort(); } f->get_wait = 0; gu_cond_broadcast (&f->get_cond); } /* at this point there are only functions waiting for lock */ gu_mutex_unlock (&f->lock); while (gu_mutex_destroy (&f->lock)) { /* this should be fast provided safe get and safe put are * wtitten correctly. They should immediately freak out. 
*/ gu_mutex_lock (&f->lock); gu_mutex_unlock (&f->lock); } /* now nobody's waiting for anything */ gu_free (f->queue); gu_free (f); return 0; } return -EINVAL; } galera-3-25.3.20/gcs/src/gcs_gcache.hpp0000644000015300001660000000136713042054732017267 0ustar jenkinsjenkins/* * Copyright (C) 2011 Codership Oy * * $Id$ */ #ifndef _gcs_gcache_h_ #define _gcs_gcache_h_ #ifndef GCS_FOR_GARB #include #else #ifndef gcache_t struct gcache_t; #endif #endif #include #include static inline void* gcs_gcache_malloc (gcache_t* gcache, size_t size) { #ifndef GCS_FOR_GARB if (gu_likely(gcache != NULL)) return gcache_malloc (gcache, size); else #endif return ::malloc (size); } static inline void gcs_gcache_free (gcache_t* gcache, const void* buf) { #ifndef GCS_FOR_GARB if (gu_likely (gcache != NULL)) gcache_free (gcache, buf); else #endif ::free (const_cast(buf)); } #endif /* _gcs_gcache_h_ */ galera-3-25.3.20/gcs/src/gcs_backend.cpp0000644000015300001660000000476713042054732017446 0ustar jenkinsjenkins/* * Copyright (C) 2008-2014 Codership Oy * * $Id$ */ /*********************************************************/ /* This unit initializes the backend given backend URI */ /*********************************************************/ #include #include #include #include #include "gcs_backend.hpp" #include "gcs_dummy.hpp" #ifdef GCS_USE_SPREAD #include "gcs_spread.h" #endif /* GCS_USE_SPREAD */ #ifdef GCS_USE_VS #include "gcs_vs.h" #endif /* GCS_USE_VS */ #ifdef GCS_USE_GCOMM #include "gcs_gcomm.hpp" #endif /* GCS_USE_GCOMM */ bool gcs_backend_register(gu_config_t* const conf) { bool ret = false; #ifdef GCS_USE_GCOMM ret |= gcs_gcomm_register(conf); #endif /* GCS_USE_GCOMM */ #ifdef GCS_USE_VS #endif /* GCS_USE_VS */ #ifdef GCS_USE_SPREAD #endif /* GCS_USE_SPREAD */ ret |= gcs_dummy_register(conf); return ret; } /* Static array describing backend ID - open() pairs */ static struct { const char* id; gcs_backend_create_t create; } const backend[] = { #ifdef GCS_USE_GCOMM { "gcomm", gcs_gcomm_create}, #endif /* GCS_USE_GCOMM */ #ifdef GCS_USE_VS { "vsbes", gcs_vs_create }, #endif /* GCS_USE_VS */ #ifdef GCS_USE_SPREAD { "spread", gcs_spread_create }, #endif /* GCS_USE_SPREAD */ { "dummy", gcs_dummy_create }, { NULL, NULL } // terminating pair }; static const char backend_sep[] = "://"; /* Returns true if backend matches, false otherwise */ static bool backend_type_is (const char* uri, const char* type, const size_t len) { if (len == strlen(type)) { if (!strncmp (uri, type, len)) return true; } return false; } long gcs_backend_init (gcs_backend_t* const bk, const char* const uri, gu_config_t* const conf) { const char* sep; assert (NULL != bk); assert (NULL != uri); sep = strstr (uri, backend_sep); if (NULL != sep) { size_t type_len = sep - uri; const char* addr = sep + strlen(backend_sep); long i; /* try to match any of specified backends */ for (i = 0; backend[i].id != NULL; i++) { if (backend_type_is (uri, backend[i].id, type_len)) return backend[i].create (bk, addr, conf); } /* no backends matched */ gu_error ("Backend not supported: %s", uri); return -ESOCKTNOSUPPORT; } gu_error ("Invalid backend URI: %s", uri); return -EINVAL; } galera-3-25.3.20/gcs/src/gcs_recv_msg.hpp0000644000015300001660000000114513042054732017654 0ustar jenkinsjenkins/* * Copyright (C) 2008 Codership Oy * * $Id$ */ /*! 
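 * Note: 'buf' and 'buf_len' describe the caller-supplied receive buffer,
 * while 'size' is the size of the received message itself; a backend may
 * report size > buf_len together with type == GCS_MSG_ERROR when the
 * message does not fit (see e.g. the gcomm backend's recv implementation).
 *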
* Receiving message context */ #ifndef _gcs_recv_msg_h_ #define _gcs_recv_msg_h_ #include "gcs_msg_type.hpp" typedef struct gcs_recv_msg { void* buf; int buf_len; int size; int sender_idx; gcs_msg_type_t type; gcs_recv_msg() { } gcs_recv_msg(void* b, long bl, long sz, long si, gcs_msg_type_t t) : buf(b), buf_len(bl), size(sz), sender_idx(si), type(t) { } } gcs_recv_msg_t; #endif /* _gcs_recv_msg_h_ */ galera-3-25.3.20/gcs/src/gcs_spread.hpp0000644000015300001660000000041713042054732017326 0ustar jenkinsjenkins/* * Copyright (C) 2008 Codership Oy * * $Id$ */ /* * Definition of Spread GC backend */ #ifndef _gcs_spread_h_ #define _gcs_spread_h_ #include "gcs_backend.h" extern GCS_BACKEND_CREATE_FN (gcs_spread_create); #endif /* _gcs_spread_h_ */ galera-3-25.3.20/gcs/src/gcs_gcomm.cpp0000644000015300001660000005632613042054732017157 0ustar jenkinsjenkins/* * Copyright (C) 2009-2016 Codership Oy */ /*! * @file GComm GCS Backend implementation * * @todo Figure out if there is lock-free way to handle RecvBuf * push/pop operations. * */ #include "gcs_gcomm.hpp" // We access data comp msg struct directly #define GCS_COMP_MSG_ACCESS 1 #include "gcs_comp_msg.hpp" #include #include #include #ifdef PROFILE_GCS_GCOMM #define GCOMM_PROFILE 1 #else #undef GCOMM_PROFILE #endif // PROFILE_GCS_GCOMM #include #include #include #include #include #include #include #include using namespace std; using namespace gu; using namespace gu::prodcons; using namespace gu::datetime; using namespace gcomm; using namespace prof; static const std::string gcomm_thread_schedparam_opt("gcomm.thread_prio"); class RecvBufData { public: RecvBufData(const size_t source_idx, const Datagram& dgram, const ProtoUpMeta& um) : source_idx_(source_idx), dgram_ (dgram), um_ (um) { } size_t get_source_idx() const { return source_idx_; } const Datagram& get_dgram() const { return dgram_; } const ProtoUpMeta& get_um() const { return um_; } private: size_t source_idx_; Datagram dgram_; ProtoUpMeta um_; }; #if defined(GALERA_USE_BOOST_POOL_ALLOC) #include typedef deque > #else typedef deque #endif /* GALERA_USE_BOOST_POOL_ALLOC */ RecvBufQueue; class RecvBuf { private: class Waiting { public: Waiting (bool& w) : w_(w) { w_ = true; } ~Waiting() { w_ = false; } private: bool& w_; }; public: RecvBuf() : mutex_(), cond_(), queue_(), waiting_(false) { } void push_back(const RecvBufData& p) { Lock lock(mutex_); queue_.push_back(p); if (waiting_ == true) { cond_.signal(); } } const RecvBufData& front(const Date& timeout) { Lock lock(mutex_); while (queue_.empty()) { Waiting w(waiting_); if (gu_likely (timeout == GU_TIME_ETERNITY)) { lock.wait(cond_); } else { lock.wait(cond_, timeout); } } assert (false == waiting_); return queue_.front(); } void pop_front() { Lock lock(mutex_); assert(queue_.empty() == false); queue_.pop_front(); } private: Mutex mutex_; Cond cond_; RecvBufQueue queue_; bool waiting_; }; class MsgData : public MessageData { public: MsgData(const byte_t* data, const size_t data_size, const gcs_msg_type_t msg_type) : data_ (data), data_size_(data_size), msg_type_ (msg_type) { } const byte_t* get_data() const { return data_; } size_t get_data_size() const { return data_size_; } gcs_msg_type_t get_msg_type() const { return msg_type_; } public: MsgData(const MsgData&); void operator=(const MsgData&); const byte_t* data_; size_t data_size_; gcs_msg_type_t msg_type_; }; class GCommConn : public Consumer, public Toplay { public: GCommConn(const URI& u, gu::Config& cnf) : Toplay(cnf), conf_(cnf), uuid_(), thd_(), 
schedparam_(conf_.get(gcomm_thread_schedparam_opt)), barrier_(2), uri_(u), net_(Protonet::create(conf_)), tp_(0), mutex_(), refcnt_(0), terminated_(false), error_(0), recv_buf_(), current_view_(), prof_("gcs_gcomm") { log_info << "backend: " << net_->type(); } ~GCommConn() { delete net_; } const gcomm::UUID& get_uuid() const { return uuid_; } static void* run_fn(void* arg) { static_cast(arg)->run(); return 0; } void connect(bool) { } void connect(const string& channel, bool const bootstrap) { if (tp_ != 0) { gu_throw_fatal << "backend connection already open"; } error_ = ENOTCONN; int err; if ((err = pthread_create(&thd_, 0, &run_fn, this)) != 0) { gu_throw_error(err) << "Failed to create thread"; } // Helper to call barrier_.wait() when goes out of scope class StartBarrier { public: StartBarrier(Barrier& barrier) : barrier_(barrier) { } ~StartBarrier() { barrier_.wait(); } private: Barrier& barrier_; } start_barrier(barrier_); thread_set_schedparam(thd_, schedparam_); log_info << "gcomm thread scheduling priority set to " << thread_get_schedparam(thd_) << " "; uri_.set_option("gmcast.group", channel); tp_ = Transport::create(*net_, uri_); gcomm::connect(tp_, this); if (bootstrap) { log_info << "gcomm: bootstrapping new group '" << channel << '\''; } else { string peer; URI::AuthorityList::const_iterator i, i_next; for (i = uri_.get_authority_list().begin(); i != uri_.get_authority_list().end(); ++i) { i_next = i; ++i_next; string host; string port; try { host = i->host(); } catch (NotSet&) { } try { port = i->port(); } catch (NotSet&) { } peer += host != "" ? host + ":" + port : ""; if (i_next != uri_.get_authority_list().end()) { peer += ","; } } log_info << "gcomm: connecting to group '" << channel << "', peer '" << peer << "'"; } tp_->connect(bootstrap); uuid_ = tp_->uuid(); error_ = 0; log_info << "gcomm: connected"; } void close(bool force = false) { if (tp_ == 0) { log_warn << "gcomm: backend already closed"; return; } { gcomm::Critical crit(*net_); log_info << "gcomm: terminating thread"; terminate(); } log_info << "gcomm: joining thread"; pthread_join(thd_, 0); { gcomm::Critical crit(*net_); log_info << "gcomm: closing backend"; tp_->close(error_ != 0 || force == true); gcomm::disconnect(tp_, this); delete tp_; tp_ = 0; } const Message* msg; while ((msg = get_next_msg()) != 0) { return_ack(Message(&msg->get_producer(), 0, -ECONNABORTED)); } log_info << "gcomm: closed"; log_debug << prof_; } void run(); void notify() { net_->interrupt(); } void terminate() { Lock lock(mutex_); terminated_ = true; net_->interrupt(); } void handle_up (const void* id, const Datagram& dg, const ProtoUpMeta& um); void queue_and_wait(const Message& msg, Message* ack); RecvBuf& get_recv_buf() { return recv_buf_; } size_t get_mtu() const { if (tp_ == 0) { gu_throw_fatal << "GCommConn::get_mtu(): " << "backend connection not open"; } return tp_->mtu(); } Protonet& get_pnet() { return *net_; } gu::Config& get_conf() { return conf_; } int get_error() const { return error_; } void get_status(gu::Status& status) const { if (tp_ != 0) tp_->get_status(status); } gu::ThreadSchedparam schedparam() const { return schedparam_; } class Ref { public: Ref(gcs_backend_t* ptr, bool unset = false) : conn_(0) { if (ptr->conn != 0) { conn_ = reinterpret_cast(ptr->conn)->ref(unset); if (unset == true) { ptr->conn = 0; } } } ~Ref() { if (conn_ != 0) { conn_->unref(); } } GCommConn* get() { return conn_; } private: Ref(const Ref&); void operator=(const Ref&); GCommConn* conn_; }; private: GCommConn(const GCommConn&); void 
operator=(const GCommConn&); GCommConn* ref(const bool unsetting) { return this; } void unref() { } gu::Config& conf_; gcomm::UUID uuid_; pthread_t thd_; ThreadSchedparam schedparam_; Barrier barrier_; URI uri_; Protonet* net_; Transport* tp_; Mutex mutex_; size_t refcnt_; bool terminated_; int error_; RecvBuf recv_buf_; View current_view_; Profile prof_; }; void GCommConn::handle_up(const void* id, const Datagram& dg, const ProtoUpMeta& um) { if (um.err_no() != 0) { error_ = um.err_no(); // force backend close close(true); recv_buf_.push_back(RecvBufData(numeric_limits::max(), dg, um)); } else if (um.has_view() == true) { current_view_ = um.view(); recv_buf_.push_back(RecvBufData(numeric_limits::max(), dg, um)); if (current_view_.is_empty()) { log_debug << "handle_up: self leave"; } } else { size_t idx(0); for (NodeList::const_iterator i = current_view_.members().begin(); i != current_view_.members().end(); ++i) { if (NodeList::key(i) == um.source()) { profile_enter(prof_); recv_buf_.push_back(RecvBufData(idx, dg, um)); profile_leave(prof_); break; } ++idx; } assert(idx < current_view_.members().size()); } } void GCommConn::queue_and_wait(const Message& msg, Message* ack) { { Lock lock(mutex_); if (terminated_ == true) { *ack = Message(&msg.get_producer(), 0, -ECONNABORTED); return; } } profile_enter(prof_); Consumer::queue_and_wait(msg, ack); profile_leave(prof_); } void GCommConn::run() { barrier_.wait(); if (error_ != 0) pthread_exit(0); while (true) { { Lock lock(mutex_); if (terminated_ == true) { break; } } try { net_->event_loop(Sec); } catch (gu::Exception& e) { log_error << "exception from gcomm, backend must be restarted: " << e.what(); // Commented out due to Backtrace() not producing proper // backtraces. // log_info << "attempting to get backtrace:"; // Backtrace().print(std::cerr); gcomm::Critical crit(get_pnet()); handle_up(0, Datagram(), ProtoUpMeta(gcomm::UUID::nil(), ViewId(V_NON_PRIM), 0, 0xff, O_DROP, -1, e.get_errno())); break; } #if 0 // Disabled catching unknown exceptions due to Backtrace() not // producing proper backtraces. We let the application crash // and deal with diagnostics. catch (...) 
{ log_error << "unknow exception from gcomm, backend must be restarted"; log_info << "attempting to get backtrace:"; Backtrace().print(std::cerr); gcomm::Critical crit(get_pnet()); handle_up(0, Datagram(), ProtoUpMeta(gcomm::UUID::nil(), ViewId(V_NON_PRIM), 0, 0xff, O_DROP, -1, gu::Exception::E_UNSPEC)); break; } #endif } } //////////////////////////////////////////////////////////////////////////// // // Backend interface implementation // //////////////////////////////////////////////////////////////////////////// static GCS_BACKEND_MSG_SIZE_FN(gcomm_msg_size) { GCommConn::Ref ref(backend); if (ref.get() == 0) { return -1; } return ref.get()->get_mtu(); } static GCS_BACKEND_SEND_FN(gcomm_send) { GCommConn::Ref ref(backend); if (gu_unlikely(ref.get() == 0)) { return -EBADFD; } GCommConn& conn(*ref.get()); Datagram dg( SharedBuffer( new Buffer(reinterpret_cast(buf), reinterpret_cast(buf) + len))); int err; // Set thread scheduling params if gcomm thread runs with // non-default params gu::ThreadSchedparam orig_sp; if (conn.schedparam() != gu::ThreadSchedparam::system_default) { try { orig_sp = gu::thread_get_schedparam(pthread_self()); gu::thread_set_schedparam(pthread_self(), conn.schedparam()); } catch (gu::Exception& e) { err = e.get_errno(); } } { gcomm::Critical crit(conn.get_pnet()); if (gu_unlikely(conn.get_error() != 0)) { err = ECONNABORTED; } else { err = conn.send_down( dg, ProtoDownMeta(msg_type, msg_type == GCS_MSG_CAUSAL ? O_LOCAL_CAUSAL : O_SAFE)); } } if (conn.schedparam() != gu::ThreadSchedparam::system_default) { try { gu::thread_set_schedparam(pthread_self(), orig_sp); } catch (gu::Exception& e) { err = e.get_errno(); } } return (err == 0 ? len : -err); } static void fill_cmp_msg(const View& view, const gcomm::UUID& my_uuid, gcs_comp_msg_t* cm) { size_t n(0); for (NodeList::const_iterator i = view.members().begin(); i != view.members().end(); ++i) { const gcomm::UUID& uuid(NodeList::key(i)); log_debug << "member: " << n << " uuid: " << uuid << " segment: " << static_cast(i->second.segment()); // (void)snprintf(cm->memb[n].id, GCS_COMP_MEMB_ID_MAX_LEN, "%s", // uuid._str().c_str()); long ret = gcs_comp_msg_add (cm, uuid.full_str().c_str(), i->second.segment()); if (ret < 0) { gu_throw_error(-ret) << "Failed to add member '" << uuid << "' to component message."; } if (uuid == my_uuid) { log_debug << "my index " << n; cm->my_idx = n; } ++n; } } static GCS_BACKEND_RECV_FN(gcomm_recv) { GCommConn::Ref ref(backend); if (gu_unlikely(ref.get() == 0)) return -EBADFD; try { GCommConn& conn(*ref.get()); RecvBuf& recv_buf(conn.get_recv_buf()); const RecvBufData& d(recv_buf.front(timeout)); msg->sender_idx = d.get_source_idx(); const Datagram& dg(d.get_dgram()); const ProtoUpMeta& um(d.get_um()); if (gu_likely(dg.len() != 0)) { assert(dg.len() > dg.offset()); const byte_t* b(gcomm::begin(dg)); const ssize_t pload_len(gcomm::available(dg)); msg->size = pload_len; if (gu_likely(pload_len <= msg->buf_len)) { memcpy(msg->buf, b, pload_len); msg->type = static_cast(um.user_type()); recv_buf.pop_front(); } else { msg->type = GCS_MSG_ERROR; } } else if (um.err_no() != 0) { gcs_comp_msg_t* cm(gcs_comp_msg_leave(ECONNABORTED)); const ssize_t cm_size(gcs_comp_msg_size(cm)); if (cm_size <= msg->buf_len) { memcpy(msg->buf, cm, cm_size); recv_buf.pop_front(); msg->type = GCS_MSG_COMPONENT; } else { msg->type = GCS_MSG_ERROR; } gcs_comp_msg_delete(cm); } else { assert(um.has_view() == true); const View& view(um.view()); assert(view.type() == V_PRIM || view.type() == V_NON_PRIM); gcs_comp_msg_t* 
cm(gcs_comp_msg_new(view.type() == V_PRIM, view.is_bootstrap(), view.is_empty() ? -1 : 0, view.members().size(), 0)); const ssize_t cm_size(gcs_comp_msg_size(cm)); if (cm->my_idx == -1) { log_debug << "gcomm recv: self leave"; } msg->size = cm_size; if (gu_likely(cm_size <= msg->buf_len)) { fill_cmp_msg(view, conn.get_uuid(), cm); memcpy(msg->buf, cm, cm_size); recv_buf.pop_front(); msg->type = GCS_MSG_COMPONENT; } else { msg->type = GCS_MSG_ERROR; } gcs_comp_msg_delete(cm); } return msg->size; } catch (Exception& e) { long err = e.get_errno(); if (ETIMEDOUT != err) { log_error << e.what(); } return -err; } } static GCS_BACKEND_NAME_FN(gcomm_name) { static const char *name = "gcomm"; return name; } static GCS_BACKEND_OPEN_FN(gcomm_open) { GCommConn::Ref ref(backend); if (ref.get() == 0) { return -EBADFD; } GCommConn& conn(*ref.get()); try { gcomm::Critical crit(conn.get_pnet()); conn.connect(channel, bootstrap); } catch (Exception& e) { log_error << "failed to open gcomm backend connection: " << e.get_errno() << ": " << e.what(); return -e.get_errno(); } return 0; } static GCS_BACKEND_CLOSE_FN(gcomm_close) { GCommConn::Ref ref(backend); if (ref.get() == 0) { return -EBADFD; } GCommConn& conn(*ref.get()); try { // Critical section is entered inside close() call. // gcomm::Critical crit(conn.get_pnet()); conn.close(); } catch (Exception& e) { log_error << "failed to close gcomm backend connection: " << e.get_errno() << ": " << e.what(); gcomm::Critical crit(conn.get_pnet()); conn.handle_up(0, Datagram(), ProtoUpMeta(gcomm::UUID::nil(), ViewId(V_NON_PRIM), 0, 0xff, O_DROP, -1, e.get_errno())); // #661: Pretend that closing was successful, backend should be // in unusable state anyway. This allows gcs to finish shutdown // sequence properly. } return 0; } static GCS_BACKEND_DESTROY_FN(gcomm_destroy) { GCommConn::Ref ref(backend, true); if (ref.get() == 0) { log_warn << "could not get reference to backend conn"; return -EBADFD; } GCommConn* conn(ref.get()); try { delete conn; } catch (Exception& e) { log_warn << "conn destroy failed: " << e.get_errno(); return -e.get_errno(); } return 0; } static GCS_BACKEND_PARAM_SET_FN(gcomm_param_set) { GCommConn::Ref ref(backend); if (ref.get() == 0) { return -EBADFD; } GCommConn& conn(*ref.get()); try { gcomm::Critical crit(conn.get_pnet()); if (gu_unlikely(conn.get_error() != 0)) { return -ECONNABORTED; } if (conn.get_pnet().set_param(key, value) == false) { log_debug << "param " << key << " not recognized"; return 1; } else { return 0; } } catch (gu::Exception& e) { log_warn << "error setting param " << key << " to value " << value << ": " << e.what(); return -e.get_errno(); } catch (gu::NotFound& nf) { log_warn << "error setting param " << key << " to value " << value; return -EINVAL; } catch (gu::NotSet& nf) { log_warn << "error setting param " << key << " to value " << value; return -EINVAL; } catch (...) { log_fatal << "gcomm param set: caught unknown exception"; return -ENOTRECOVERABLE; } } static GCS_BACKEND_PARAM_GET_FN(gcomm_param_get) { return NULL; } static GCS_BACKEND_STATUS_GET_FN(gcomm_status_get) { GCommConn::Ref ref(backend); if (ref.get() == 0) { gu_throw_error(-EBADFD); } GCommConn& conn(*ref.get()); gcomm::Critical crit(conn.get_pnet()); conn.get_status(status); } GCS_BACKEND_REGISTER_FN(gcs_gcomm_register) { try { reinterpret_cast(cnf)->add(gcomm_thread_schedparam_opt, ""); gcomm::Conf::register_params(*reinterpret_cast(cnf)); return false; } catch (...) 
{ return true; } } GCS_BACKEND_CREATE_FN(gcs_gcomm_create) { GCommConn* conn(0); if (!cnf) { log_error << "Null config object passed to constructor."; return -EINVAL; } try { gu::URI uri(std::string("pc://") + addr); gu::Config& conf(*reinterpret_cast(cnf)); conn = new GCommConn(uri, conf); } catch (Exception& e) { log_error << "failed to create gcomm backend connection: " << e.get_errno() << ": " << e.what(); return -e.get_errno(); } backend->open = gcomm_open; backend->close = gcomm_close; backend->destroy = gcomm_destroy; backend->send = gcomm_send; backend->recv = gcomm_recv; backend->name = gcomm_name; backend->msg_size = gcomm_msg_size; backend->param_set = gcomm_param_set; backend->param_get = gcomm_param_get; backend->status_get = gcomm_status_get; backend->conn = reinterpret_cast(conn); return 0; } galera-3-25.3.20/gcs/src/gcs_group.hpp0000644000015300001660000001712413042054732017207 0ustar jenkinsjenkins/* * Copyright (C) 2008-2016 Codership Oy * * $Id$ */ /* * This header defines node specific context we need to maintain */ #ifndef _gcs_group_h_ #define _gcs_group_h_ #include #include "gcs_gcache.hpp" #include "gcs_node.hpp" #include "gcs_recv_msg.hpp" #include "gcs_seqno.hpp" #include "gcs_state_msg.hpp" #include "gu_status.hpp" #include "gu_utils.hpp" typedef enum gcs_group_state { GCS_GROUP_NON_PRIMARY, GCS_GROUP_WAIT_STATE_UUID, GCS_GROUP_WAIT_STATE_MSG, GCS_GROUP_PRIMARY, GCS_GROUP_STATE_MAX } gcs_group_state_t; extern const char* gcs_group_state_str[]; typedef struct gcs_group { gcache_t* cache; gcs_seqno_t act_id_; // current(last) action seqno gcs_seqno_t conf_id; // current configuration seqno gu_uuid_t state_uuid; // state exchange id gu_uuid_t group_uuid; // group UUID long num; // number of nodes long my_idx; // my index in the group const char* my_name; const char* my_address; gcs_group_state_t state; // group state: PRIMARY | NON_PRIMARY gcs_seqno_t last_applied; // last_applied action group-wide long last_node; // node that reported last_applied bool frag_reset; // indicate that fragmentation was reset gcs_node_t* nodes; // array of node contexts /* values from the last primary component */ gu_uuid_t prim_uuid; gu_seqno_t prim_seqno; long prim_num; gcs_node_state_t prim_state; /* max supported protocols */ gcs_proto_t const gcs_proto_ver; int const repl_proto_ver; int const appl_proto_ver; gcs_state_quorum_t quorum; int last_applied_proto_ver; gcs_group() : gcs_proto_ver(0), repl_proto_ver(0), appl_proto_ver(0) { } } gcs_group_t; /*! * Initialize group at startup */ extern int gcs_group_init (gcs_group_t* group, gcache_t* cache, const char* node_name, ///< can be null const char* inc_addr, ///< can be null gcs_proto_t gcs_proto_ver, int repl_proto_ver, int appl_proto_ver); /*! * Initialize group action history parameters. See gcs.h */ extern int gcs_group_init_history (gcs_group_t* group, gcs_seqno_t seqno, const gu_uuid_t* uuid); /*! * Free group resources */ extern void gcs_group_free (gcs_group_t* group); /*! Forget the action if it is not to be delivered */ extern void gcs_group_ignore_action (gcs_group_t* group, struct gcs_act_rcvd* rcvd); /*! * Handles component message - installs new membership, * cleans old one. * * @return * group state in case of success or * negative error code. 
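 * (possible states are enumerated in gcs_group_state_t above)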
*/ extern gcs_group_state_t gcs_group_handle_comp_msg (gcs_group_t* group, const gcs_comp_msg_t* msg); extern gcs_group_state_t gcs_group_handle_uuid_msg (gcs_group_t* group, const gcs_recv_msg_t* msg); extern gcs_group_state_t gcs_group_handle_state_msg (gcs_group_t* group, const gcs_recv_msg_t* msg); extern gcs_seqno_t gcs_group_handle_last_msg (gcs_group_t* group, const gcs_recv_msg_t* msg); /*! @return 0 for success, 1 for (success && i_am_sender) * or negative error code */ extern int gcs_group_handle_join_msg (gcs_group_t* group, const gcs_recv_msg_t* msg); /*! @return 0 for success, 1 for (success && i_am_sender) * or negative error code */ extern int gcs_group_handle_sync_msg (gcs_group_t* group, const gcs_recv_msg_t* msg); /*! @return 0 if request is ignored, request size if it should be passed up */ extern int gcs_group_handle_state_request (gcs_group_t* group, struct gcs_act_rcvd* act); /*! * Handles action message. Is called often - therefore, inlined * * @return negative - error code, 0 - continue, positive - complete action */ static inline ssize_t gcs_group_handle_act_msg (gcs_group_t* const group, const gcs_act_frag_t* const frg, const gcs_recv_msg_t* const msg, struct gcs_act_rcvd* const rcvd, bool commonly_supported_version) { long const sender_idx = msg->sender_idx; bool const local = (sender_idx == group->my_idx); ssize_t ret; assert (GCS_MSG_ACTION == msg->type); assert (sender_idx < group->num); assert (frg->act_id > 0); assert (frg->act_size > 0); // clear reset flag if set by own first fragment after reset flag was set group->frag_reset = (group->frag_reset && !(local && 0 == frg->frag_no && GCS_GROUP_PRIMARY == group->state)); ret = gcs_node_handle_act_frag (&group->nodes[sender_idx], frg, &rcvd->act, local); if (ret > 0) { assert (ret == rcvd->act.buf_len); rcvd->act.type = frg->act_type; if (gu_likely(GCS_ACT_TORDERED == rcvd->act.type && GCS_GROUP_PRIMARY == group->state && group->nodes[sender_idx].status >= GCS_NODE_STATE_DONOR && !(group->frag_reset && local) && commonly_supported_version)) { /* Common situation - * increment and assign act_id only for totally ordered actions * and only in PRIM (skip messages while in state exchange) */ rcvd->id = ++group->act_id_; } else if (GCS_ACT_TORDERED == rcvd->act.type) { /* Rare situations */ if (local) { /* Let the sender know that it failed */ rcvd->id = -ERESTART; gu_debug("Returning -ERESTART for TORDERED action: group->state" " = %s, sender->status = %s, frag_reset = %s, " "buf = %p", gcs_group_state_str[group->state], gcs_node_state_to_str(group->nodes[sender_idx].status), group->frag_reset ? "true" : "false", rcvd->act.buf); } else { /* Just ignore it */ ret = 0; gcs_group_ignore_action (group, rcvd); } } } return ret; } static inline gcs_group_state_t gcs_group_state (const gcs_group_t* group) { return group->state; } static inline bool gcs_group_is_primary (const gcs_group_t* group) { return (GCS_GROUP_PRIMARY == group->state); } static inline int gcs_group_my_idx (const gcs_group_t* group) { return group->my_idx; } /*! * Creates new configuration action * @param group group handle * @param act GCS action object * @param proto protocol version gcs should use for this configuration */ extern ssize_t gcs_group_act_conf (gcs_group_t* group, struct gcs_act* act, int* proto); /*! Returns state object for state message */ extern gcs_state_msg_t* gcs_group_get_state (const gcs_group_t* group); /*! * find a donor and return its index, if available. pure function. * @return donor index of negative error code. 
* -EHOSTUNREACH if no available donor. * -EHOSTDOWN if donor is joiner. * -EAGAIN if no node in proper state. */ extern int gcs_group_find_donor(const gcs_group_t* group, int const str_version, int const joiner_idx, const char* const donor_string, int const donor_len, const gu_uuid_t* ist_uuid, gcs_seqno_t ist_seqno); extern void gcs_group_get_status(const gcs_group_t* group, gu::Status& status); #endif /* _gcs_group_h_ */ galera-3-25.3.20/gcs/src/SConscript0000644000015300001660000000562013042054732016516 0ustar jenkinsjenkins# Import('env') # Clone environment as we need to tune compilation flags libgcs_env = env.Clone() # Include paths libgcs_env.Append(CPPPATH = Split(''' #/galerautils/src #/gcomm/src #/gcache/src ''')) # Backends (TODO: Get from global options) libgcs_env.Append(CPPFLAGS = ' -DGCS_USE_GCOMM') # For C-style logging libgcs_env.Append(CPPFLAGS = ' -DGALERA_LOG_H_ENABLE_CXX -Wno-variadic-macros') # Disable old style cast warns until code is fixed libgcs_env.Append(CPPFLAGS = ' -Wno-old-style-cast') # Allow zero sized arrays libgcs_env.Replace(CCFLAGS = libgcs_env['CCFLAGS'].replace('-pedantic', '')) libgcs_env.Append(CPPFLAGS = ' -Wno-missing-field-initializers') libgcs_env.Append(CPPFLAGS = ' -Wno-effc++') print libgcs_env['CFLAGS'] print libgcs_env['CCFLAGS'] print libgcs_env['CPPFLAGS'] print libgcs_env['CXXFLAGS'] gcs4garb_env = libgcs_env.Clone() libgcs_sources = Split(''' gcs_params.cpp gcs_conf.cpp gcs_fifo_lite.cpp gcs_msg_type.cpp gcs_comp_msg.cpp gcs_sm.cpp gcs_backend.cpp gcs_dummy.cpp gcs_act_proto.cpp gcs_defrag.cpp gcs_state_msg.cpp gcs_node.cpp gcs_group.cpp gcs_core.cpp gcs_fc.cpp gcs.cpp gcs_gcomm.cpp ''') #libgcs_env.VariantDir('.gcs', '.', duplicate=0) libgcs_env.StaticLibrary('gcs', libgcs_sources) # TODO: How to tell scons portably that C++ linker should be used # and program should be linked statically gcs_test_env = libgcs_env.Clone() gcs_test_env.Prepend(LIBS = File('#/galerautils/src/libgalerautils.a')) gcs_test_env.Prepend(LIBS = File('#/galerautils/src/libgalerautils++.a')) gcs_test_env.Prepend(LIBS = File('#/gcomm/src/libgcomm.a')) gcs_test_env.Prepend(LIBS = File('#/gcache/src/libgcache.a')) gcs_test_env.Prepend(LIBS = File('#/gcs/src/libgcs.a')) gcs_test_env.Program(target = 'gcs_test', source = 'gcs_test.cpp', LINK = libgcs_env['CXX']) SConscript('unit_tests/SConscript') # env.Append(LIBGALERA_OBJS = libgcs_env.SharedObject(libgcs_sources)) gcs4garb_env.Append(CPPFLAGS = ' -DGCS_FOR_GARB') garb_obj_dir = '.garb' gcs4garb_env.VariantDir(garb_obj_dir, '.', duplicate = 0) #garb_objects = [os.path.splitext(src)[0] + '_garb' + # env['OBJSUFFIX'] for src in libgcs_sources] garb_sources = [ garb_obj_dir + '/' + src for src in libgcs_sources ] gcs4garb_env.StaticLibrary('gcs4garb', garb_sources) Clean('.', garb_obj_dir) galera-3-25.3.20/gcs/src/gcs_gcomm.hpp0000644000015300001660000000042713042054732017153 0ustar jenkinsjenkins/* * Copyright (C) 2008-2014 Codership Oy * * $Id$ */ #ifndef _gcs_gcomm_h_ #define _gcs_gcomm_h_ #include "gcs_backend.hpp" extern GCS_BACKEND_REGISTER_FN(gcs_gcomm_register); extern GCS_BACKEND_CREATE_FN(gcs_gcomm_create); #endif /* _gcs_vs_h_ */ galera-3-25.3.20/gcs/src/gcs_act.hpp0000644000015300001660000000166213042054732016622 0ustar jenkinsjenkins/* * Copyright (C) 2008-2011 Codership Oy * * $Id$ */ #ifndef _gcs_act_h_ #define _gcs_act_h_ #include "gcs.hpp" struct gcs_act { const void* buf; ssize_t buf_len; gcs_act_type_t type; gcs_act() { } gcs_act(const void* b, ssize_t bl, gcs_act_type_t t) : buf(b), buf_len(bl), 
type(t) { } gcs_act(const gcs_act& a) : buf(a.buf), buf_len(a.buf_len), type(a.type) { } }; struct gcs_act_rcvd { struct gcs_act act; const struct gu_buf* local; // local buffer vector if any gcs_seqno_t id; // global total order seqno int sender_idx; gcs_act_rcvd() { } gcs_act_rcvd(const gcs_act& a, const struct gu_buf* loc, gcs_seqno_t i, int si) : act(a), local(loc), id(i), sender_idx(si) { } }; #endif /* _gcs_act_h_ */ galera-3-25.3.20/gcs/src/gcs_core.hpp0000644000015300001660000001337413042054732017006 0ustar jenkinsjenkins/* * Copyright (C) 2008-2014 Codership Oy * * $Id$ */ /* * This header defines generic communication layer * which implements basic open/close/send/receive * functions. Its purpose is to implement all * functionality common to all group communication * uses. Currently this amounts to action * fragmentation/defragmentation and invoking backend * functions. * In the course of development it has become clear * that such fuctionality must be collected in a * separate layer. * Application abstraction layer is based on this one * and uses those functions for its own purposes. */ #ifndef _gcs_core_h_ #define _gcs_core_h_ #include "gcs.hpp" #include "gcs_act.hpp" #include "gcs_act_proto.hpp" #include #include #include /* 'static' method to register configuration variables */ extern bool gcs_core_register (gu_config_t* conf); struct gcs_core; typedef struct gcs_core gcs_core_t; /* * Allocates context resources private to * generic communicaton layer - send/recieve buffers and the like. */ extern gcs_core_t* gcs_core_create (gu_config_t* conf, gcache_t* cache, const char* node_name, const char* inc_addr, int repl_proto_ver, int appl_proto_ver); /* initializes action history (global seqno, group UUID). See gcs.h */ extern long gcs_core_init (gcs_core_t* core, gcs_seqno_t seqno, const gu_uuid_t* uuid); /* * gcs_core_open() opens connection * Return values: * zero - success * negative - error code */ extern long gcs_core_open (gcs_core_t* conn, const char* channel, const char* url, bool bootstrap); /* * gcs_core_close() puts connection in a closed state, * cancelling all ongoing calls. * Return values: * zero - success * negative - error code */ extern long gcs_core_close (gcs_core_t* conn); /* * gcs_core_destroy() frees resources allocated by gcs_core_create() * Return values: * zero - success * negative - error code */ extern long gcs_core_destroy (gcs_core_t* conn); /* * gcs_core_send() atomically sends action to group. * * NOT THREAD SAFE! Access should be serialized. * * Return values: * non-negative - amount of action bytes sent (sans headers) * negative - error code * -EAGAIN - operation should be retried * -ENOTCONN - connection to primary component lost * * NOTE: Successful return code here does not guarantee delivery to group. * The real status of action is determined only in gcs_core_recv() call. */ extern ssize_t gcs_core_send (gcs_core_t* core, const struct gu_buf* act, size_t act_size, gcs_act_type_t act_type); /* * gcs_core_recv() blocks until some action is received from group. * * @param repl_buf ptr to replicated action local buffer (NULL otherwise) * @param timeout absolute timeout date (as in pthread_cond_timedwait()) * * Return values: * non-negative - the size of action received * negative - error code * * @retval -ETIMEDOUT means no messages were received until timeout. * * NOTE: Action status (replicated or not) is carried in act_id. E.g. 
-ENOTCONN * means connection to primary component was lost while sending, * -ERESTART means that action delivery was interrupted and it must be * resent. */ extern ssize_t gcs_core_recv (gcs_core_t* conn, struct gcs_act_rcvd* recv_act, long long timeout); /* group protocol version */ extern gcs_proto_t gcs_core_group_protocol_version (const gcs_core_t* conn); /* Configuration functions */ /* Sets maximum message size to achieve requested network packet size. * In case of failure returns negative error code, in case of success - * resulting message payload size (size of action fragment) */ extern int gcs_core_set_pkt_size (gcs_core_t* conn, int pkt_size); /* sends this node's last applied value to group */ extern long gcs_core_set_last_applied (gcs_core_t* core, gcs_seqno_t seqno); /* sends status of the ended snapshot (snapshot seqno or error code) */ extern long gcs_core_send_join (gcs_core_t* core, gcs_seqno_t seqno); /* sends SYNC notice, seqno currently has no meaning */ extern long gcs_core_send_sync (gcs_core_t* core, gcs_seqno_t seqno); /* sends flow control message */ extern long gcs_core_send_fc (gcs_core_t* core, const void* fc, size_t fc_size); extern gcs_seqno_t gcs_core_caused(gcs_core_t* core); extern long gcs_core_param_set (gcs_core_t* core, const char* key, const char* value); extern const char* gcs_core_param_get (gcs_core_t* core, const char* key); void gcs_core_get_status(gcs_core_t* core, gu::Status& status); #ifdef GCS_CORE_TESTING /* gcs_core_send() interface does not allow enough concurrency control to model * various race conditions for unit testing - it is not atomic. The functions * below expose gcs_core unit internals solely for the purpose of testing */ #include "gcs_msg_type.hpp" #include "gcs_backend.hpp" extern gcs_backend_t* gcs_core_get_backend (gcs_core_t* core); // switches lock-step mode on/off extern void gcs_core_send_lock_step (gcs_core_t* core, bool enable); // step through action send process (send another fragment). // returns positive number if there was a send thread waiting for it. 
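// A minimal usage sketch for the lock-step helpers (an assumption: the test
// has already created and opened `core`; the sending thread is hypothetical
// and not part of this header):
//
//   gcs_core_send_lock_step (core, true);          // fragments now wait
//   /* ... another thread is blocked inside gcs_core_send() ... */
//   while (gcs_core_send_step (core, 500) > 0) {}  // release one fragment per
//                                                  // call until nobody waits
//   gcs_core_send_lock_step (core, false);         // restore normal sending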
extern long gcs_core_send_step (gcs_core_t* core, long timeout_ms); extern void gcs_core_set_state_uuid (gcs_core_t* core, const gu_uuid_t* uuid); #include "gcs_group.hpp" extern const gcs_group_t* gcs_core_get_group (const gcs_core_t* core); #include "gcs_fifo_lite.hpp" extern gcs_fifo_lite_t* gcs_core_get_fifo (gcs_core_t* core); #endif /* GCS_CORE_TESTING */ #endif /* _gcs_core_h_ */ galera-3-25.3.20/gcs/src/gcs_dummy.cpp0000644000015300001660000002303513042054732017177 0ustar jenkinsjenkins/* * Copyright (C) 2008-2014 Codership Oy * * $Id$ */ /* * Dummy backend implementation * */ #include #include #include #include #include #include #include #define GCS_COMP_MSG_ACCESS // for gcs_comp_memb_t #ifndef GCS_DUMMY_TESTING #define GCS_DUMMY_TESTING #endif #include "gcs_dummy.hpp" typedef struct dummy_msg { gcs_msg_type_t type; ssize_t len; long sender_idx; uint8_t buf[]; } dummy_msg_t; static inline dummy_msg_t* dummy_msg_create (gcs_msg_type_t const type, size_t const len, long const sender, const void* const buf) { dummy_msg_t *msg = NULL; if ((msg = static_cast(gu_malloc (sizeof(dummy_msg_t) + len)))) { memcpy (msg->buf, buf, len); msg->len = len; msg->type = type; msg->sender_idx = sender; } return msg; } static inline long dummy_msg_destroy (dummy_msg_t *msg) { if (msg) { gu_free (msg); } return 0; } typedef enum dummy_state { DUMMY_DESTROYED, DUMMY_CLOSED, DUMMY_NON_PRIM, DUMMY_TRANS, DUMMY_PRIM, } dummy_state_t; typedef struct gcs_backend_conn { gu_fifo_t* gc_q; /* "serializator" */ volatile dummy_state_t state; gcs_seqno_t msg_id; const size_t max_pkt_size; const size_t hdr_size; const size_t max_send_size; long my_idx; long memb_num; gcs_comp_memb_t* memb; } dummy_t; static GCS_BACKEND_DESTROY_FN(dummy_destroy) { dummy_t* dummy = backend->conn; if (!dummy || dummy->state != DUMMY_CLOSED) return -EBADFD; // gu_debug ("Deallocating message queue (serializer)"); gu_fifo_destroy (dummy->gc_q); if (dummy->memb) gu_free (dummy->memb); gu_free (dummy); backend->conn = NULL; return 0; } static GCS_BACKEND_SEND_FN(dummy_send) { int err = 0; dummy_t* dummy = backend->conn; if (gu_unlikely(NULL == dummy)) return -EBADFD; if (gu_likely(DUMMY_PRIM == dummy->state)) { err = gcs_dummy_inject_msg (backend, buf, len, msg_type, backend->conn->my_idx); } else { static long send_error[DUMMY_PRIM] = { -EBADFD, -EBADFD, -ENOTCONN, -EAGAIN }; err = send_error[dummy->state]; } return err; } static GCS_BACKEND_RECV_FN(dummy_recv) { long ret = 0; dummy_t* conn = backend->conn; msg->sender_idx = GCS_SENDER_NONE; msg->type = GCS_MSG_ERROR; assert (conn); /* skip it if we already have popped a message from the queue * in the previous call */ if (gu_likely(DUMMY_CLOSED <= conn->state)) { int err; dummy_msg_t** ptr = static_cast( gu_fifo_get_head (conn->gc_q, &err)); if (gu_likely(ptr != NULL)) { dummy_msg_t* dmsg = *ptr; assert (NULL != dmsg); msg->type = dmsg->type; msg->sender_idx = dmsg->sender_idx; ret = dmsg->len; msg->size = ret; if (gu_likely(dmsg->len <= msg->buf_len)) { gu_fifo_pop_head (conn->gc_q); memcpy (msg->buf, dmsg->buf, dmsg->len); dummy_msg_destroy (dmsg); } else { // supplied recv buffer too short, leave the message in queue memcpy (msg->buf, dmsg->buf, msg->buf_len); gu_fifo_release (conn->gc_q); } } else { ret = -EBADFD; // closing gu_debug ("Returning %d: %s", ret, strerror(-ret)); } } else { ret = -EBADFD; } return ret; } static GCS_BACKEND_NAME_FN(dummy_name) { return "built-in dummy backend"; } static GCS_BACKEND_MSG_SIZE_FN(dummy_msg_size) { const long max_pkt_size = 
backend->conn->max_pkt_size; if (pkt_size > max_pkt_size) { gu_warn ("Requested packet size: %d, maximum possible packet size: %d", pkt_size, max_pkt_size); return (max_pkt_size - backend->conn->hdr_size); } return (pkt_size - backend->conn->hdr_size); } static GCS_BACKEND_OPEN_FN(dummy_open) { long ret = -ENOMEM; dummy_t* dummy = backend->conn; gcs_comp_msg_t* comp; if (!dummy) { gu_debug ("Backend not initialized"); return -EBADFD; } if (!bootstrap) { dummy->state = DUMMY_TRANS; return 0; } comp = gcs_comp_msg_new (true, false, 0, 1, 0); if (comp) { ret = gcs_comp_msg_add (comp, "11111111-2222-3333-4444-555555555555",0); assert (0 == ret); // we have only one member, index = 0 dummy->state = DUMMY_TRANS; // required by gcs_dummy_set_component() ret = gcs_dummy_set_component (backend, comp); // install new component if (ret >= 0) { // queue the message ret = gcs_comp_msg_size(comp); ret = gcs_dummy_inject_msg (backend, comp, ret, GCS_MSG_COMPONENT, GCS_SENDER_NONE); if (ret > 0) ret = 0; } gcs_comp_msg_delete (comp); } gu_debug ("Opened backend connection: %d (%s)", ret, strerror(-ret)); return ret; } static GCS_BACKEND_CLOSE_FN(dummy_close) { long ret = -ENOMEM; dummy_t* dummy = backend->conn; gcs_comp_msg_t* comp; if (!dummy) return -EBADFD; comp = gcs_comp_msg_leave (0); if (comp) { ret = gcs_comp_msg_size(comp); ret = gcs_dummy_inject_msg (backend, comp, ret, GCS_MSG_COMPONENT, GCS_SENDER_NONE); // Here's a race condition - some other thread can send something // after leave message. But caller should guarantee serial access. gu_fifo_close (dummy->gc_q); if (ret > 0) ret = 0; gcs_comp_msg_delete (comp); } dummy->state = DUMMY_CLOSED; return ret; } static GCS_BACKEND_PARAM_SET_FN(dummy_param_set) { return 1; } static GCS_BACKEND_PARAM_GET_FN(dummy_param_get) { return NULL; } GCS_BACKEND_STATUS_GET_FN(dummy_status_get) { } GCS_BACKEND_CREATE_FN(gcs_dummy_create) { long ret = -ENOMEM; dummy_t* dummy = NULL; if (!(dummy = GU_CALLOC(1, dummy_t))) goto out0; dummy->state = DUMMY_CLOSED; *(size_t*)(&dummy->max_pkt_size) = (size_t) sysconf (_SC_PAGESIZE); *(size_t*)(&dummy->hdr_size) = sizeof(dummy_msg_t); *(size_t*)(&dummy->max_send_size) = dummy->max_pkt_size - dummy->hdr_size; if (!(dummy->gc_q = gu_fifo_create (1 << 16, sizeof(void*)))) goto out1; backend->conn = NULL; backend->open = dummy_open; backend->close = dummy_close; backend->destroy = dummy_destroy; backend->send = dummy_send; backend->recv = dummy_recv; backend->name = dummy_name; backend->msg_size = dummy_msg_size; backend->param_set = dummy_param_set; backend->param_get = dummy_param_get; backend->status_get = dummy_status_get; backend->conn = dummy; // set data return 0; out1: gu_free (dummy); out0: backend->conn = NULL; return ret; } GCS_BACKEND_REGISTER_FN(gcs_dummy_register) { return false; } /*! Injects a message in the message queue to produce a desired msg sequence. */ long gcs_dummy_inject_msg (gcs_backend_t* backend, const void* buf, size_t buf_len, gcs_msg_type_t type, long sender_idx) { long ret; size_t send_size = buf_len < backend->conn->max_send_size ? buf_len : backend->conn->max_send_size; dummy_msg_t* msg = dummy_msg_create (type, send_size, sender_idx, buf); if (msg) { dummy_msg_t** ptr = static_cast( gu_fifo_get_tail (backend->conn->gc_q)); if (gu_likely(ptr != NULL)) { *ptr = msg; gu_fifo_push_tail (backend->conn->gc_q); ret = send_size; } else { dummy_msg_destroy (msg); ret = -EBADFD; // closed } } else { ret = -ENOMEM; } return ret; } /*! Sets the new component view. 
* The same component message should be injected in the queue separately * (see gcs_dummy_inject_msg()) in order to model different race conditions */ long gcs_dummy_set_component (gcs_backend_t* backend, const gcs_comp_msg_t* comp) { dummy_t* dummy = backend->conn; long new_num = gcs_comp_msg_num (comp); long i; assert (dummy->state > DUMMY_CLOSED); if (dummy->memb_num != new_num) { void* tmp = gu_realloc (dummy->memb, new_num * sizeof(gcs_comp_memb_t)); if (NULL == tmp) return -ENOMEM; dummy->memb = static_cast(tmp); dummy->memb_num = new_num; } for (i = 0; i < dummy->memb_num; i++) { strcpy ((char*)&dummy->memb[i], gcs_comp_msg_member(comp, i)->id); } dummy->my_idx = gcs_comp_msg_self(comp); dummy->state = gcs_comp_msg_primary(comp) ? DUMMY_PRIM : DUMMY_NON_PRIM; gu_debug ("Setting state to %s", DUMMY_PRIM == dummy->state ? "DUMMY_PRIM" : "DUMMY_NON_PRIM"); return 0; } /*! Is needed to set transitional state */ long gcs_dummy_set_transitional (gcs_backend_t* backend) { backend->conn->state = DUMMY_TRANS; return 0; } galera-3-25.3.20/gcs/src/gcs_conf.cpp0000644000015300001660000000113513042054732016766 0ustar jenkinsjenkins/* * Copyright (C) 2008 Codership Oy * * $Id$ */ /* Logging options */ #include #include "gcs.hpp" long gcs_conf_set_log_file (FILE *file) { return gu_conf_set_log_file (file); } long gcs_conf_set_log_callback (void (*logger) (int, const char*)) { return gu_conf_set_log_callback (logger); } long gcs_conf_self_tstamp_on () { return gu_conf_self_tstamp_on (); } long gcs_conf_self_tstamp_off () { return gu_conf_self_tstamp_off (); } long gcs_conf_debug_on () { return gu_conf_debug_on (); } long gcs_conf_debug_off () { return gu_conf_debug_off (); } galera-3-25.3.20/gcs/src/gcs_params.hpp0000644000015300001660000000230713042054732017333 0ustar jenkinsjenkins/* * Copyright (C) 2010-2014 Codership Oy * * $Id$ */ #ifndef _gcs_params_h_ #define _gcs_params_h_ #include "galerautils.h" struct gcs_params { double fc_resume_factor; double recv_q_soft_limit; double max_throttle; ssize_t recv_q_hard_limit; long fc_base_limit; long max_packet_size; long fc_debug; bool fc_master_slave; bool sync_donor; }; extern const char* const GCS_PARAMS_FC_FACTOR; extern const char* const GCS_PARAMS_FC_LIMIT; extern const char* const GCS_PARAMS_FC_MASTER_SLAVE; extern const char* const GCS_PARAMS_FC_DEBUG; extern const char* const GCS_PARAMS_SYNC_DONOR; extern const char* const GCS_PARAMS_MAX_PKT_SIZE; extern const char* const GCS_PARAMS_RECV_Q_HARD_LIMIT; extern const char* const GCS_PARAMS_RECV_Q_SOFT_LIMIT; extern const char* const GCS_PARAMS_MAX_THROTTLE; /*! Register configuration parameters */ extern bool gcs_params_register(gu_config_t* config); /*! 
Initializes parameters from config * * @return 0 in case of success, * -EINVAL if some values were set incorrectly in config */ extern long gcs_params_init (struct gcs_params* params, gu_config_t* config); #endif /* _gcs_params_h_ */ galera-3-25.3.20/gcs/src/gcs_test.sh0000755000015300001660000000105613042054732016655 0ustar jenkinsjenkins#!/bin/sh # # This script checks the output of the gcs_test program # to verify that all actions that were sent were received # intact # # $Id$ SEND_LOG="gcs_test_send.log" RECV_LOG="gcs_test_recv.log" echo "Sent action count: $(wc -l $SEND_LOG)" echo "Received action count: $(wc -l $RECV_LOG)" SEND_MD5=$(cat "$SEND_LOG" | awk '{ print $4 " " $5 }'| sort -n -k 2 | tee sort_send | md5sum) echo "send_log md5: $SEND_MD5" RECV_MD5=$(cat "$RECV_LOG" | awk '{ print $4 " " $5 }'| sort -n -k 2 | tee sort_recv | md5sum) echo "recv_log md5: $RECV_MD5" # galera-3-25.3.20/gcs/src/gcs.cpp0000644000015300001660000020061113042054732015761 0ustar jenkinsjenkins/* * Copyright (C) 2008-2014 Codership Oy * * $Id$ */ /* * Top-level application interface implementation. */ #include #include #include #include #include #include #include #include "gcs_priv.hpp" #include "gcs_params.hpp" #include "gcs_fc.hpp" #include "gcs_seqno.hpp" #include "gcs_core.hpp" #include "gcs_fifo_lite.hpp" #include "gcs_sm.hpp" #include "gcs_gcache.hpp" const char* gcs_node_state_to_str (gcs_node_state_t state) { static const char* str[GCS_NODE_STATE_MAX + 1] = { "NON-PRIMARY", "PRIMARY", "JOINER", "DONOR", "JOINED", "SYNCED", "UNKNOWN" }; if (state < GCS_NODE_STATE_MAX) return str[state]; return str[GCS_NODE_STATE_MAX]; } const char* gcs_act_type_to_str (gcs_act_type_t type) { static const char* str[GCS_ACT_UNKNOWN + 1] = { "TORDERED", "COMMIT_CUT", "STATE_REQUEST", "CONFIGURATION", "JOIN", "SYNC", "FLOW", "SERVICE", "ERROR", "UNKNOWN" }; if (type < GCS_ACT_UNKNOWN) return str[type]; return str[GCS_ACT_UNKNOWN]; } static const long GCS_MAX_REPL_THREADS = 16384; typedef enum { GCS_CONN_SYNCED, // caught up with the rest of the group GCS_CONN_JOINED, // state transfer complete GCS_CONN_DONOR, // in state transfer, donor GCS_CONN_JOINER, // in state transfer, joiner GCS_CONN_PRIMARY, // in primary conf, needs state transfer GCS_CONN_OPEN, // just connected to group, non-primary GCS_CONN_CLOSED, GCS_CONN_DESTROYED, GCS_CONN_ERROR, GCS_CONN_STATE_MAX } gcs_conn_state_t; #define GCS_CLOSED_ERROR -EBADFD; // file descriptor in bad state static const char* gcs_conn_state_str[GCS_CONN_STATE_MAX] = { "SYNCED", "JOINED", "DONOR/DESYNCED", "JOINER", "PRIMARY", "OPEN", "CLOSED", "DESTROYED", "ERROR" }; static bool const GCS_FC_STOP = true; static bool const GCS_FC_CONT = false; /** Flow control message */ struct gcs_fc_event { uint32_t conf_id; // least significant part of configuraiton seqno uint32_t stop; // boolean value } __attribute__((__packed__)); struct gcs_conn { long my_idx; long memb_num; char* my_name; char* channel; char* socket; gcs_conn_state_t state; gu_config_t* config; bool config_is_local; struct gcs_params params; gcache_t* gcache; gcs_sm_t* sm; gcs_seqno_t local_act_id; /* local seqno of the action */ gcs_seqno_t global_seqno; /* A queue for threads waiting for replicated actions */ gcs_fifo_lite_t* repl_q; gu_thread_t send_thread; /* A queue for threads waiting for received actions */ gu_fifo_t* recv_q; ssize_t recv_q_size; gu_thread_t recv_thread; /* Message receiving timeout - absolute date in nanoseconds */ long long timeout; /* Flow Control */ gu_mutex_t fc_lock; uint32_t conf_id; // 
configuration ID long stop_sent; // how many STOPs - CONTs were sent long stop_count; // counts stop requests received long queue_len; // slave queue length long upper_limit; // upper slave queue limit long lower_limit; // lower slave queue limit long fc_offset; // offset for catchup phase gcs_conn_state_t max_fc_state; // maximum state when FC is enabled long stats_fc_sent; // FC stats counters long stats_fc_received; // gcs_fc_t stfc; // state transfer FC object /* #603, #606 join control */ bool volatile need_to_join; gcs_seqno_t volatile join_seqno; /* sync control */ bool sync_sent_; bool sync_sent() const { assert(gu_fifo_locked(recv_q)); return sync_sent_; } void sync_sent(bool const val) { assert(gu_fifo_locked(recv_q)); sync_sent_ = val; } /* gcs_core object */ gcs_core_t* core; // the context that is returned by // the core group communication system int inner_close_count; // how many times _close has been called. int outer_close_count; // how many times gcs_close has been called. }; // Oh C++, where art thou? struct gcs_recv_act { struct gcs_act_rcvd rcvd; gcs_seqno_t local_id; }; struct gcs_repl_act { const struct gu_buf* act_in; struct gcs_action* action; gu_mutex_t wait_mutex; gu_cond_t wait_cond; gcs_repl_act(const struct gu_buf* a_act_in, struct gcs_action* a_action) : act_in(a_act_in), action(a_action) { } }; /*! Releases resources associated with parameters */ static void _cleanup_params (gcs_conn_t* conn) { if (conn->config_is_local) gu_config_destroy(conn->config); } /*! Creates local configuration object if no external is submitted */ static long _init_params (gcs_conn_t* conn, gu_config_t* conf) { long rc; conn->config = conf; conn->config_is_local = false; if (!conn->config) { conn->config = gu_config_create(); if (conn->config) { conn->config_is_local = true; } else { rc = -ENOMEM; goto enomem; } } rc = gcs_params_init (&conn->params, conn->config); if (!rc) return 0; _cleanup_params (conn); enomem: gu_error ("Parameter initialization failed: %s", strerror (-rc)); return rc; } /* Creates a group connection handle */ gcs_conn_t* gcs_create (gu_config_t* const conf, gcache_t* const gcache, const char* const node_name, const char* const inc_addr, int const repl_proto_ver, int const appl_proto_ver) { gcs_conn_t* conn = GU_CALLOC (1, gcs_conn_t); if (!conn) { gu_error ("Could not allocate GCS connection handle: %s", strerror (ENOMEM)); return NULL; } if (_init_params (conn, conf)) { goto init_params_failed; } if (gcs_fc_init (&conn->stfc, conn->params.recv_q_hard_limit, conn->params.recv_q_soft_limit, conn->params.max_throttle)) { gu_error ("FC initialization failed"); goto fc_init_failed; } conn->state = GCS_CONN_DESTROYED; conn->core = gcs_core_create (conf, gcache, node_name, inc_addr, repl_proto_ver, appl_proto_ver); if (!conn->core) { gu_error ("Failed to create core."); goto core_create_failed; } conn->repl_q = gcs_fifo_lite_create (GCS_MAX_REPL_THREADS, sizeof (struct gcs_repl_act*)); if (!conn->repl_q) { gu_error ("Failed to create repl_q."); goto repl_q_failed; } { size_t recv_q_len = gu_avphys_bytes() / sizeof(struct gcs_recv_act) / 4; gu_debug ("Requesting recv queue len: %zu", recv_q_len); conn->recv_q = gu_fifo_create (recv_q_len, sizeof(struct gcs_recv_act)); } if (!conn->recv_q) { gu_error ("Failed to create recv_q."); goto recv_q_failed; } conn->sm = gcs_sm_create(1<<16, 1); if (!conn->sm) { gu_error ("Failed to create send monitor"); goto sm_create_failed; } conn->state = GCS_CONN_CLOSED; conn->my_idx = -1; conn->local_act_id = GCS_SEQNO_FIRST; 
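    /* local_act_id numbers delivered actions in local receive order, starting
     * from GCS_SEQNO_FIRST; global_seqno below caches the last total-order
     * seqno learned from the group (updated on delivery and on configuration
     * changes). */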
conn->global_seqno = 0; conn->fc_offset = 0; conn->timeout = GU_TIME_ETERNITY; conn->gcache = gcache; conn->max_fc_state = conn->params.sync_donor ? GCS_CONN_DONOR : GCS_CONN_JOINED; gu_mutex_init (&conn->fc_lock, NULL); return conn; // success sm_create_failed: gu_fifo_destroy (conn->recv_q); recv_q_failed: gcs_fifo_lite_destroy (conn->repl_q); repl_q_failed: gcs_core_destroy (conn->core); core_create_failed: fc_init_failed: _cleanup_params (conn); init_params_failed: gu_free (conn); gu_error ("Failed to create GCS connection handle."); return NULL; // failure } long gcs_init (gcs_conn_t* conn, gcs_seqno_t seqno, const uint8_t uuid[GU_UUID_LEN]) { if (GCS_CONN_CLOSED == conn->state) { return gcs_core_init (conn->core, seqno, (const gu_uuid_t*)uuid); } else { gu_error ("State must be CLOSED"); if (conn->state < GCS_CONN_CLOSED) return -EBUSY; else // DESTROYED return -EBADFD; } } /*! * Checks if we should freak out on send/recv errors. * Sometimes errors are ok, e.g. when attempting to send FC_CONT message * on a closing connection. This can happen because GCS connection state * change propagation from lower layers to upper layers is not atomic. * * @param err error code returned by send/recv function * @param warning warning to log if necessary * @return 0 if error can be ignored, original err value if not */ static long gcs_check_error (long err, const char* warning) { switch (err) { case -ENOTCONN: case -ECONNABORTED: if (NULL != warning) { gu_warn ("%s: %d (%s)", warning, err, strerror(-err)); } err = 0; break; default:; } return err; } static inline long gcs_send_fc_event (gcs_conn_t* conn, bool stop) { struct gcs_fc_event fc = { htogl(conn->conf_id), stop }; return gcs_core_send_fc (conn->core, &fc, sizeof(fc)); } /* To be called under slave queue lock. Returns true if FC_STOP must be sent */ static inline bool gcs_fc_stop_begin (gcs_conn_t* conn) { long err = 0; bool ret = (conn->stop_count <= 0 && conn->stop_sent <= 0 && conn->queue_len > (conn->upper_limit + conn->fc_offset) && conn->state <= conn->max_fc_state && !(err = gu_mutex_lock (&conn->fc_lock))); if (gu_unlikely(err)) { gu_fatal ("Mutex lock failed: %d (%s)", err, strerror(err)); abort(); } conn->stop_sent += ret; return ret; } /* Complement to gcs_fc_stop_begin. */ static inline long gcs_fc_stop_end (gcs_conn_t* conn) { long ret; gu_debug ("SENDING FC_STOP (local seqno: %lld, fc_offset: %ld)", conn->local_act_id, conn->fc_offset); ret = gcs_send_fc_event (conn, GCS_FC_STOP); if (ret >= 0) { ret = 0; conn->stats_fc_sent++; } else { conn->stop_sent--; assert (conn->stop_sent >= 0); } gu_mutex_unlock (&conn->fc_lock); ret = gcs_check_error (ret, "Failed to send FC_STOP signal"); return ret; } /* To be called under slave queue lock. 
Returns true if FC_CONT must be sent */ static inline bool gcs_fc_cont_begin (gcs_conn_t* conn) { long err = 0; bool queue_decreased = (conn->fc_offset > conn->queue_len && (conn->fc_offset = conn->queue_len, true)); bool ret = (conn->stop_sent > 0 && (conn->lower_limit >= conn->queue_len || queue_decreased) && conn->state <= conn->max_fc_state && !(err = gu_mutex_lock (&conn->fc_lock))); if (gu_unlikely(err)) { gu_fatal ("Mutex lock failed: %d (%s)", err, strerror(err)); abort(); } conn->stop_sent -= ret; // decrement optimistically to allow for parallel // recv threads return ret; } /* Complement to gcs_fc_cont_begin() */ static inline long gcs_fc_cont_end (gcs_conn_t* conn) { long ret; assert (GCS_CONN_DONOR >= conn->state); gu_debug ("SENDING FC_CONT (local seqno: %lld, fc_offset: %ld)", conn->local_act_id, conn->fc_offset); ret = gcs_send_fc_event (conn, GCS_FC_CONT); if (gu_likely (ret >= 0)) { ret = 0; } conn->stop_sent += (ret != 0); // fix count in case of error gu_mutex_unlock (&conn->fc_lock); ret = gcs_check_error (ret, "Failed to send FC_CONT signal"); return ret; } /* To be called under slave queue lock. Returns true if SYNC must be sent */ static inline bool gcs_send_sync_begin (gcs_conn_t* conn) { if (gu_unlikely(GCS_CONN_JOINED == conn->state)) { if (conn->lower_limit >= conn->queue_len && !conn->sync_sent()) { // tripped lower slave queue limit, send SYNC message conn->sync_sent(true); #if 0 gu_info ("Sending SYNC: state = %s, queue_len = %ld, " "lower_limit = %ld, sync_sent = %s", gcs_conn_state_str[conn->state], conn->queue_len, conn->lower_limit, conn->sync_sent() ? "true" : "false"); #endif return true; } #if 0 else { gu_info ("Not sending SYNC: state = %s, queue_len = %ld, " "lower_limit = %ld, sync_sent = %s", gcs_conn_state_str[conn->state], conn->queue_len, conn->lower_limit, conn->sync_sent() ? "true" : "false"); } #endif } return false; } static inline long gcs_send_sync_end (gcs_conn_t* conn) { long ret = 0; gu_debug ("SENDING SYNC"); ret = gcs_core_send_sync (conn->core, 0); if (gu_likely (ret >= 0)) { ret = 0; } else { gu_fifo_lock(conn->recv_q); conn->sync_sent(false); gu_fifo_release(conn->recv_q); } ret = gcs_check_error (ret, "Failed to send SYNC signal"); return ret; } static inline long gcs_send_sync (gcs_conn_t* conn) { gu_fifo_lock(conn->recv_q); bool const send_sync(gcs_send_sync_begin (conn)); gu_fifo_release(conn->recv_q); if (send_sync) { return gcs_send_sync_end (conn); } else { return 0; } } /*! * State transition functions - just in case we want to add something there. 
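 * How to read the allowed[][] matrix below: it is indexed as
 * allowed[new_state][old_state], so a row is the target state (trailing
 * comments) and a column is the current state (header comment). E.g. SYNCED
 * may only be entered from JOINED, and JOINED only from DONOR or JOINER.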
* @todo: need to be reworked, see #231 */ static bool gcs_shift_state (gcs_conn_t* const conn, gcs_conn_state_t const new_state) { static const bool allowed [GCS_CONN_STATE_MAX][GCS_CONN_STATE_MAX] = { // SYNCED JOINED DONOR JOINER PRIM OPEN CLOSED DESTR { false, true, false, false, false, false, false, false }, // SYNCED { false, false, true, true, false, false, false, false }, // JOINED { true, true, true, false, false, false, false, false }, // DONOR { false, false, false, false, true, false, false, false }, // JOINER { true, true, true, true, true, true, false, false }, // PRIMARY { true, true, true, true, true, false, true, false }, // OPEN { true, true, true, true, true, true, false, false }, // CLOSED { false, false, false, false, false, false, true, false } // DESTROYED }; gcs_conn_state_t const old_state = conn->state; if (!allowed[new_state][old_state]) { if (old_state != new_state) { gu_warn ("Shifting %s -> %s is not allowed (TO: %lld)", gcs_conn_state_str[old_state], gcs_conn_state_str[new_state], conn->global_seqno); } return false; } if (old_state != new_state) { gu_info ("Shifting %s -> %s (TO: %lld)", gcs_conn_state_str[old_state], gcs_conn_state_str[new_state], conn->global_seqno); conn->state = new_state; } return true; } static void gcs_become_open (gcs_conn_t* conn) { gcs_shift_state (conn, GCS_CONN_OPEN); } static long gcs_set_pkt_size (gcs_conn_t *conn, long pkt_size) { if (conn->state != GCS_CONN_CLOSED) return -EPERM; // #600 workaround long ret = gcs_core_set_pkt_size (conn->core, pkt_size); if (ret >= 0) { conn->params.max_packet_size = ret; gu_config_set_int64 (conn->config, GCS_PARAMS_MAX_PKT_SIZE, conn->params.max_packet_size); } return ret; } static long _release_flow_control (gcs_conn_t* conn) { int err = 0; if (gu_unlikely(err = gu_mutex_lock (&conn->fc_lock))) { gu_fatal ("Mutex lock failed: %d (%s)", err, strerror(err)); abort(); } if (conn->stop_sent) { assert (1 == conn->stop_sent); conn->stop_sent--; err = gcs_fc_cont_end (conn); } else { gu_mutex_unlock (&conn->fc_lock); } return err; } static void gcs_become_primary (gcs_conn_t* conn) { if (!gcs_shift_state (conn, GCS_CONN_PRIMARY)) { gu_fatal ("Protocol violation, can't continue"); gcs_close (conn); abort(); } long ret; if ((ret = _release_flow_control (conn))) { gu_fatal ("Failed to release flow control: %ld (%s)", ret, strerror(ret)); gcs_close (conn); abort(); } } static void gcs_become_joiner (gcs_conn_t* conn) { if (!gcs_shift_state (conn, GCS_CONN_JOINER)) { gu_fatal ("Protocol violation, can't continue"); assert (0); abort(); } if (gcs_fc_init (&conn->stfc, conn->params.recv_q_hard_limit, conn->params.recv_q_soft_limit, conn->params.max_throttle)) { gu_fatal ("Becoming JOINER: FC initialization failed, can't continue."); abort(); } gcs_fc_reset (&conn->stfc, conn->recv_q_size); gcs_fc_debug (&conn->stfc, conn->params.fc_debug); } // returns 1 if accepts, 0 if rejects, negative error code if fails. static long gcs_become_donor (gcs_conn_t* conn) { if (gcs_shift_state (conn, GCS_CONN_DONOR)) { long err = 0; if (conn->max_fc_state < GCS_CONN_DONOR) { err = _release_flow_control (conn); } return (0 == err ? 1 : err); } gu_warn ("Rejecting State Transfer Request in state '%s'. " "Joiner should be restarted.", gcs_conn_state_str[conn->state]); if (conn->state < GCS_CONN_OPEN){ ssize_t err; gu_warn ("Received State Transfer Request in wrong state %s. " "Rejecting.", gcs_conn_state_str[conn->state]); // reject the request. 
// error handling currently is way too simplistic err = gcs_join (conn, -EPROTO); if (err < 0 && !(err == -ENOTCONN || err == -EBADFD)) { gu_fatal ("Failed to send State Transfer Request rejection: " "%zd (%s)", err, (strerror (-err))); assert (0); return -ENOTRECOVERABLE; // failed to clear donor status, } } return 0; // do not pass to application } static long _release_sst_flow_control (gcs_conn_t* conn) { long ret = 0; do { if (conn->stop_sent > 0) { ret = gcs_send_fc_event (conn, GCS_FC_CONT); conn->stop_sent -= (ret >= 0); } } while (ret < 0 && -EAGAIN == ret); // we need to send CONT here at all costs ret = gcs_check_error (ret, "Failed to release SST flow control."); return ret; } static void gcs_become_joined (gcs_conn_t* conn) { long ret; if (GCS_CONN_JOINER == conn->state) { ret = _release_sst_flow_control (conn); if (ret < 0) { gu_fatal ("Releasing SST flow control failed: %ld (%s)", ret, strerror (-ret)); abort(); } conn->timeout = GU_TIME_ETERNITY; } /* See also gcs_handle_act_conf () for a case of cluster bootstrapping */ if (gcs_shift_state (conn, GCS_CONN_JOINED)) { conn->fc_offset = conn->queue_len; conn->need_to_join = false; gu_debug("Become joined, FC offset %ld", conn->fc_offset); /* One of the cases when the node can become SYNCED */ if ((ret = gcs_send_sync (conn))) { gu_warn ("Sending SYNC failed: %ld (%s)", ret, strerror (-ret)); } } else { assert (0); } } static void gcs_become_synced (gcs_conn_t* conn) { gu_fifo_lock(conn->recv_q); { gcs_shift_state (conn, GCS_CONN_SYNCED); conn->sync_sent(false); } gu_fifo_release(conn->recv_q); gu_debug("Become synced, FC offset %ld", conn->fc_offset); conn->fc_offset = 0; } /* to be called under protection of both recv_q and fc_lock */ static void _set_fc_limits (gcs_conn_t* conn) { /* Killing two birds with one stone: flat FC profile for master-slave setups * plus #440: giving single node some slack at some math correctness exp.*/ double const fn (conn->params.fc_master_slave ? 1.0 : sqrt(double(conn->memb_num))); conn->upper_limit = conn->params.fc_base_limit * fn + .5; conn->lower_limit = conn->upper_limit * conn->params.fc_resume_factor + .5; gu_info ("Flow-control interval: [%ld, %ld]", conn->lower_limit, conn->upper_limit); } /*! Handles flow control events * (this is frequent, so leave it inlined) */ static inline void gcs_handle_flow_control (gcs_conn_t* conn, const struct gcs_fc_event* fc) { if (gtohl(fc->conf_id) != (uint32_t)conn->conf_id) { // obsolete fc request return; } conn->stop_count += ((fc->stop != 0) << 1) - 1; // +1 if !0, -1 if 0 conn->stats_fc_received += (fc->stop != 0); if (1 == conn->stop_count) { gcs_sm_pause (conn->sm); // first STOP request } else if (0 == conn->stop_count) { gcs_sm_continue (conn->sm); // last CONT request } return; } static void _reset_pkt_size(gcs_conn_t* conn) { if (conn->state != GCS_CONN_CLOSED) return; // #600 workaround long ret; if (0 > (ret = gcs_core_set_pkt_size (conn->core, conn->params.max_packet_size))) { gu_warn ("Failed to set packet size: %ld (%s)", ret, strerror(-ret)); } } static long _join (gcs_conn_t* conn, gcs_seqno_t seqno) { long err; while (-EAGAIN == (err = gcs_core_send_join (conn->core, seqno))) usleep (10000); switch (err) { case -ENOTCONN: gu_warn ("Sending JOIN failed: %d (%s). " "Will retry in new primary component.", err, strerror(-err)); case 0: return 0; default: gu_error ("Sending JOIN failed: %d (%s).", err, strerror(-err)); return err; } } /*! 
Handles configuration action */ // TODO: this function does not provide any way for recv_thread to gracefully // exit in case of self-leave message. static void gcs_handle_act_conf (gcs_conn_t* conn, const void* action) { const gcs_act_conf_t* conf = (const gcs_act_conf_t*)action; long ret; conn->my_idx = conf->my_idx; gu_fifo_lock(conn->recv_q); { /* reset flow control as membership is most likely changed */ if (!gu_mutex_lock (&conn->fc_lock)) { conn->stop_sent = 0; conn->stop_count = 0; conn->conf_id = conf->conf_id; conn->memb_num = conf->memb_num; _set_fc_limits (conn); gu_mutex_unlock (&conn->fc_lock); } else { gu_fatal ("Failed to lock mutex."); abort(); } conn->sync_sent(false); // need to wake up send monitor if it was paused during CC gcs_sm_continue(conn->sm); } gu_fifo_release (conn->recv_q); if (conf->conf_id < 0) { if (0 == conf->memb_num) { assert (conf->my_idx < 0); gu_info ("Received SELF-LEAVE. Closing connection."); gcs_shift_state (conn, GCS_CONN_CLOSED); } else { gu_info ("Received NON-PRIMARY."); assert (GCS_NODE_STATE_NON_PRIM == conf->my_state); gcs_become_open (conn); conn->global_seqno = conf->seqno; } return; } assert (conf->conf_id >= 0); /* */ if (conf->memb_num < 1) { gu_fatal ("Internal error: PRIMARY configuration with %d nodes", conf->memb_num); abort(); } if (conf->my_idx < 0 || conf->my_idx >= conf->memb_num) { gu_fatal ("Internal error: index of this node (%d) is out of bounds: " "[%d, %d]", conf->my_idx, 0, conf->memb_num - 1); abort(); } if (conf->my_state < GCS_NODE_STATE_PRIM) { gu_fatal ("Internal error: NON-PRIM node state in PRIM configuraiton"); abort(); } /* */ conn->global_seqno = conf->seqno; /* at this point we have established protocol version, * so can set packet size */ // Ticket #600: commented out as unsafe under load _reset_pkt_size(conn); const gcs_conn_state_t old_state = conn->state; switch (conf->my_state) { case GCS_NODE_STATE_PRIM: gcs_become_primary(conn); return; /* Below are not real state transitions, rather state recovery, * so bypassing state transition matrix */ case GCS_NODE_STATE_JOINER: conn->state = GCS_CONN_JOINER; break; case GCS_NODE_STATE_DONOR: conn->state = GCS_CONN_DONOR; break; case GCS_NODE_STATE_JOINED: conn->state = GCS_CONN_JOINED; break; case GCS_NODE_STATE_SYNCED: conn->state = GCS_CONN_SYNCED; break; default: gu_fatal ("Internal error: unrecognized node state: %d", conf->my_state); abort(); } if (old_state != conn->state) { gu_info ("Restored state %s -> %s (%lld)", gcs_conn_state_str[old_state], gcs_conn_state_str[conn->state], conn->global_seqno); } switch (conn->state) { case GCS_CONN_JOINED: /* One of the cases when the node can become SYNCED */ if ((ret = gcs_send_sync(conn)) < 0) { gu_warn ("CC: sending SYNC failed: %ld (%s)", ret, strerror (-ret)); } break; case GCS_CONN_JOINER: case GCS_CONN_DONOR: /* #603, #606 - duplicate JOIN msg in case we lost it */ assert (conf->conf_id >= 0); if (conn->need_to_join) _join (conn, conn->join_seqno); break; default: break; } } static long gcs_handle_act_state_req (gcs_conn_t* conn, struct gcs_act_rcvd* rcvd) { if ((gcs_seqno_t)conn->my_idx == rcvd->id) { int const donor_idx = (int)rcvd->id; // to pacify valgrind gu_debug("Got GCS_ACT_STATE_REQ to %i, my idx: %ld", donor_idx, conn->my_idx); // rewrite to pass global seqno for application rcvd->id = conn->global_seqno; return gcs_become_donor (conn); } else { if (rcvd->id >= 0) { gcs_become_joiner (conn); } return 1; // pass to gcs_request_state_transfer() caller. } } /*! 
Allocates buffer with malloc to pass to the upper layer. */ static long gcs_handle_state_change (gcs_conn_t* conn, const struct gcs_act* act) { gu_debug ("Got '%s' dated %lld", gcs_act_type_to_str (act->type), gcs_seqno_gtoh(*(gcs_seqno_t*)act->buf)); void* buf = malloc (act->buf_len); if (buf) { memcpy (buf, act->buf, act->buf_len); /* Initially act->buf points to internal static recv buffer. * No leak here. */ ((struct gcs_act*)act)->buf = buf; return 1; } else { gu_fatal ("Could not allocate state change action (%zd bytes)", act->buf_len); abort(); return -ENOMEM; } } /*! * Performs work requred by action in current context. * @return negative error code, 0 if action should be discarded, 1 if should be * passed to application. */ static long gcs_handle_actions (gcs_conn_t* conn, struct gcs_act_rcvd* rcvd) { long ret = 0; switch (rcvd->act.type) { case GCS_ACT_FLOW: assert (sizeof(struct gcs_fc_event) == rcvd->act.buf_len); gcs_handle_flow_control (conn, (const gcs_fc_event*)rcvd->act.buf); break; case GCS_ACT_CONF: gcs_handle_act_conf (conn, rcvd->act.buf); ret = 1; break; case GCS_ACT_STATE_REQ: ret = gcs_handle_act_state_req (conn, rcvd); break; case GCS_ACT_JOIN: ret = gcs_handle_state_change (conn, &rcvd->act); if (gcs_seqno_gtoh(*(gcs_seqno_t*)rcvd->act.buf) < 0 && GCS_CONN_JOINER == conn->state) gcs_become_primary (conn); else gcs_become_joined (conn); break; case GCS_ACT_SYNC: if (rcvd->id < 0) { gu_fifo_lock(conn->recv_q); conn->sync_sent(false); gu_fifo_release(conn->recv_q); gcs_send_sync(conn); } else { ret = gcs_handle_state_change (conn, &rcvd->act); gcs_become_synced (conn); } break; default: break; } return ret; } static inline void GCS_FIFO_PUSH_TAIL (gcs_conn_t* conn, ssize_t size) { conn->recv_q_size += size; gu_fifo_push_tail(conn->recv_q); } /* Returns true if timeout was handled and false otherwise */ static bool _handle_timeout (gcs_conn_t* conn) { bool ret; long long now = gu_time_calendar(); /* TODO: now the only point for timeout is flow control (#412), * later we might need to handle more timers. */ if (conn->timeout <= now) { ret = ((GCS_CONN_JOINER != conn->state) || (_release_sst_flow_control (conn) >= 0)); } else { gu_error ("Unplanned timeout! (tout: %lld, now: %lld)", conn->timeout, now); ret = false; } conn->timeout = GU_TIME_ETERNITY; return ret; } static long _check_recv_queue_growth (gcs_conn_t* conn, ssize_t size) { assert (GCS_CONN_JOINER == conn->state); long ret = 0; long long pause = gcs_fc_process (&conn->stfc, size); if (pause > 0) { /* replication needs throttling */ if (conn->stop_sent <= 0) { if ((ret = gcs_send_fc_event (conn, GCS_FC_STOP)) >= 0) { conn->stop_sent++; ret = 0; } else { ret = gcs_check_error (ret, "Failed to send SST FC_STOP."); } } if (gu_likely(pause != GU_TIME_ETERNITY)) { if (GU_TIME_ETERNITY == conn->timeout) { conn->timeout = gu_time_calendar(); } conn->timeout += pause; // we need to track pauses regardless } else if (conn->timeout != GU_TIME_ETERNITY) { conn->timeout = GU_TIME_ETERNITY; gu_warn ("Replication paused until state transfer is complete " "due to reaching hard limit on the writeset queue size."); } return ret; } else { return pause; // 0 or error code } } static long _close(gcs_conn_t* conn, bool join_recv_thread) { /* all possible races in connection closing should be resolved by * the following call, it is thread-safe */ long ret; if (gu_atomic_fetch_and_add(&conn->inner_close_count, 1) != 0) { return -EALREADY; } if (!(ret = gcs_sm_close (conn->sm))) { // we ignore return value on purpose. 
the reason is // we can not tell why self-leave message is generated. // there are two possible reasons. // 1. gcs_core_close is called. // 2. GCommConn::run() caught exception. (void)gcs_core_close (conn->core); if (join_recv_thread) { /* if called from gcs_close(), we need to synchronize with gcs_recv_thread at this point */ if ((ret = gu_thread_join (conn->recv_thread, NULL))) { gu_error ("Failed to join recv_thread(): %d (%s)", -ret, strerror(-ret)); } else { gu_info ("recv_thread() joined."); } /* recv_thread() is supposed to set state to CLOSED when exiting */ assert (GCS_CONN_CLOSED == conn->state); } gu_info ("Closing replication queue."); struct gcs_repl_act** act_ptr; /* At this point (state == CLOSED) no new threads should be able to * queue for repl (check gcs_repl()), and recv thread is joined, so no * new actions will be received. Abort threads that are still waiting * in repl queue */ while ((act_ptr = (struct gcs_repl_act**)gcs_fifo_lite_get_head (conn->repl_q))) { struct gcs_repl_act* act = *act_ptr; gcs_fifo_lite_pop_head (conn->repl_q); /* This will wake up repl threads in repl_q - * they'll quit on their own, * they don't depend on the conn object after waking */ gu_mutex_lock (&act->wait_mutex); gu_cond_signal (&act->wait_cond); gu_mutex_unlock (&act->wait_mutex); } gcs_fifo_lite_close (conn->repl_q); /* wake all gcs_recv() threads () */ // FIXME: this can block waiting for applicaiton threads to fetch all // items. In certain situations this can block forever. Ticket #113 gu_info ("Closing slave action queue."); gu_fifo_close (conn->recv_q); } return ret; } /* * gcs_recv_thread() receives whatever actions arrive from group, * and performs necessary actions based on action type. */ static void *gcs_recv_thread (void *arg) { gcs_conn_t* conn = (gcs_conn_t*)arg; ssize_t ret = -ECONNABORTED; // To avoid race between gcs_open() and the following state check in while() gu_cond_t tmp_cond; /* TODO: rework when concurrency in SM is allowed */ gu_cond_init (&tmp_cond, NULL); gcs_sm_enter(conn->sm, &tmp_cond, false, true); gcs_sm_leave(conn->sm); gu_cond_destroy (&tmp_cond); while (conn->state < GCS_CONN_CLOSED) { gcs_seqno_t this_act_id = GCS_SEQNO_ILL; struct gcs_repl_act** repl_act_ptr; struct gcs_act_rcvd rcvd; ret = gcs_core_recv (conn->core, &rcvd, conn->timeout); if (gu_unlikely(ret <= 0)) { if (-ETIMEDOUT == ret && _handle_timeout(conn)) continue; struct gcs_recv_act* err_act = (struct gcs_recv_act*) gu_fifo_get_tail(conn->recv_q); assert (NULL == rcvd.act.buf); assert (0 == rcvd.act.buf_len); assert (GCS_ACT_ERROR == rcvd.act.type); assert (GCS_SEQNO_ILL == rcvd.id); err_act->rcvd = rcvd; err_act->local_id = GCS_SEQNO_ILL; GCS_FIFO_PUSH_TAIL (conn, rcvd.act.buf_len); gu_debug ("gcs_core_recv returned %d: %s", ret, strerror(-ret)); break; } // gu_info ("Received action type: %d, size: %d, global seqno: %lld", // act_type, act_size, (long long)act_id); assert (rcvd.act.type < GCS_ACT_ERROR); assert (ret == rcvd.act.buf_len); if (gu_unlikely(rcvd.act.type >= GCS_ACT_STATE_REQ)) { ret = gcs_handle_actions (conn, &rcvd); if (gu_unlikely(ret < 0)) { // error gu_debug ("gcs_handle_actions returned %d: %s", ret, strerror(-ret)); break; } if (gu_likely(ret <= 0)) continue; // not for application } /* deliver to application (note matching assert in the bottom-half of * gcs_repl()) */ if (gu_likely (rcvd.act.type != GCS_ACT_TORDERED || (rcvd.id > 0 && (conn->global_seqno = rcvd.id)))) { /* successful delivery - increment local order */ this_act_id = 
gu_atomic_fetch_and_add(&conn->local_act_id, 1); } if (NULL != rcvd.local && (repl_act_ptr = (struct gcs_repl_act**) gcs_fifo_lite_get_head (conn->repl_q)) && (gu_likely ((*repl_act_ptr)->act_in == rcvd.local) || /* at this point repl_q is locked and we need to unlock it and * return false to fall to the 'else' branch; unlikely case */ (gcs_fifo_lite_release (conn->repl_q), false))) { /* local action from repl_q */ struct gcs_repl_act* repl_act = *repl_act_ptr; gcs_fifo_lite_pop_head (conn->repl_q); assert (repl_act->action->type == rcvd.act.type); assert (repl_act->action->size == rcvd.act.buf_len || repl_act->action->type == GCS_ACT_STATE_REQ); repl_act->action->buf = rcvd.act.buf; repl_act->action->seqno_g = rcvd.id; repl_act->action->seqno_l = this_act_id; gu_mutex_lock (&repl_act->wait_mutex); gu_cond_signal (&repl_act->wait_cond); gu_mutex_unlock (&repl_act->wait_mutex); } else if (gu_likely(this_act_id >= 0)) { /* remote/non-repl'ed action */ struct gcs_recv_act* recv_act = (struct gcs_recv_act*)gu_fifo_get_tail (conn->recv_q); if (gu_likely (NULL != recv_act)) { recv_act->rcvd = rcvd; recv_act->local_id = this_act_id; conn->queue_len = gu_fifo_length (conn->recv_q) + 1; bool send_stop = gcs_fc_stop_begin (conn); // release queue GCS_FIFO_PUSH_TAIL (conn, rcvd.act.buf_len); if (gu_unlikely(GCS_CONN_JOINER == conn->state)) { ret = _check_recv_queue_growth (conn, rcvd.act.buf_len); assert (ret <= 0); if (ret < 0) break; } if (gu_unlikely(send_stop) && (ret = gcs_fc_stop_end(conn))) { gu_error ("gcs_fc_stop() returned %d: %s", ret, strerror(-ret)); break; } } else { assert (GCS_CONN_CLOSED == conn->state); ret = -EBADFD; break; } // gu_info("Received foreign action of type %d, size %d, id=%llu, " // "action %p", rcvd.act.type, rcvd.act.buf_len, // this_act_id, rcvd.act.buf); } else if (conn->my_idx == rcvd.sender_idx) { gu_fatal("Protocol violation: unordered local action not in repl_q:" " { {%p, %zd, %s}, %ld, %lld }.", rcvd.act.buf, rcvd.act.buf_len, gcs_act_type_to_str(rcvd.act.type), rcvd.sender_idx, rcvd.id); assert(0); ret = -ENOTRECOVERABLE; break; } else { gu_fatal ("Protocol violation: unordered remote action: " "{ {%p, %zd, %s}, %ld, %lld }", rcvd.act.buf, rcvd.act.buf_len, gcs_act_type_to_str(rcvd.act.type), rcvd.sender_idx, rcvd.id); assert (0); ret = -ENOTRECOVERABLE; break; } } if (ret > 0) { ret = 0; } else if (ret < 0) { /* In case of error call _close() to release repl_q waiters. 
*/ (void)_close(conn, false); gcs_shift_state (conn, GCS_CONN_CLOSED); } gu_info ("RECV thread exiting %d: %s", ret, strerror(-ret)); return NULL; } /* Opens connection to group */ long gcs_open (gcs_conn_t* conn, const char* channel, const char* url, bool const bootstrap) { long ret = 0; if ((ret = gcs_sm_open(conn->sm))) return ret; // open in case it is closed gu_cond_t tmp_cond; /* TODO: rework when concurrency in SM is allowed */ gu_cond_init (&tmp_cond, NULL); if ((ret = gcs_sm_enter (conn->sm, &tmp_cond, false, true))) { gu_error("Failed to enter send monitor: %d (%s)", ret, strerror(-ret)); return ret; } if (GCS_CONN_CLOSED == conn->state) { if (!(ret = gcs_core_open (conn->core, channel, url, bootstrap))) { _reset_pkt_size(conn); if (!(ret = gu_thread_create (&conn->recv_thread, NULL, gcs_recv_thread, conn))) { gcs_fifo_lite_open(conn->repl_q); gu_fifo_open(conn->recv_q); gcs_shift_state (conn, GCS_CONN_OPEN); gu_info ("Opened channel '%s'", channel); conn->inner_close_count = 0; conn->outer_close_count = 0; goto out; } else { gu_error ("Failed to create main receive thread: %ld (%s)", ret, strerror(-ret)); } gcs_core_close (conn->core); } else { gu_error ("Failed to open channel '%s' at '%s': %d (%s)", channel, url, ret, strerror(-ret)); } } else { gu_error ("Bad GCS connection state: %d (%s)", conn->state, gcs_conn_state_str[conn->state]); ret = -EBADFD; } out: gcs_sm_leave (conn->sm); gu_cond_destroy (&tmp_cond); return ret; } /* Closes group connection */ /* After it returns, application should have all time in the world to cancel * and join threads which try to access the handle, before calling gcs_destroy() * on it. */ long gcs_close (gcs_conn_t *conn) { long ret; if (gu_atomic_fetch_and_add(&conn->outer_close_count, 1) != 0) { return -EALREADY; } if ((ret = _close(conn, true)) == -EALREADY) { gu_info("recv_thread() already closing, joining thread."); /* _close() has already been called by gcs_recv_thread() and it is taking care of cleanup, just join the thread */ if ((ret = gu_thread_join (conn->recv_thread, NULL))) { gu_error ("Failed to join recv_thread(): %d (%s)", -ret, strerror(-ret)); } else { gu_info ("recv_thread() joined."); } } /* recv_thread() is supposed to set state to CLOSED when exiting */ assert (GCS_CONN_CLOSED == conn->state); return ret; } /* Frees resources associated with GCS connection handle */ long gcs_destroy (gcs_conn_t *conn) { long err; gu_cond_t tmp_cond; gu_cond_init (&tmp_cond, NULL); if ((err = gcs_sm_enter (conn->sm, &tmp_cond, false, true))) // need an error here { if (GCS_CONN_CLOSED != conn->state) { if (GCS_CONN_CLOSED > conn->state) gu_error ("Attempt to call gcs_destroy() before gcs_close(): " "state = %d", conn->state); gu_cond_destroy (&tmp_cond); return -EBADFD; } /* this should cancel all recv calls */ gu_fifo_destroy (conn->recv_q); gcs_shift_state (conn, GCS_CONN_DESTROYED); //DELETE conn->err = -EBADFD; /* we must unlock the mutex here to allow unfortunate threads * to acquire the lock and give up gracefully */ } else { gcs_sm_leave (conn->sm); gu_cond_destroy (&tmp_cond); err = -EBADFD; return err; } gu_cond_destroy (&tmp_cond); gcs_sm_destroy (conn->sm); if ((err = gcs_fifo_lite_destroy (conn->repl_q))) { gu_debug ("Error destroying repl FIFO: %d (%s)", err, strerror(-err)); return err; } if ((err = gcs_core_destroy (conn->core))) { gu_debug ("Error destroying core: %d (%s)", err, strerror(-err)); return err; } /* This must not last for long */ while (gu_mutex_destroy (&conn->fc_lock)); _cleanup_params (conn); gu_free (conn); 
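    /* The handle memory is released above; the caller must not dereference
     * conn once gcs_destroy() has returned. */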
return 0; } /* Puts action in the send queue and returns */ long gcs_sendv (gcs_conn_t* const conn, const struct gu_buf* const act_bufs, size_t const act_size, gcs_act_type_t const act_type, bool const scheduled) { if (gu_unlikely(act_size > GCS_MAX_ACT_SIZE)) return -EMSGSIZE; long ret = -ENOTCONN; /*! locking connection here to avoid race with gcs_close() * @note: gcs_repl() and gcs_recv() cannot lock connection * because they block indefinitely waiting for actions */ gu_cond_t tmp_cond; gu_cond_init (&tmp_cond, NULL); if (!(ret = gcs_sm_enter (conn->sm, &tmp_cond, scheduled, true))) { while ((GCS_CONN_OPEN >= conn->state) && (ret = gcs_core_send (conn->core, act_bufs, act_size, act_type)) == -ERESTART); gcs_sm_leave (conn->sm); gu_cond_destroy (&tmp_cond); } return ret; } long gcs_schedule (gcs_conn_t* conn) { return gcs_sm_schedule (conn->sm); } long gcs_interrupt (gcs_conn_t* conn, long handle) { return gcs_sm_interrupt (conn->sm, handle); } gcs_seqno_t gcs_caused(gcs_conn_t* conn) { return gcs_core_caused(conn->core); } /* Puts action in the send queue and returns after it is replicated */ long gcs_replv (gcs_conn_t* const conn, //!size > GCS_MAX_ACT_SIZE)) return -EMSGSIZE; long ret; assert (act); assert (act->size > 0); act->seqno_l = GCS_SEQNO_ILL; act->seqno_g = GCS_SEQNO_ILL; /* This is good - we don't have to do a copy because we wait */ struct gcs_repl_act repl_act(act_in, act); gu_mutex_init (&repl_act.wait_mutex, NULL); gu_cond_init (&repl_act.wait_cond, NULL); /* Send action and wait for signal from recv_thread * we need to lock a mutex before we can go wait for signal */ if (!(ret = gu_mutex_lock (&repl_act.wait_mutex))) { // Lock here does the following: // 1. serializes gcs_core_send() access between gcs_repl() and // gcs_send() // 2. 
avoids race with gcs_close() and gcs_destroy() if (!(ret = gcs_sm_enter (conn->sm, &repl_act.wait_cond, scheduled, true))) { struct gcs_repl_act** act_ptr; //#ifndef NDEBUG const void* const orig_buf = act->buf; //#endif // some hack here to achieve one if() instead of two: // ret = -EAGAIN part is a workaround for #569 // if (conn->state >= GCS_CONN_CLOSE) or (act_ptr == NULL) // ret will be -ENOTCONN if ((ret = -EAGAIN, conn->upper_limit >= conn->queue_len || act->type != GCS_ACT_TORDERED) && (ret = -ENOTCONN, GCS_CONN_OPEN >= conn->state) && (act_ptr = (struct gcs_repl_act**)gcs_fifo_lite_get_tail (conn->repl_q))) { *act_ptr = &repl_act; gcs_fifo_lite_push_tail (conn->repl_q); // Keep on trying until something else comes out while ((ret = gcs_core_send (conn->core, act_in, act->size, act->type)) == -ERESTART) {} if (ret < 0) { /* remove item from the queue, it will never be delivered */ gu_warn ("Send action {%p, %zd, %s} returned %d (%s)", act->buf, act->size,gcs_act_type_to_str(act->type), ret, strerror(-ret)); if (!gcs_fifo_lite_remove (conn->repl_q)) { gu_fatal ("Failed to remove unsent item from repl_q"); assert(0); ret = -ENOTRECOVERABLE; } } else { assert (ret == (ssize_t)act->size); } } gcs_sm_leave (conn->sm); assert(ret); /* now we can go waiting for action delivery */ if (ret >= 0) { gu_cond_wait (&repl_act.wait_cond, &repl_act.wait_mutex); #ifndef GCS_FOR_GARB /* assert (act->buf != 0); */ if (act->buf == 0) { /* Recv thread purged repl_q before action was delivered */ ret = -ENOTCONN; goto out; } #else assert (act->buf == 0); #endif /* GCS_FOR_GARB */ if (act->seqno_g < 0) { assert (GCS_SEQNO_ILL == act->seqno_l || GCS_ACT_TORDERED != act->type); if (act->seqno_g == GCS_SEQNO_ILL) { /* action was not replicated for some reason */ assert (orig_buf == act->buf); ret = -EINTR; } else { /* core provided an error code in global seqno */ assert (orig_buf != act->buf); ret = act->seqno_g; act->seqno_g = GCS_SEQNO_ILL; } if (orig_buf != act->buf) // action was allocated in gcache { gu_debug("Freeing gcache buffer %p after receiving %d", act->buf, ret); gcs_gcache_free (conn->gcache, act->buf); act->buf = orig_buf; } } } } #ifndef GCS_FOR_GARB out: #endif /* GCS_FOR_GARB */ gu_mutex_unlock (&repl_act.wait_mutex); } gu_mutex_destroy (&repl_act.wait_mutex); gu_cond_destroy (&repl_act.wait_cond); #ifdef GCS_DEBUG_GCS // gu_debug ("\nact_size = %u\nact_type = %u\n" // "act_id = %llu\naction = %p (%s)\n", // act->size, act->type, act->seqno_g, act->buf, act->buf); #endif return ret; } long gcs_request_state_transfer (gcs_conn_t *conn, int version, const void *req, size_t size, const char *donor, const gu_uuid_t* ist_uuid, gcs_seqno_t ist_seqno, gcs_seqno_t *local) { long ret = -ENOMEM; size_t donor_len = strlen(donor) + 1; // include terminating \0 size_t rst_size = size + donor_len + sizeof(*ist_uuid) + sizeof(ist_seqno) + 2; // for simplicity, allocate maximum space what we need here. char* rst = (char*)gu_malloc (rst_size); *local = GCS_SEQNO_ILL; if (rst) { gu_debug("ist_uuid[" GU_UUID_FORMAT "], ist_seqno[%lld]", GU_UUID_ARGS(ist_uuid), (long long)ist_seqno); int offset = 0; // version 0,1 /* RST format: |donor name|\0|app request| * anything more complex will require a special (de)serializer. * NOTE: this is sender part. Check gcs_group_handle_state_request() * for the receiver part. 
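         * Worked example for version < 2 (values are made up): with
         * donor = "node1" (donor_len = 6, '\0' included) and a 10-byte
         * application request the buffer becomes
         *   'n' 'o' 'd' 'e' '1' '\0' <10 request bytes>
         * and rst_size is trimmed down to size + donor_len = 16.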
*/ if (version < 2) { memcpy (rst + offset, donor, donor_len); offset += donor_len; memcpy (rst + offset, req, size); rst_size = size + donor_len; } // version 2(expose joiner's seqno and smart donor selection) // RST format: |donor_name|\0|'V'|version|ist_uuid|ist_seqno|app_request| // we expect 'version' could be hold by 'char' // since app_request v0 starts with sst method name // and app_request v1 starts with 'STRv1' // and ist_uuid starts with hex character in lower case. // it's safe to use 'V' as separator. else { memcpy (rst + offset, donor, donor_len); offset += donor_len; rst[offset++] = 'V'; rst[offset++] = (char)version; memcpy (rst + offset, ist_uuid, sizeof(*ist_uuid)); offset += sizeof(*ist_uuid); *(gcs_seqno_t*) (rst + offset) = gcs_seqno_htog(ist_seqno); offset += sizeof(ist_seqno); memcpy (rst + offset, req, size); } struct gcs_action action; action.buf = rst; action.size = (ssize_t)rst_size; action.type = GCS_ACT_STATE_REQ; ret = gcs_repl(conn, &action, false); gu_free (rst); *local = action.seqno_l; if (ret > 0) { assert (action.buf != rst); #ifndef GCS_FOR_GARB assert (action.buf != NULL); gcs_gcache_free (conn->gcache, action.buf); #else assert (action.buf == NULL); #endif assert (ret == (ssize_t)rst_size); assert (action.seqno_g >= 0); assert (action.seqno_l > 0); // on joiner global seqno stores donor index // on donor global seqno stores global seqno ret = action.seqno_g; } else { assert (/*action.buf == NULL ||*/ action.buf == rst); } } return ret; } long gcs_desync (gcs_conn_t* conn, gcs_seqno_t* local) { gu_uuid_t ist_uuid = {{0, }}; gcs_seqno_t ist_seqno = GCS_SEQNO_ILL; // for desync operation we use the lowest str_version. long ret = gcs_request_state_transfer (conn, 0, "", 1, GCS_DESYNC_REQ, &ist_uuid, ist_seqno, local); if (ret >= 0) { return 0; } else { return ret; } } static inline void GCS_FIFO_POP_HEAD (gcs_conn_t* conn, ssize_t size) { assert (conn->recv_q_size >= size); conn->recv_q_size -= size; gu_fifo_pop_head (conn->recv_q); } /* Returns when an action from another process is received */ long gcs_recv (gcs_conn_t* conn, struct gcs_action* action) { int err; struct gcs_recv_act* recv_act = NULL; assert (action); if ((recv_act = (struct gcs_recv_act*)gu_fifo_get_head (conn->recv_q, &err))) { conn->queue_len = gu_fifo_length (conn->recv_q) - 1; bool send_cont = gcs_fc_cont_begin (conn); bool send_sync = gcs_send_sync_begin (conn); action->buf = (void*)recv_act->rcvd.act.buf; action->size = recv_act->rcvd.act.buf_len; action->type = recv_act->rcvd.act.type; action->seqno_g = recv_act->rcvd.id; action->seqno_l = recv_act->local_id; if (gu_unlikely (GCS_ACT_CONF == action->type)) { err = gu_fifo_cancel_gets (conn->recv_q); if (err) { gu_fatal ("Internal logic error: failed to cancel recv_q " "\"gets\": %d (%s). Aborting.", err, strerror(-err)); gu_abort(); } } GCS_FIFO_POP_HEAD (conn, action->size); // release the queue if (gu_unlikely(send_cont) && (err = gcs_fc_cont_end(conn))) { // We have successfully received an action, but failed to send // important control message. What do we do? Inability to send CONT // can block the whole cluster. There are only conn->queue_len - 1 // attempts to do that (that's how many times we'll get here). // Perhaps if the last attempt fails, we should crash. if (conn->queue_len > 0) { gu_warn ("Failed to send CONT message: %d (%s). " "Attempts left: %ld", err, strerror(-err), conn->queue_len); } else { gu_fatal ("Last opportunity to send CONT message failed: " "%d (%s). 
Aborting to avoid cluster lock-up...", err, strerror(-err)); gcs_close(conn); gu_abort(); } } else if (gu_unlikely(send_sync) && (err = gcs_send_sync_end (conn))) { gu_warn ("Failed to send SYNC message: %d (%s). Will try later.", err, strerror(-err)); } return action->size; } else { action->buf = NULL; action->size = 0; action->type = GCS_ACT_ERROR; action->seqno_g = GCS_SEQNO_ILL; action->seqno_l = GCS_SEQNO_ILL; switch (err) { case -ENODATA: assert (GCS_CONN_CLOSED == conn->state); return GCS_CLOSED_ERROR; default: return err; } } } long gcs_resume_recv (gcs_conn_t* conn) { int ret = GCS_CLOSED_ERROR; ret = gu_fifo_resume_gets (conn->recv_q); if (ret) { if (conn->state < GCS_CONN_CLOSED) { gu_fatal ("Internal logic error: failed to resume \"gets\" on " "recv_q: %d (%s). Aborting.", ret, strerror (-ret)); gcs_close (conn); gu_abort(); } else { ret = GCS_CLOSED_ERROR; } } return ret; } long gcs_wait (gcs_conn_t* conn) { if (gu_likely(GCS_CONN_SYNCED == conn->state)) { return (conn->stop_count > 0 || (conn->queue_len > conn->upper_limit)); } else { switch (conn->state) { case GCS_CONN_OPEN: return -ENOTCONN; case GCS_CONN_CLOSED: case GCS_CONN_DESTROYED: return GCS_CLOSED_ERROR; default: return -EAGAIN; // wait until get sync } } } long gcs_conf_set_pkt_size (gcs_conn_t *conn, long pkt_size) { if (conn->params.max_packet_size == pkt_size) return pkt_size; return gcs_set_pkt_size (conn, pkt_size); } long gcs_set_last_applied (gcs_conn_t* conn, gcs_seqno_t seqno) { gu_cond_t cond; gu_cond_init (&cond, NULL); long ret = gcs_sm_enter (conn->sm, &cond, false, false); if (!ret) { ret = gcs_core_set_last_applied (conn->core, seqno); gcs_sm_leave (conn->sm); } gu_cond_destroy (&cond); return ret; } long gcs_join (gcs_conn_t* conn, gcs_seqno_t seqno) { conn->join_seqno = seqno; conn->need_to_join = true; return _join (conn, seqno); } gcs_seqno_t gcs_local_sequence(gcs_conn_t* conn) { return gu_atomic_fetch_and_add(&conn->local_act_id, 1); } void gcs_get_stats (gcs_conn_t* conn, struct gcs_stats* stats) { gu_fifo_stats_get (conn->recv_q, &stats->recv_q_len, &stats->recv_q_len_max, &stats->recv_q_len_min, &stats->recv_q_len_avg); stats->recv_q_size = conn->recv_q_size; gcs_sm_stats_get (conn->sm, &stats->send_q_len, &stats->send_q_len_max, &stats->send_q_len_min, &stats->send_q_len_avg, &stats->fc_paused_ns, &stats->fc_paused_avg); stats->fc_sent = conn->stats_fc_sent; stats->fc_received = conn->stats_fc_received; } void gcs_flush_stats(gcs_conn_t* conn) { gu_fifo_stats_flush(conn->recv_q); gcs_sm_stats_flush (conn->sm); conn->stats_fc_sent = 0; conn->stats_fc_received = 0; } void gcs_get_status(gcs_conn_t* conn, gu::Status& status) { if (conn->state < GCS_CONN_CLOSED) { gcs_core_get_status(conn->core, status); } } static long _set_fc_limit (gcs_conn_t* conn, const char* value) { long long limit; const char* const endptr = gu_str2ll(value, &limit); if (limit > 0LL && *endptr == '\0') { if (limit > LONG_MAX) limit = LONG_MAX; gu_fifo_lock(conn->recv_q); { if (!gu_mutex_lock (&conn->fc_lock)) { conn->params.fc_base_limit = limit; _set_fc_limits (conn); gu_config_set_int64 (conn->config, GCS_PARAMS_FC_LIMIT, conn->params.fc_base_limit); gu_mutex_unlock (&conn->fc_lock); } else { gu_fatal ("Failed to lock mutex."); abort(); } } gu_fifo_release (conn->recv_q); return 0; } else { return -EINVAL; } } static long _set_fc_factor (gcs_conn_t* conn, const char* value) { double factor; const char* const endptr = gu_str2dbl(value, &factor); if (factor >= 0.0 && factor <= 1.0 && *endptr == '\0') { if (factor == 
conn->params.fc_resume_factor) return 0; gu_fifo_lock(conn->recv_q); { if (!gu_mutex_lock (&conn->fc_lock)) { conn->params.fc_resume_factor = factor; _set_fc_limits (conn); gu_config_set_double (conn->config, GCS_PARAMS_FC_FACTOR, conn->params.fc_resume_factor); gu_mutex_unlock (&conn->fc_lock); } else { gu_fatal ("Failed to lock mutex."); abort(); } } gu_fifo_release (conn->recv_q); return 0; } else { return -EINVAL; } } static long _set_fc_debug (gcs_conn_t* conn, const char* value) { bool debug; const char* const endptr = gu_str2bool(value, &debug); if (*endptr == '\0') { if (conn->params.fc_debug == debug) return 0; conn->params.fc_debug = debug; gcs_fc_debug (&conn->stfc, debug); gu_config_set_bool (conn->config, GCS_PARAMS_FC_DEBUG, debug); return 0; } else { return -EINVAL; } } static long _set_sync_donor (gcs_conn_t* conn, const char* value) { bool sd; const char* const endptr = gu_str2bool (value, &sd); if (endptr[0] != '\0') return -EINVAL; if (conn->params.sync_donor != sd) { conn->params.sync_donor = sd; conn->max_fc_state = sd ? GCS_CONN_DONOR : GCS_CONN_JOINED; } return 0; } static long _set_pkt_size (gcs_conn_t* conn, const char* value) { long long pkt_size; const char* const endptr = gu_str2ll (value, &pkt_size); if (pkt_size > 0 && *endptr == '\0') { if (pkt_size > LONG_MAX) pkt_size = LONG_MAX; if (conn->params.max_packet_size == pkt_size) return 0; long ret = gcs_set_pkt_size (conn, pkt_size); if (ret >= 0) { ret = 0; gu_config_set_int64(conn->config,GCS_PARAMS_MAX_PKT_SIZE,pkt_size); } return ret; } else { // gu_warn ("Invalid value for %s: '%s'", GCS_PARAMS_PKT_SIZE, value); return -EINVAL; } } static long _set_recv_q_hard_limit (gcs_conn_t* conn, const char* value) { long long limit; const char* const endptr = gu_str2ll (value, &limit); if (limit > 0 && *endptr == '\0') { if (limit > LONG_MAX) limit = LONG_MAX; long long limit_fixed = limit * gcs_fc_hard_limit_fix; if (conn->params.recv_q_hard_limit == limit_fixed) return 0; gu_config_set_int64 (conn->config, GCS_PARAMS_RECV_Q_HARD_LIMIT, limit); conn->params.recv_q_hard_limit = limit_fixed; return 0; } else { return -EINVAL; } } static long _set_recv_q_soft_limit (gcs_conn_t* conn, const char* value) { double dbl; const char* const endptr = gu_str2dbl (value, &dbl); if (dbl >= 0.0 && dbl < 1.0 && *endptr == '\0') { if (dbl == conn->params.recv_q_soft_limit) return 0; gu_config_set_double (conn->config, GCS_PARAMS_RECV_Q_SOFT_LIMIT, dbl); conn->params.recv_q_soft_limit = dbl; return 0; } else { return -EINVAL; } } static long _set_max_throttle (gcs_conn_t* conn, const char* value) { double dbl; const char* const endptr = gu_str2dbl (value, &dbl); if (dbl >= 0.0 && dbl < 1.0 && *endptr == '\0') { if (dbl == conn->params.max_throttle) return 0; gu_config_set_double (conn->config, GCS_PARAMS_MAX_THROTTLE, dbl); conn->params.max_throttle = dbl; return 0; } else { return -EINVAL; } } bool gcs_register_params (gu_config_t* const conf) { return (gcs_params_register (conf) | gcs_core_register (conf)); } long gcs_param_set (gcs_conn_t* conn, const char* key, const char *value) { if (!strcmp (key, GCS_PARAMS_FC_LIMIT)) { return _set_fc_limit (conn, value); } else if (!strcmp (key, GCS_PARAMS_FC_FACTOR)) { return _set_fc_factor (conn, value); } else if (!strcmp (key, GCS_PARAMS_FC_DEBUG)) { return _set_fc_debug (conn, value); } else if (!strcmp (key, GCS_PARAMS_SYNC_DONOR)) { return _set_sync_donor (conn, value); } else if (!strcmp (key, GCS_PARAMS_MAX_PKT_SIZE)) { return _set_pkt_size (conn, value); } else if (!strcmp (key, 
GCS_PARAMS_RECV_Q_HARD_LIMIT)) { return _set_recv_q_hard_limit (conn, value); } else if (!strcmp (key, GCS_PARAMS_RECV_Q_SOFT_LIMIT)) { return _set_recv_q_soft_limit (conn, value); } else if (!strcmp (key, GCS_PARAMS_MAX_THROTTLE)) { return _set_max_throttle (conn, value); } else { return gcs_core_param_set (conn->core, key, value); } } const char* gcs_param_get (gcs_conn_t* conn, const char* key) { gu_warn ("Not implemented: %s", __FUNCTION__); return NULL; } galera-3-25.3.20/gcs/src/gcs_defrag.hpp0000644000015300001660000000365513042054732017307 0ustar jenkinsjenkins/* * Copyright (C) 2008 Codership Oy * * $Id$ */ /*! * Receiving action context */ #ifndef _gcs_defrag_h_ #define _gcs_defrag_h_ #include "gcs.hpp" // for gcs_seqno_t et al. #include "gcs_act_proto.hpp" #include "gcs_act.hpp" #include "gcs_gcache.hpp" #include // for memset() #include typedef struct gcs_defrag { gcache_t* cache; gcs_seqno_t sent_id; // sent id (unique for a node) uint8_t* head; // head of action buffer uint8_t* tail; // tail of action data size_t size; size_t received; ulong frag_no; // number of fragment received bool reset; } gcs_defrag_t; static inline void gcs_defrag_init (gcs_defrag_t* df, gcache_t* cache) { memset (df, 0, sizeof (*df)); df->cache = cache; df->sent_id = GCS_SEQNO_ILL; } /*! * Handle received action fragment * * @return 0 - success, * size of action - success, full action received, * negative - error. */ extern ssize_t gcs_defrag_handle_frag (gcs_defrag_t* df, const gcs_act_frag_t* frg, struct gcs_act* act, bool local); /*! Deassociate, but don't deallocate action resources */ static inline void gcs_defrag_forget (gcs_defrag_t* df) { gcs_defrag_init (df, df->cache); } /*! Free resources associated with defrag (for lost node cleanup) */ static inline void gcs_defrag_free (gcs_defrag_t* df) { #ifndef GCS_FOR_GARB if (df->head) { gcs_gcache_free (df->cache, df->head); // df->head, df->tail will be zeroed in gcs_defrag_init() below } #else assert(NULL == df->head); #endif gcs_defrag_init (df, df->cache); } /*! Mark current action as reset */ static inline void gcs_defrag_reset (gcs_defrag_t* df) { df->reset = true; } #endif /* _gcs_defrag_h_ */ galera-3-25.3.20/gcs/src/gcs_act_proto.hpp0000644000015300001660000000345313042054732020045 0ustar jenkinsjenkins/* * Copyright (C) 2008-2014 Codership Oy * * $Id$ */ /* * Interface to action protocol * (to be extended to support protocol versions, currently supports only v0) */ #ifndef _gcs_act_proto_h_ #define _gcs_act_proto_h_ #include "gcs.hpp" // for gcs_seqno_t #include #include typedef uint8_t gcs_proto_t; /*! Supported protocol range (for now only version 0 is supported) */ #define GCS_ACT_PROTO_MAX 0 /*! Internal action fragment data representation */ typedef struct gcs_act_frag { gcs_seqno_t act_id; size_t act_size; const void* frag; // shall override it only once size_t frag_len; unsigned long frag_no; gcs_act_type_t act_type; int proto_ver; } gcs_act_frag_t; /*! Writes header data into actual header of the message. * Remainig fragment buf and length is in frag->frag and frag->frag_len */ extern long gcs_act_proto_write (gcs_act_frag_t* frag, void* buf, size_t buf_len); /*! Reads header data from the actual header of the message * Remainig fragment buf and length is in frag->frag and frag->frag_len */ extern long gcs_act_proto_read (gcs_act_frag_t* frag, const void* buf, size_t buf_len); /*! Increments fragment counter when action remains the same. 
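 * The counter is the fourth 32-bit word of the fragment header and is kept
 * in the on-wire byte order (hence the gtohl()/htogl() round trip), so it
 * can be bumped in place without rewriting the whole header. Caller-side
 * sketch (annotation only, mirroring the loop in gcs_core_send() later in
 * this archive; variable names are taken from that function):
 *
 *   do {
 *       ... copy the next chunk after the header and send the buffer ...
 *   } while (act_size && gcs_act_proto_inc (conn->send_buf));
 *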
* * @return non-negative counter value on success */ static inline long gcs_act_proto_inc (void* buf) { uint32_t frag_no = gtohl(((uint32_t*)buf)[3]) + 1; #ifdef GCS_DEBUG_PROTO if (!frag_no) return -EOVERFLOW; #endif ((uint32_t*)buf)[3] = htogl(frag_no); return frag_no; } /*! Returns protocol header size */ extern long gcs_act_proto_hdr_size (long version); /*! Returns message protocol version */ static inline int gcs_act_proto_ver (void* buf) { return *((uint8_t*)buf); } #endif /* _gcs_act_proto_h_ */ galera-3-25.3.20/gcs/src/gcs_msg_type.cpp0000644000015300001660000000045013042054732017667 0ustar jenkinsjenkins/* * Copyright (C) 2008 Codership Oy * * $Id$ */ #include "gcs_msg_type.hpp" const char* gcs_msg_type_string[GCS_MSG_MAX] = { "ERROR", "ACTION", "LAST", "COMPONENT", "STATE_UUID", "STATE_MSG", "JOIN", "SYNC", "FLOW", "CAUSAL" }; galera-3-25.3.20/gcs/src/gcs_test.cpp0000644000015300001660000005563113042054732017032 0ustar jenkinsjenkins/* * Copyright (C) 2008-2014 Codership Oy * * $Id$ */ /***********************************************************/ /* This program imitates 3rd party application and */ /* tests GCS library in a dummy standalone configuration */ /***********************************************************/ #include #include #include #include #include #include #include #include #include #include #include "gcs.hpp" #include "gcs_test.hpp" #define USE_WAIT #define gcs_malloc(a) ((a*) malloc (sizeof (a))) static pthread_mutex_t gcs_test_lock = PTHREAD_MUTEX_INITIALIZER; static gcache_t* gcache = NULL; typedef struct gcs_test_log { FILE *file; pthread_mutex_t lock; } gcs_test_log_t; #define SEND_LOG "/dev/shm/gcs_test_send.log" #define RECV_LOG "/dev/shm/gcs_test_recv.log" static gcs_test_log_t *send_log, *recv_log; static bool throughput = true; // bench for throughput static bool total = true; // also enable TO locking typedef enum { GCS_TEST_SEND, GCS_TEST_RECV, GCS_TEST_REPL } gcs_test_repl_t; typedef struct gcs_test_thread { pthread_t thread; long id; struct gcs_action act; long n_tries; void* msg; char* log_msg; } gcs_test_thread_t; #define MAX_MSG_LEN (1 << 16) static long gcs_test_thread_create (gcs_test_thread_t *t, long id, long n_tries) { t->id = id; t->msg = calloc (MAX_MSG_LEN, sizeof(char)); t->act.buf = t->msg; t->act.size = MAX_MSG_LEN; t->act.seqno_g = GCS_SEQNO_ILL; t->act.seqno_l = GCS_SEQNO_ILL; t->act.type = GCS_ACT_TORDERED; t->n_tries = n_tries; if (t->msg) { t->log_msg = (char*)calloc (MAX_MSG_LEN, sizeof(char)); if (t->log_msg) return 0; } return -ENOMEM; } static long gcs_test_thread_destroy (gcs_test_thread_t *t) { if (t->msg) free (t->msg); if (t->log_msg) free (t->log_msg); return 0; } typedef struct gcs_test_thread_pool { long n_threads; long n_tries; long n_started; gcs_test_repl_t type; gcs_test_thread_t *threads; } gcs_test_thread_pool_t; static long gcs_test_thread_pool_create (gcs_test_thread_pool_t *pool, const gcs_test_repl_t type, const long n_threads, const long n_tries) { long err = 0; long i; // pool = gcs_malloc (gcs_test_thread_pool_t); // if (!pool) { err = errno; goto out; } pool->n_threads = n_threads; pool->type = type; pool->n_tries = n_tries; pool->n_started = 0; pool->threads = (gcs_test_thread_t *) calloc (pool->n_threads, sizeof (gcs_test_thread_t)); if (!pool->threads) { err = errno; fprintf (stderr, "Failed to allocate %ld thread objects: %ld (%s)\n", n_threads, err, strerror(err)); goto out1; } for (i = 0; i < pool->n_threads; i++) { if ((err = gcs_test_thread_create (pool->threads + i, i, n_tries))) { err = errno; 
fprintf (stderr, "Failed to create thread object %ld: %ld (%s)\n", i, err, strerror(err)); goto out2; } } // printf ("Created %ld thread objects\n", i); return 0; out2: while (i) { i--; gcs_test_thread_destroy (pool->threads + i); } free (pool->threads); out1: free (pool); //out: return err; } static void gcs_test_thread_pool_destroy (gcs_test_thread_pool_t* pool) { long i; if (pool->threads) { for (i = 0; i < pool->n_threads; i++) { gcs_test_thread_destroy (pool->threads + i); } free (pool->threads); } } static pthread_mutex_t make_msg_lock = PTHREAD_MUTEX_INITIALIZER; //static long total_tries; static inline long test_make_msg (char* msg, const long mlen) { static gcs_seqno_t count = 1; long len = 0; if (!throughput) { pthread_mutex_lock (&make_msg_lock); count++; pthread_mutex_unlock (&make_msg_lock); len = snprintf (msg, mlen, "%10d %9llu %s", rand(), (unsigned long long)count++, gcs_test_data); } else { len = rand() % mlen + 1; // just random length, we don't care about // contents } if (len >= mlen) return mlen; else return len; } static long test_log_open (gcs_test_log_t **log, const char *name) { char real_name[1024]; gcs_test_log_t *l = gcs_malloc (gcs_test_log_t); if (!l) return errno; snprintf (real_name, 1024, "%s.%lld", name, (long long)getpid()); // cppcheck-suppress memleak if (!(l->file = fopen (real_name, "w"))) return errno; pthread_mutex_init (&l->lock, NULL); *log = l; return 0; } static long test_log_close (gcs_test_log_t **log) { long err = 0; gcs_test_log_t *l = *log; if (l) { pthread_mutex_lock (&l->lock); err = fclose (l->file); pthread_mutex_unlock (&l->lock); pthread_mutex_destroy (&l->lock); } return err; } static inline long gcs_test_log_msg (gcs_test_log_t *log, const char *msg) { long err = 0; err = fprintf (log->file, "%s\n", msg); return err; } gcs_conn_t *gcs = NULL; gu_to_t *to = NULL; long msg_sent = 0; long msg_recvd = 0; long msg_repld = 0; long msg_len = 0; size_t size_sent = 0; size_t size_repld = 0; size_t size_recvd = 0; static inline long test_recv_log_create(gcs_test_thread_t* thread) { return snprintf (thread->log_msg, MAX_MSG_LEN - 1, "Thread %3ld(REPL): act_id = %lld, local_act_id = %lld, " "len = %lld: %s", thread->id, (long long)thread->act.seqno_g, (long long)thread->act.seqno_l, (long long)thread->act.size, (const char*)thread->act.buf); } static inline long test_send_log_create(gcs_test_thread_t* thread) { return snprintf (thread->log_msg, MAX_MSG_LEN - 1, "Thread %3ld (REPL): len = %lld, %s", thread->id, (long long) thread->act.size, (const char*)thread->act.buf); } static inline long test_log_msg (gcs_test_log_t* log, const char* msg) { long ret; pthread_mutex_lock (&log->lock); ret = fprintf (recv_log->file, "%s\n", msg); pthread_mutex_lock (&log->lock); return ret; } static inline long test_log_in_to (gu_to_t* to, gcs_seqno_t seqno, const char* msg) { long ret = 0; while ((ret = gu_to_grab (to, seqno)) == -EAGAIN) usleep(10000); if (!ret) {// success if (msg != NULL) gcs_test_log_msg (recv_log, msg); ret = gu_to_release (to, seqno); } return ret; } static gcs_seqno_t group_seqno = 0; static inline long test_send_last_applied (gcs_conn_t* gcs, gcs_seqno_t my_seqno) { long ret = 0; #define SEND_LAST_MASK ((1 << 14) - 1) // every 16K seqno if (!(my_seqno & SEND_LAST_MASK)) { ret = gcs_set_last_applied (gcs, my_seqno); if (ret) { fprintf (stderr,"gcs_set_last_applied(%lld) returned %ld\n", (long long)my_seqno, ret); } // if (!throughput) { fprintf (stdout, "Last applied: my = %lld, group = %lld\n", (long long)my_seqno, (long 
long)group_seqno); // } } return ret; } static inline long test_before_send (gcs_test_thread_t* thread) { #ifdef USE_WAIT static const struct timespec wait = { 0, 10000000 }; #endif long ret = 0; /* create a message */ thread->act.size = test_make_msg ((char*)thread->msg, msg_len); thread->act.buf = thread->msg; if (thread->act.size <= 0) return -1; if (!throughput) { /* log message before replication */ ret = test_send_log_create (thread); ret = test_log_msg (send_log, thread->log_msg); } #ifdef USE_WAIT while ((ret = gcs_wait(gcs)) && ret > 0) nanosleep (&wait, NULL); #endif return ret; } static inline long test_after_recv (gcs_test_thread_t* thread) { long ret; if (!throughput) { /* log message after replication */ ret = test_recv_log_create (thread); ret = test_log_in_to (to, thread->act.seqno_l, thread->log_msg); } else if (total) { ret = test_log_in_to (to, thread->act.seqno_l, NULL); } else { gu_to_self_cancel (to, thread->act.seqno_l); } ret = test_send_last_applied (gcs, thread->act.seqno_g); // fprintf (stdout, "SEQNO applied %lld", thread->local_act_id); if (thread->act.type == GCS_ACT_TORDERED) gcache_free (gcache, thread->act.buf); return ret; } void *gcs_test_repl (void *arg) { gcs_test_thread_t *thread = (gcs_test_thread_t*)arg; // long i = thread->n_tries; long ret = 0; pthread_mutex_lock (&gcs_test_lock); pthread_mutex_unlock (&gcs_test_lock); while (thread->n_tries) { ret = test_before_send (thread); if (ret < 0) break; /* replicate message */ ret = gcs_repl (gcs, &thread->act, false); if (ret < 0) { assert (thread->act.seqno_g == GCS_SEQNO_ILL); assert (thread->act.seqno_l == GCS_SEQNO_ILL); break; } msg_repld++; size_repld += thread->act.size; // usleep ((rand() & 1) << 1); test_after_recv (thread); // puts (thread->log_msg); fflush (stdout); } // fprintf (stderr, "REPL thread %ld exiting: %s\n", // thread->id, strerror(-ret)); return NULL; } void *gcs_test_send (void *arg) { long ret = 0; gcs_test_thread_t *thread = (gcs_test_thread_t*)arg; // long i = thread->n_tries; pthread_mutex_lock (&gcs_test_lock); pthread_mutex_unlock (&gcs_test_lock); while (thread->n_tries) { ret = test_before_send (thread); if (ret < 0) break; /* send message to group */ ret = gcs_send (gcs, thread->act.buf, thread->act.size, GCS_ACT_TORDERED, false); if (ret < 0) break; //sleep (1); msg_sent++; size_sent += thread->act.size; } // fprintf (stderr, "SEND thread %ld exiting: %s\n", // thread->id, strerror(-ret)); return NULL; } static void gcs_test_handle_configuration (gcs_conn_t* gcs, gcs_test_thread_t* thread) { long ret; static gcs_seqno_t conf_id = 0; gcs_act_conf_t* conf = (gcs_act_conf_t*)thread->msg; gu_uuid_t ist_uuid = {{0, }}; gcs_seqno_t ist_seqno = GCS_SEQNO_ILL; fprintf (stdout, "Got GCS_ACT_CONF: Conf: %lld, " "seqno: %lld, members: %ld, my idx: %ld, local seqno: %lld\n", (long long)conf->conf_id, (long long)conf->seqno, conf->memb_num, conf->my_idx, (long long)thread->act.seqno_l); fflush (stdout); // NOTE: what really needs to be checked is seqno and group_uuid, but here // we don't keep track of them (and don't do real transfers), // so for simplicity, just check conf_id. 
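/* Illustrative sketch (annotation, not part of the test): the total-order
 * pattern this file uses around every delivered action. A thread that must
 * process an action in order grabs its local seqno slot (retrying while
 * gu_to_grab() returns -EAGAIN), does the work, then releases the slot; an
 * action that needs no ordered processing gets gu_to_self_cancel() so that
 * later seqnos are not blocked behind it. Names ret/seqno_l are placeholders.
 *
 *   while ((ret = gu_to_grab (to, seqno_l)) == -EAGAIN) usleep (10000);
 *   if (0 == ret) {
 *       ... ordered work ...
 *       gu_to_release (to, seqno_l);
 *   }
 *   // or, when there is nothing to do in order:
 *   gu_to_self_cancel (to, seqno_l);
 */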
while (-EAGAIN == (ret = gu_to_grab (to, thread->act.seqno_l))); if (0 == ret) { if (conf->my_state == GCS_NODE_STATE_PRIM) { gcs_seqno_t seqno, s; fprintf (stdout,"Gap in configurations: ours: %lld, group: %lld.\n", (long long)conf_id, (long long)conf->conf_id); fflush (stdout); fprintf (stdout, "Requesting state transfer up to %lld: %s\n", (long long)conf->seqno, // this is global seqno strerror (-gcs_request_state_transfer (gcs, 0, &conf->seqno, sizeof(conf->seqno), "", &ist_uuid, ist_seqno, &seqno))); // pretend that state transfer is complete, cancel every action up // to seqno for (s = thread->act.seqno_l + 1; s <= seqno; s++) { gu_to_self_cancel (to, s); // this is local seqno } fprintf (stdout, "Sending JOIN: %s\n", strerror(-gcs_join(gcs, 0))); fflush (stdout); } gcs_resume_recv (gcs); gu_to_release (to, thread->act.seqno_l); } else { fprintf (stderr, "Failed to grab TO: %ld (%s)", ret, strerror(ret)); } conf_id = conf->conf_id; } void *gcs_test_recv (void *arg) { long ret = 0; gcs_test_thread_t *thread = (gcs_test_thread_t*)arg; while (thread->n_tries) { /* receive message from group */ while ((ret = gcs_recv (gcs, &thread->act)) == -ECANCELED) { usleep (10000); } if (ret <= 0) { fprintf (stderr, "gcs_recv() %s: %ld (%s). Thread exits.\n", ret < 0 ? "failed" : "connection closed", ret, strerror(-ret)); assert (thread->act.buf == NULL); assert (thread->act.size == 0); assert (thread->act.seqno_g == GCS_SEQNO_ILL); assert (thread->act.seqno_l == GCS_SEQNO_ILL); assert (thread->act.type == GCS_ACT_ERROR); break; } assert (thread->act.type < GCS_ACT_ERROR); msg_recvd++; size_recvd += thread->act.size; switch (thread->act.type) { case GCS_ACT_TORDERED: test_after_recv (thread); //puts (thread->log_msg); fflush (stdout); break; case GCS_ACT_COMMIT_CUT: group_seqno = *(gcs_seqno_t*)thread->act.buf; gu_to_self_cancel (to, thread->act.seqno_l); break; case GCS_ACT_CONF: gcs_test_handle_configuration (gcs, thread); break; case GCS_ACT_STATE_REQ: fprintf (stdout, "Got STATE_REQ\n"); gu_to_grab (to, thread->act.seqno_l); fprintf (stdout, "Sending JOIN: %s\n", strerror(-gcs_join(gcs, 0))); fflush (stdout); gu_to_release (to, thread->act.seqno_l); break; case GCS_ACT_JOIN: fprintf (stdout, "Joined\n"); gu_to_self_cancel (to, thread->act.seqno_l); break; case GCS_ACT_SYNC: fprintf (stdout, "Synced\n"); gu_to_self_cancel (to, thread->act.seqno_l); break; default: fprintf (stderr, "Unexpected action type: %d\n", thread->act.type); } } // fprintf (stderr, "RECV thread %ld exiting: %s\n", // thread->id, strerror(-ret)); return NULL; } static long gcs_test_thread_pool_start (gcs_test_thread_pool_t *pool) { long i; long err = 0; void * (* thread_routine) (void *); switch (pool->type) { case GCS_TEST_REPL: thread_routine = gcs_test_repl; break; case GCS_TEST_SEND: thread_routine = gcs_test_send; break; case GCS_TEST_RECV: thread_routine = gcs_test_recv; break; default: fprintf (stderr, "Bad repl type %u\n", pool->type); return -1; } for (i = 0; i < pool->n_threads; i++) { if ((err = pthread_create (&pool->threads[i].thread, NULL, thread_routine, &pool->threads[i]))) break; } pool->n_started = i; printf ("Started %ld threads of %s type (pool: %p)\n", pool->n_started, GCS_TEST_REPL == pool->type ? "REPL" : (GCS_TEST_SEND == pool->type ? 
"SEND" :"RECV"), (void*)pool); return 0; } static long gcs_test_thread_pool_join (const gcs_test_thread_pool_t *pool) { long i; for (i = 0; i < pool->n_started; i++) { pthread_join (pool->threads[i].thread, NULL); } return 0; } static long gcs_test_thread_pool_stop (const gcs_test_thread_pool_t *pool) { long i; for (i = 0; i < pool->n_started; i++) { pool->threads[i].n_tries = 0; } return 0; } long gcs_test_thread_pool_cancel (const gcs_test_thread_pool_t *pool) { long i; printf ("Canceling pool: %p\n", (void*)pool); fflush(stdout); printf ("pool type: %u, pool threads: %ld\n", pool->type, pool->n_started); fflush(stdout); for (i = 0; i < pool->n_started; i++) { printf ("Cancelling %ld\n", i); fflush(stdout); pthread_cancel (pool->threads[i].thread); pool->threads[i].n_tries = 0; } return 0; } typedef struct gcs_test_conf { long n_tries; long n_repl; long n_send; long n_recv; const char* backend; } gcs_test_conf_t; static const char* DEFAULT_BACKEND = "dummy://"; static long gcs_test_conf (gcs_test_conf_t *conf, long argc, char *argv[]) { char *endptr; /* defaults */ conf->n_tries = 10; conf->n_repl = 10; conf->n_send = 0; conf->n_recv = 1; conf->backend = DEFAULT_BACKEND; switch (argc) { case 6: conf->n_recv = strtol (argv[5], &endptr, 10); if ('\0' != *endptr) goto error; case 5: conf->n_send = strtol (argv[4], &endptr, 10); if ('\0' != *endptr) goto error; case 4: conf->n_repl = strtol (argv[3], &endptr, 10); if ('\0' != *endptr) goto error; case 3: conf->n_tries = strtol (argv[2], &endptr, 10); if ('\0' != *endptr) goto error; case 2: conf->backend = argv[1]; break; default: break; } printf ("Config: n_tries = %ld, n_repl = %ld, n_send = %ld, n_recv = %ld, " "backend = %s\n", conf->n_tries, conf->n_repl, conf->n_send, conf->n_recv, conf->backend); return 0; error: printf ("Usage: %s [backend] [tries:%ld] [repl threads:%ld] " "[send threads: %ld] [recv threads: %ld]\n", argv[0], conf->n_tries, conf->n_repl, conf->n_send, conf->n_recv); exit (EXIT_SUCCESS); } static inline void test_print_stat (long msgs, size_t size, double interval) { printf ("%7ld (%7.1f per sec.) 
/ %7zuKb (%7.1f Kb/s)\n", msgs, (double)msgs/interval, size >> 10, (double)(size >> 10)/interval); } int main (int argc, char *argv[]) { long err = 0; gcs_test_conf_t conf; gcs_test_thread_pool_t repl_pool, send_pool, recv_pool; const char *channel = "my_channel"; struct timeval t_begin, t_end; gu_config_t* gconf; bool bstrap; gcs_conf_debug_on(); // turn on debug messages if ((err = gcs_test_conf (&conf, argc, argv))) goto out; if (!throughput) { if ((err = test_log_open (&send_log, SEND_LOG))) goto out; if ((err = test_log_open (&recv_log, RECV_LOG))) goto out; } to = gu_to_create ((conf.n_repl + conf.n_recv + 1)*2, GCS_SEQNO_FIRST); if (!to) goto out; // total_tries = conf.n_tries * (conf.n_repl + conf.n_send); printf ("Opening connection: channel = %s, backend = %s\n", channel, conf.backend); gconf = gu_config_create (); if (!gconf) goto out; if (gu_config_add(gconf, "gcache.size", "0")) goto out; if (gu_config_add(gconf, "gcache.page_size", "1M")) goto out; if (!(gcache = gcache_create (gconf, ""))) goto out; if (!(gcs = gcs_create (gconf, gcache, NULL, NULL, 0, 0))) goto out; puts ("debug"); fflush(stdout); /* the following hack won't work if there is 0.0.0.0 in URL options */ bstrap = (NULL != strstr(conf.backend, "0.0.0.0")); if ((err = gcs_open (gcs, channel, conf.backend, bstrap))) goto out; printf ("Connected\n"); msg_len = 1300; if (msg_len > MAX_MSG_LEN) msg_len = MAX_MSG_LEN; gcs_conf_set_pkt_size (gcs, 7570); // to test fragmentation if ((err = gcs_test_thread_pool_create (&repl_pool, GCS_TEST_REPL, conf.n_repl, conf.n_tries))) goto out; if ((err = gcs_test_thread_pool_create (&send_pool, GCS_TEST_SEND, conf.n_send, conf.n_tries))) goto out; if ((err = gcs_test_thread_pool_create (&recv_pool, GCS_TEST_RECV, conf.n_recv, conf.n_tries))) goto out; pthread_mutex_lock (&gcs_test_lock); gcs_test_thread_pool_start (&recv_pool); gcs_test_thread_pool_start (&repl_pool); gcs_test_thread_pool_start (&send_pool); printf ("Press any key to start the load:"); fgetc (stdin); puts ("Started load."); gettimeofday (&t_begin, NULL); printf ("Waiting for %ld seconds\n", conf.n_tries); fflush (stdout); pthread_mutex_unlock (&gcs_test_lock); usleep (conf.n_tries*1000000); puts ("Stopping SEND and REPL threads..."); fflush(stdout); fflush(stderr); gcs_test_thread_pool_stop (&send_pool); gcs_test_thread_pool_stop (&repl_pool); puts ("Threads stopped."); gcs_test_thread_pool_join (&send_pool); gcs_test_thread_pool_join (&repl_pool); puts ("SEND and REPL threads joined."); printf ("Closing GCS connection... 
"); if ((err = gcs_close (gcs))) goto out; puts ("done."); gcs_test_thread_pool_join (&recv_pool); puts ("RECV threads joined."); gettimeofday (&t_end, NULL); { double interval = (t_end.tv_sec - t_begin.tv_sec) + 0.000001*t_end.tv_usec - 0.000001*t_begin.tv_usec; printf ("Actions sent: "); test_print_stat (msg_sent, size_sent, interval); printf ("Actions received: "); test_print_stat (msg_recvd, size_recvd, interval); printf ("Actions replicated: "); test_print_stat (msg_repld, size_repld, interval); puts("---------------------------------------------------------------"); printf ("Total throughput: "); test_print_stat (msg_repld + msg_recvd, size_repld + size_recvd, interval); printf ("Overhead at 10000 actions/sec: %5.2f%%\n", 1000000.0 * interval / (msg_repld + msg_recvd)); puts(""); } printf ("Press any key to exit the program:\n"); fgetc (stdin); printf ("Freeing GCS connection handle..."); if ((err = gcs_destroy (gcs))) goto out; gcs = NULL; printf ("done\n"); fflush (stdout); printf ("Destroying GCache object:\n"); gcache_destroy (gcache); gcs_test_thread_pool_destroy (&repl_pool); gcs_test_thread_pool_destroy (&send_pool); gcs_test_thread_pool_destroy (&recv_pool); gu_to_destroy(&to); if (!throughput) { printf ("Closing send log\n"); test_log_close (&send_log); printf ("Closing recv log\n"); test_log_close (&recv_log); } { ssize_t total; ssize_t allocs; ssize_t reallocs; ssize_t deallocs; void gu_mem_stats (ssize_t*, ssize_t*, ssize_t*, ssize_t*); gu_mem_stats (&total, &allocs, &reallocs, &deallocs); printf ("Memory statistics:\n" "Memory still allocated: %10lld\n" "Times allocated: %10lld\n" "Times reallocated: %10lld\n" "Times freed: %10lld\n", (long long)total, (long long)allocs, (long long)reallocs, (long long)deallocs); } return 0; out: printf ("Error: %ld (%s)\n", err, strerror (-err)); return err; } galera-3-25.3.20/gcs/src/gcs_core.cpp0000644000015300001660000013024613042054732016777 0ustar jenkinsjenkins/* * Copyright (C) 2008-2016 Codership Oy * * $Id$ * * * Implementation of the generic communication layer. * See gcs_core.h */ #include "gu_throw.hpp" #define GCS_COMP_MSG_ACCESS #include "gcs_core.hpp" #include "gcs_backend.hpp" #include "gcs_comp_msg.hpp" #include "gcs_fifo_lite.hpp" #include "gcs_group.hpp" #include "gcs_gcache.hpp" #include "gu_debug_sync.hpp" #include // for mempcpy #include bool gcs_core_register (gu_config_t* conf) { return gcs_backend_register(conf); } const size_t CORE_FIFO_LEN = (1 << 10); // 1024 elements (no need to have more) const size_t CORE_INIT_BUF_SIZE = (1 << 16); // 65K - IP packet size typedef enum core_state { CORE_PRIMARY, CORE_EXCHANGE, CORE_NON_PRIMARY, CORE_CLOSED, CORE_DESTROYED } core_state_t; struct gcs_core { gu_config_t* config; gcache_t* cache; /* connection per se */ long prim_comp_no; core_state_t state; /* protocol */ int proto_ver; /* send part */ gu_mutex_t send_lock; // serves 3 purposes: // 1) serializes access to backend send() call // 2) synchronizes with configuration changes // 3) synchronizes with close() call void* send_buf; size_t send_buf_len; gcs_seqno_t send_act_no; /* recv part */ gcs_recv_msg_t recv_msg; /* local action FIFO */ gcs_fifo_lite_t* fifo; /* group context */ gcs_group_t group; /* backend part */ size_t msg_size; gcs_backend_t backend; // message IO context #ifdef GCS_CORE_TESTING gu_lock_step_t ls; // to lock-step in unit tests gu_uuid_t state_uuid; #endif }; // this is to pass local action info from send to recv thread. 
typedef struct core_act { gcs_seqno_t sent_act_id; const void* action; size_t action_size; } core_act_t; typedef struct causal_act { gcs_seqno_t* act_id; gu_mutex_t* mtx; gu_cond_t* cond; } causal_act_t; static int const GCS_PROTO_MAX = 0; gcs_core_t* gcs_core_create (gu_config_t* const conf, gcache_t* const cache, const char* const node_name, const char* const inc_addr, int const repl_proto_ver, int const appl_proto_ver) { assert (conf); gcs_core_t* core = GU_CALLOC (1, gcs_core_t); if (NULL != core) { core->config = conf; core->cache = cache; // Need to allocate something, otherwise Spread 3.17.3 freaks out. core->recv_msg.buf = gu_malloc(CORE_INIT_BUF_SIZE); if (core->recv_msg.buf) { core->recv_msg.buf_len = CORE_INIT_BUF_SIZE; core->send_buf = GU_CALLOC(CORE_INIT_BUF_SIZE, char); if (core->send_buf) { core->send_buf_len = CORE_INIT_BUF_SIZE; core->fifo = gcs_fifo_lite_create (CORE_FIFO_LEN, sizeof (core_act_t)); if (core->fifo) { gu_mutex_init (&core->send_lock, NULL); core->proto_ver = -1; // shall be bumped in gcs_group_act_conf() gcs_group_init (&core->group, cache, node_name, inc_addr, GCS_PROTO_MAX, repl_proto_ver, appl_proto_ver); core->state = CORE_CLOSED; core->send_act_no = 1; // 0 == no actions sent #ifdef GCS_CORE_TESTING gu_lock_step_init (&core->ls); core->state_uuid = GU_UUID_NIL; #endif return core; // success } gu_free (core->send_buf); } gu_free (core->recv_msg.buf); } gu_free (core); } return NULL; // failure } long gcs_core_init (gcs_core_t* core, gcs_seqno_t seqno, const gu_uuid_t* uuid) { if (core->state == CORE_CLOSED) { return gcs_group_init_history (&core->group, seqno, uuid); } else { gu_error ("State must be CLOSED"); if (core->state < CORE_CLOSED) return -EBUSY; else // DESTROYED return -EBADFD; } } long gcs_core_open (gcs_core_t* core, const char* channel, const char* url, bool const bstrap) { long ret; if (core->state != CORE_CLOSED) { gu_debug ("gcs_core->state isn't CLOSED: %d", core->state); return -EBADFD; } if (core->backend.conn) { assert (core->backend.destroy); core->backend.destroy (&core->backend); memset (&core->backend, 0, sizeof(core->backend)); } gu_debug ("Initializing backend IO layer"); if (!(ret = gcs_backend_init (&core->backend, url, core->config))){ assert (NULL != core->backend.conn); if (!(ret = core->backend.open (&core->backend, channel, bstrap))) { gcs_fifo_lite_open (core->fifo); core->state = CORE_NON_PRIMARY; } else { gu_error ("Failed to open backend connection: %d (%s)", ret, strerror(-ret)); core->backend.destroy (&core->backend); } } else { gu_error ("Failed to initialize backend using '%s': %d (%s)", url, ret, strerror(-ret)); } return ret; } /* Translates different core states into standard errors */ static inline ssize_t core_error (core_state_t state) { switch (state) { case CORE_EXCHANGE: return -EAGAIN; case CORE_NON_PRIMARY: return -ENOTCONN; case CORE_CLOSED: return -ECONNABORTED; case CORE_DESTROYED: return -EBADFD; default: assert(0); return -ENOTRECOVERABLE; } } /*! * Performs an attempt at sending a message (action fragment) with all * required checks while holding a lock, ensuring exclusive access to backend. * * restart flag may be raised if configuration changes and new nodes are * added - that would require all previous members to resend partially sent * actions. 
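 *
 * As implemented below, only CORE_PRIMARY state lets a message through
 * (plus GCS_MSG_STATE_MSG while in CORE_EXCHANGE); any other state is
 * translated into an error code by core_error(). Callers that can block
 * use core_msg_send_retry(), which keeps retrying with a 10 ms sleep for
 * as long as -EAGAIN is returned.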
*/ static inline ssize_t core_msg_send (gcs_core_t* core, const void* msg, size_t msg_len, gcs_msg_type_t msg_type) { ssize_t ret; if (gu_unlikely(0 != gu_mutex_lock (&core->send_lock))) abort(); { if (gu_likely((CORE_PRIMARY == core->state) || (CORE_EXCHANGE == core->state && GCS_MSG_STATE_MSG == msg_type))) { ret = core->backend.send (&core->backend, msg, msg_len, msg_type); if (ret > 0 && ret != (ssize_t)msg_len && GCS_MSG_ACTION != msg_type) { // could not send message in one piece gu_error ("Failed to send complete message of %s type: " "sent %zd out of %zu bytes.", gcs_msg_type_string[msg_type], ret, msg_len); ret = -EMSGSIZE; } } else { ret = core_error (core->state); if (ret >= 0) { gu_fatal ("GCS internal state inconsistency: " "expected error condition."); abort(); // ret = -ENOTRECOVERABLE; } } } gu_mutex_unlock (&core->send_lock); // gu_debug ("returning: %d (%s)", ret, strerror(-ret)); return ret; } /*! * Repeats attempt at sending the message if -EAGAIN was returned * by core_msg_send() */ static inline ssize_t core_msg_send_retry (gcs_core_t* core, const void* buf, size_t buf_len, gcs_msg_type_t type) { ssize_t ret; while ((ret = core_msg_send (core, buf, buf_len, type)) == -EAGAIN) { /* wait for primary configuration - sleep 0.01 sec */ gu_debug ("Backend requested wait"); usleep (10000); } // gu_debug ("returning: %d (%s)", ret, strerror(-ret)); return ret; } ssize_t gcs_core_send (gcs_core_t* const conn, const struct gu_buf* const action, size_t act_size, gcs_act_type_t const act_type) { ssize_t ret = 0; ssize_t sent = 0; gcs_act_frag_t frg; ssize_t send_size; const unsigned char proto_ver = conn->proto_ver; const ssize_t hdr_size = gcs_act_proto_hdr_size (proto_ver); core_act_t* local_act; assert (action != NULL); assert (act_size > 0); /* * Action header will be replicated with every message. * It may seem like an extra overhead, but it is tiny * so far and simplifies A LOT. */ /* Initialize action constants */ frg.act_size = act_size; frg.act_type = act_type; frg.act_id = conn->send_act_no; /* incremented for every new action */ frg.frag_no = 0; frg.proto_ver = proto_ver; if ((ret = gcs_act_proto_write (&frg, conn->send_buf, conn->send_buf_len))) return ret; if ((local_act = (core_act_t*)gcs_fifo_lite_get_tail (conn->fifo))) { *local_act = (core_act_t){ conn->send_act_no, action, act_size }; gcs_fifo_lite_push_tail (conn->fifo); } else { ret = core_error (conn->state); gu_error ("Failed to access core FIFO: %d (%s)", ret, strerror (-ret)); return ret; } int idx = 0; const uint8_t* ptr = (const uint8_t*)action[idx].ptr; size_t left = action[idx].size; do { const size_t chunk_size = act_size < frg.frag_len ? act_size : frg.frag_len; /* Here is the only time we have to cast frg.frag */ char* dst = (char*)frg.frag; size_t to_copy = chunk_size; while (to_copy > 0) { // gather action bufs into one if (to_copy < left) { memcpy (dst, ptr, to_copy); ptr += to_copy; left -= to_copy; to_copy = 0; } else { memcpy (dst, ptr, left); dst += left; to_copy -= left; idx++; ptr = (const uint8_t*)action[idx].ptr; left = action[idx].size; } } send_size = hdr_size + chunk_size; #ifdef GCS_CORE_TESTING gu_lock_step_wait (&conn->ls); // pause after every fragment gu_info ("Sent %p of size %zu. 
Total sent: %zu, left: %zu", (char*)conn->send_buf + hdr_size, chunk_size, sent, act_size); #endif ret = core_msg_send_retry (conn, conn->send_buf, send_size, GCS_MSG_ACTION); GU_DBUG_SYNC_WAIT("gcs_core_after_frag_send"); #ifdef GCS_CORE_TESTING // gu_lock_step_wait (&conn->ls); // pause after every fragment // gu_info ("Sent %p of size %zu, ret: %zd. Total sent: %zu, left: %zu", // conn->send_buf + hdr_size, chunk_size, ret, sent, act_size); #endif if (gu_likely(ret > hdr_size)) { assert (ret <= send_size); ret -= hdr_size; sent += ret; act_size -= ret; if (gu_unlikely((size_t)ret < chunk_size)) { /* Could not send all that was copied: */ /* 1. adjust frag_len, don't copy more than we could send */ frg.frag_len = ret; /* 2. move ptr back to point at the first unsent byte */ size_t move_back = chunk_size - ret; size_t ptrdiff = ptr - (uint8_t*)action[idx].ptr; do { if (move_back <= ptrdiff) { ptr -= move_back; left = action[idx].size - ptrdiff + move_back; break; } else { assert (idx > 0); move_back -= ptrdiff; idx--; ptrdiff = action[idx].size; ptr = (uint8_t*)action[idx].ptr + ptrdiff; } } while (true); } } else { if (ret >= 0) { // we managed to send less than a header, fail gu_fatal ("Cannot send message: header is too big"); ret = -ENOTRECOVERABLE; } /* At this point we have an unsent action in local FIFO * and parts of this action already could have been received * by other group members. * (first parts of action might be even received by this node, * so that there is nothing to remove, but we cannot know for sure) * * 1. Action will never be received completely by this node. Hence * action must be removed from fifo on behalf of sending thr.: */ gcs_fifo_lite_remove (conn->fifo); /* 2. Members will have to discard received fragments. * Two reasons could lead us here: new member(s) in configuration * change or broken connection (leave group). In both cases other * members discard fragments */ goto out; } } while (act_size && gcs_act_proto_inc(conn->send_buf)); assert (0 == act_size); /* successfully sent action, increment send counter */ conn->send_act_no++; ret = sent; out: // gu_debug ("returning: %d (%s)", ret, strerror(-ret)); return ret; } /* A helper for gcs_core_recv(). * Deals with fetching complete message from backend * and reallocates recv buf if needed */ static inline long core_msg_recv (gcs_backend_t* backend, gcs_recv_msg_t* recv_msg, long long timeout) { long ret; ret = backend->recv (backend, recv_msg, timeout); while (gu_unlikely(ret > recv_msg->buf_len)) { /* recv_buf too small, reallocate */ /* sometimes - like in case of component message, we may need to * do reallocation 2 times. This should be fixed in backend */ void* msg = gu_realloc (recv_msg->buf, ret); gu_debug ("Reallocating buffer from %d to %d bytes", recv_msg->buf_len, ret); if (msg) { /* try again */ recv_msg->buf = msg; recv_msg->buf_len = ret; ret = backend->recv (backend, recv_msg, timeout); /* should be either an error or an exact match */ assert ((ret < 0) || (ret >= recv_msg->buf_len)); } else { /* realloc unsuccessfull, old recv_buf remains */ gu_error ("Failed to reallocate buffer to %d bytes", ret); ret = -ENOMEM; break; } } if (gu_unlikely(ret < 0)) { gu_debug ("returning %d: %s\n", ret, strerror(-ret)); } return ret; } /*! * Helper for gcs_core_recv(). Handles GCS_MSG_ACTION. * * @return action size, negative error code or 0 to continue. 
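 *
 * For a locally originated action this handler pops the descriptor that
 * gcs_core_send() parked in the core's FIFO and cross-checks it against the
 * fragment header. Simplified sketch (annotation only, error paths and
 * locking omitted; slot/local are illustrative names, and the send side
 * uses conn-> rather than core-> in the original):
 *
 *   // send side, before fragmenting
 *   core_act_t* slot = (core_act_t*)gcs_fifo_lite_get_tail (core->fifo);
 *   *slot = (core_act_t){ core->send_act_no, action, act_size };
 *   gcs_fifo_lite_push_tail (core->fifo);
 *
 *   // recv side, on the last fragment of that action
 *   core_act_t* local = (core_act_t*)gcs_fifo_lite_get_head (core->fifo);
 *   assert (local->sent_act_id == frg.act_id);   // FIFO violation otherwise
 *   gcs_fifo_lite_pop_head (core->fifo);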
*/ static inline ssize_t core_handle_act_msg (gcs_core_t* core, struct gcs_recv_msg* msg, struct gcs_act_rcvd* act) { ssize_t ret = -1; gcs_group_t* group = &core->group; gcs_act_frag_t frg; bool my_msg = (gcs_group_my_idx(group) == msg->sender_idx); bool commonly_supported_version = true; assert (GCS_MSG_ACTION == msg->type); if ((CORE_PRIMARY == core->state) || my_msg){//should always handle own msgs if (gu_unlikely(gcs_act_proto_ver(msg->buf) != gcs_core_group_protocol_version(core))) { gu_info ("Message with protocol version %d != highest commonly supported: %d. ", gcs_act_proto_ver(msg->buf), gcs_core_group_protocol_version(core)); commonly_supported_version = false; if (!my_msg) { gu_info ("Discard message from member %d because of " "not commonly supported version.", msg->sender_idx); return 0; } else { gu_info ("Resend message because of " "not commonly supported version."); } } ret = gcs_act_proto_read (&frg, msg->buf, msg->size); if (gu_unlikely(ret)) { gu_fatal ("Error parsing action fragment header: %zd (%s).", ret, strerror (-ret)); assert (0); return -ENOTRECOVERABLE; } ret = gcs_group_handle_act_msg (group, &frg, msg, act, commonly_supported_version); if (ret > 0) { /* complete action received */ assert (act->act.buf_len == ret); #ifndef GCS_FOR_GARB assert (NULL != act->act.buf); #else assert (NULL == act->act.buf); #endif act->sender_idx = msg->sender_idx; if (gu_likely(!my_msg)) { /* foreign action, must be passed from gcs_group */ assert (GCS_ACT_TORDERED != act->act.type || act->id > 0); } else { /* local action, get from FIFO, should be there already */ core_act_t* local_act; gcs_seqno_t sent_act_id; if ((local_act = (core_act_t*)gcs_fifo_lite_get_head ( core->fifo))){ act->local = (const struct gu_buf*)local_act->action; act->act.buf_len = local_act->action_size; sent_act_id = local_act->sent_act_id; gcs_fifo_lite_pop_head (core->fifo); assert (NULL != act->local); /* NOTE! local_act cannot be used after this point */ /* sanity check */ if (gu_unlikely(sent_act_id != frg.act_id)) { gu_fatal ("FIFO violation: expected sent_act_id %lld " "found %lld", sent_act_id, frg.act_id); ret = -ENOTRECOVERABLE; } if (gu_unlikely(act->act.buf_len != ret)) { gu_fatal ("Send/recv action size mismatch: %zd/%zd", act->act.buf_len, ret); ret = -ENOTRECOVERABLE; } } else { gu_fatal ("FIFO violation: queue empty when local action " "received"); ret = -ENOTRECOVERABLE; } assert (act->id < 0 || CORE_PRIMARY == core->state); if (gu_unlikely(CORE_PRIMARY != core->state)) { // there can be a tiny race with gcs_core_close(), // so CORE_CLOSED allows TO delivery. assert (act->id < 0 /*#275|| CORE_CLOSED == core->state*/); if (act->id < 0) act->id = core_error (core->state); } } if (gu_unlikely(GCS_ACT_STATE_REQ == act->act.type && ret > 0 && // note: #gh74. // if lingering STR sneaks in when core->state != CORE_PRIMARY // act->id != GCS_SEQNO_ILL (most likely act->id == -EAGAIN) core->state == CORE_PRIMARY)) { #ifdef GCS_FOR_GARB /* ignoring state requests from other nodes (not allocated) */ if (my_msg) { if (act->act.buf_len != act->local[0].size) { gu_fatal ("Protocol violation: state request is fragmented." 
" Aborting."); abort(); } act->act.buf = act->local[0].ptr; #endif ret = gcs_group_handle_state_request (group, act); assert (ret <= 0 || ret == act->act.buf_len); #ifdef GCS_FOR_GARB if (ret < 0) gu_fatal ("Handling state request failed: %d",ret); act->act.buf = NULL; } else { act->act.buf_len = 0; act->act.type = GCS_ACT_ERROR; act->id = GCS_SEQNO_ILL; act->sender_idx = -1; ret = 0; } #endif } // gu_debug ("Received action: seqno: %lld, sender: %d, size: %d, " // "act: %p", act->id, msg->sender_idx, ret, act->buf); // gu_debug ("%s", (char*) act->buf); } else if (gu_unlikely(ret < 0)){ gu_fatal ("Failed to handle action fragment: %zd (%s)", ret, strerror(-ret)); return -ENOTRECOVERABLE; } } else { /* Non-primary conf, foreign message - ignore */ gu_warn ("Action message in non-primary configuration from " "member %d", msg->sender_idx); ret = 0; } #ifndef NDEBUG if (ret <= 0) { assert (GCS_SEQNO_ILL == act->id); assert (GCS_ACT_ERROR == act->act.type); } #endif return ret; } /*! * Helper for gcs_core_recv(). Handles GCS_MSG_LAST. * * @return action size, negative error code or 0 to continue. */ static ssize_t core_handle_last_msg (gcs_core_t* core, struct gcs_recv_msg* msg, struct gcs_act* act) { assert (GCS_MSG_LAST == msg->type); if (gcs_group_is_primary(&core->group)) { gcs_seqno_t commit_cut = gcs_group_handle_last_msg (&core->group, msg); if (commit_cut) { /* commit cut changed */ if ((act->buf = malloc (sizeof (commit_cut)))) { act->type = GCS_ACT_COMMIT_CUT; /* #701 - everything that goes into the action buffer * is expected to be serialized. */ *((gcs_seqno_t*)act->buf) = gcs_seqno_htog(commit_cut); act->buf_len = sizeof(commit_cut); return act->buf_len; } else { gu_fatal ("Out of memory for GCS_ACT_COMMIT_CUT"); return -ENOMEM; } } } else { /* Non-primary - ignore last message */ gu_warn ("Last Applied Action message " "in non-primary configuration from member %d", msg->sender_idx); } return 0; } /*! * Helper for gcs_core_recv(). Handles GCS_MSG_COMPONENT. * * @return action size, negative error code or 0 to continue. */ static ssize_t core_handle_comp_msg (gcs_core_t* core, struct gcs_recv_msg* msg, struct gcs_act* act) { ssize_t ret = 0; gcs_group_t* group = &core->group; assert (GCS_MSG_COMPONENT == msg->type); if (msg->size < (ssize_t)sizeof(gcs_comp_msg_t)) { gu_error ("Malformed component message. Ignoring"); return 0; } if (gu_mutex_lock (&core->send_lock)) abort(); ret = gcs_group_handle_comp_msg (group, (const gcs_comp_msg_t*)msg->buf); switch (ret) { case GCS_GROUP_PRIMARY: /* New primary configuration. This happens if: * - this is first node in group OR * - some nodes disappeared no new nodes appeared * No need for state exchange, return new conf_act right away */ assert (CORE_EXCHANGE != core->state); if (CORE_NON_PRIMARY == core->state) core->state = CORE_PRIMARY; ret = gcs_group_act_conf (group, act, &core->proto_ver); if (ret < 0) { gu_fatal ("Failed create PRIM CONF action: %d (%s)", ret, strerror (-ret)); assert (0); ret = -ENOTRECOVERABLE; } assert (ret == act->buf_len); break; case GCS_GROUP_WAIT_STATE_UUID: /* New members, need state exchange. 
If representative, send UUID */ // if state is CLOSED or DESTROYED we don't do anything if (CORE_CLOSED > core->state) { if (0 == gcs_group_my_idx(group)) { // I'm representative gu_uuid_t uuid; gu_uuid_generate (&uuid, NULL, 0); #ifdef GCS_CORE_TESTING if (gu_uuid_compare(&core->state_uuid, &GU_UUID_NIL)) { uuid = core->state_uuid; } #endif ret = core->backend.send (&core->backend, &uuid, sizeof(uuid), GCS_MSG_STATE_UUID); if (ret < 0) { // if send() failed, it means new configuration change // is on the way. Probably should ignore. gu_warn ("Failed to send state UUID: %d (%s)", ret, strerror (-ret)); } else { gu_info ("STATE_EXCHANGE: sent state UUID: " GU_UUID_FORMAT, GU_UUID_ARGS(&uuid)); } } else { gu_info ("STATE EXCHANGE: Waiting for state UUID."); } core->state = CORE_EXCHANGE; } ret = 0; // no action to return, continue break; case GCS_GROUP_NON_PRIMARY: /* Lost primary component */ if (core->state < CORE_CLOSED) { ret = gcs_group_act_conf (group, act, &core->proto_ver); if (ret < 0) { gu_fatal ("Failed create NON-PRIM CONF action: %d (%s)", ret, strerror (-ret)); assert (0); ret = -ENOTRECOVERABLE; } if (gcs_group_my_idx(group) == -1) { // self-leave gcs_fifo_lite_close (core->fifo); core->state = CORE_CLOSED; if (gcs_comp_msg_error((const gcs_comp_msg_t*)msg->buf)) { ret = -gcs_comp_msg_error( (const gcs_comp_msg_t*)msg->buf); free(const_cast(act->buf)); act->buf = NULL; act->buf_len = 0; act->type = GCS_ACT_ERROR; gu_info("comp msg error in core %d", -ret); } } else { // regular non-prim core->state = CORE_NON_PRIMARY; } } else { // ignore in production? assert(0); } assert (ret == act->buf_len || ret < 0); break; case GCS_GROUP_WAIT_STATE_MSG: gu_fatal ("Internal error: gcs_group_handle_comp() returned " "WAIT_STATE_MSG. Can't continue."); ret = -ENOTRECOVERABLE; assert(0); default: gu_fatal ("Failed to handle component message: %d (%s)!", ret, strerror (-ret)); assert(0); } gu_mutex_unlock (&core->send_lock); return ret; } /*! * Helper for gcs_core_recv(). Handles GCS_MSG_STATE_UUID. * * @return negative error code or 0 to continue. */ static ssize_t core_handle_uuid_msg (gcs_core_t* core, gcs_recv_msg_t* msg) { ssize_t ret = 0; gcs_group_t* group = &core->group; assert (GCS_MSG_STATE_UUID == msg->type); if (GCS_GROUP_WAIT_STATE_UUID == gcs_group_state (group)) { ret = gcs_group_handle_uuid_msg (group, msg); switch (ret) { case GCS_GROUP_WAIT_STATE_MSG: // Need to send state message for state exchange { gcs_state_msg_t* state = gcs_group_get_state (group); if (state) { size_t state_len = gcs_state_msg_len (state); uint8_t state_buf[state_len]; const gu_uuid_t* state_uuid = gcs_state_msg_uuid (state); gcs_state_msg_write (state_buf, state); ret = core_msg_send_retry (core, state_buf, state_len, GCS_MSG_STATE_MSG); if (ret > 0) { gu_info ("STATE EXCHANGE: sent state msg: " GU_UUID_FORMAT, GU_UUID_ARGS(state_uuid)); } else { // This may happen if new configuraiton chage goes on. // What shall we do in this case? Is it unrecoverable? gu_error ("STATE EXCHANGE: failed for: " GU_UUID_FORMAT ": %d (%s)", GU_UUID_ARGS(state_uuid), ret, strerror(-ret)); } gcs_state_msg_destroy (state); } else { gu_fatal ("Failed to allocate state object."); ret = -ENOTRECOVERABLE; } } break; case GCS_GROUP_WAIT_STATE_UUID: // In case of stray state uuid message break; default: assert(ret < 0); gu_error ("Failed to handle state UUID: %d (%s)", ret, strerror (-ret)); } } return ret; } /*! * Helper for gcs_core_recv(). Handles GCS_MSG_STATE_MSG. 
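 * (State exchange recap, summarizing the handlers above: a COMPONENT
 * message with new members moves the core into CORE_EXCHANGE and the
 * representative, node index 0, broadcasts a fresh state UUID; on that
 * UUID every node sends its own GCS_MSG_STATE_MSG; this handler collects
 * them and, once the group reports PRIMARY or NON_PRIMARY, produces the
 * configuration action via gcs_group_act_conf().)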
* * @return action size, negative error code or 0 to continue. */ static ssize_t core_handle_state_msg (gcs_core_t* core, struct gcs_recv_msg* msg, struct gcs_act* act) { ssize_t ret = 0; gcs_group_t* group = &core->group; assert (GCS_MSG_STATE_MSG == msg->type); if (GCS_GROUP_WAIT_STATE_MSG == gcs_group_state (group)) { if (gu_mutex_lock (&core->send_lock)) abort(); ret = gcs_group_handle_state_msg (group, msg); switch (ret) { case GCS_GROUP_PRIMARY: case GCS_GROUP_NON_PRIMARY: // state exchange is over, create configuration action // if core is closing we do nothing if (CORE_CLOSED > core->state) { assert (CORE_EXCHANGE == core->state); switch (ret) { case GCS_GROUP_PRIMARY: core->state = CORE_PRIMARY; break; case GCS_GROUP_NON_PRIMARY: core->state = CORE_NON_PRIMARY; break; default: assert (0); } } ret = gcs_group_act_conf (group, act, &core->proto_ver); if (ret < 0) { gu_fatal ("Failed create CONF action: %d (%s)", ret, strerror (-ret)); assert (0); ret = -ENOTRECOVERABLE; } assert (ret == act->buf_len); break; case GCS_GROUP_WAIT_STATE_MSG: // waiting for more state messages ret = 0; break; default: assert (ret < 0); gu_error ("Failed to handle state message: %d (%s)", ret, strerror (-ret)); } gu_mutex_unlock (&core->send_lock); } return ret; } /*! * Some service actions are for internal use and consist of a single message * (FLOW, JOIN, SYNC) * In this case we can simply use msg->buf as an action buffer, since we * can guarantee that we don't deallocate it. Action here is just a wrapper * to deliver message to the upper level. */ static ssize_t core_msg_to_action (gcs_core_t* core, struct gcs_recv_msg* msg, struct gcs_act_rcvd* rcvd) { ssize_t ret = 0; gcs_group_t* group = &core->group; if (GCS_GROUP_PRIMARY == gcs_group_state (group)) { gcs_act_type_t act_type; switch (msg->type) { case GCS_MSG_FLOW: // most frequent ret = 1; act_type = GCS_ACT_FLOW; break; case GCS_MSG_JOIN: ret = gcs_group_handle_join_msg (group, msg); assert (gcs_group_my_idx(group) == msg->sender_idx || 0 >= ret); if (-ENOTRECOVERABLE == ret) { core->backend.close(&core->backend); // See #165. // There is nobody to pass this error to for graceful shutdown: // application thread is blocked waiting for SST. // Also note that original ret value is not preserved on return // so this must be done here. gu_abort(); } act_type = GCS_ACT_JOIN; break; case GCS_MSG_SYNC: ret = gcs_group_handle_sync_msg (group, msg); act_type = GCS_ACT_SYNC; break; default: gu_error ("Iternal error. Unexpected message type %s from %ld", gcs_msg_type_string[msg->type], msg->sender_idx); assert (0); ret = -EPROTO; } if (ret != 0) { if (ret > 0) rcvd->id = 0; else if (ret < 0) rcvd->id = ret; struct gcs_act* const act(&rcvd->act); act->type = act_type; act->buf = msg->buf; act->buf_len = msg->size; ret = msg->size; } } else { gu_warn ("%s message from member %ld in non-primary configuration. " "Ignored.", gcs_msg_type_string[msg->type], msg->sender_idx); } return ret; } static long core_msg_causal(gcs_core_t* conn, struct gcs_recv_msg* msg) { causal_act_t* act; if (gu_unlikely(msg->size != sizeof(*act))) { gu_error("invalid causal act len %ld, expected %ld", msg->size, sizeof(*act)); return -EPROTO; } gcs_seqno_t const causal_seqno = GCS_GROUP_PRIMARY == conn->group.state ? conn->group.act_id_ : GCS_SEQNO_ILL; act = (causal_act_t*)msg->buf; gu_mutex_lock(act->mtx); *act->act_id = causal_seqno; gu_cond_signal(act->cond); gu_mutex_unlock(act->mtx); return msg->size; } /*! 
Receives action */ ssize_t gcs_core_recv (gcs_core_t* conn, struct gcs_act_rcvd* recv_act, long long timeout) { // struct gcs_act_rcvd recv_act; struct gcs_recv_msg* recv_msg = &conn->recv_msg; ssize_t ret = 0; static struct gcs_act_rcvd zero_act( gcs_act(NULL, 0, GCS_ACT_ERROR), NULL, -1, // GCS_SEQNO_ILL -1); *recv_act = zero_act; /* receive messages from group and demultiplex them * until finally some complete action is ready */ do { assert (recv_act->act.buf == NULL); assert (recv_act->act.buf_len == 0); assert (recv_act->act.type == GCS_ACT_ERROR); assert (recv_act->id == GCS_SEQNO_ILL); assert (recv_act->sender_idx == -1); ret = core_msg_recv (&conn->backend, recv_msg, timeout); if (gu_unlikely (ret <= 0)) { goto out; /* backend error while receiving message */ } switch (recv_msg->type) { case GCS_MSG_ACTION: ret = core_handle_act_msg(conn, recv_msg, recv_act); assert (ret == recv_act->act.buf_len || ret <= 0); break; case GCS_MSG_LAST: ret = core_handle_last_msg(conn, recv_msg, &recv_act->act); assert (ret >= 0); // hang on error in debug mode assert (ret == recv_act->act.buf_len); break; case GCS_MSG_COMPONENT: ret = core_handle_comp_msg (conn, recv_msg, &recv_act->act); // assert (ret >= 0); // hang on error in debug mode assert (ret == recv_act->act.buf_len || ret <= 0); break; case GCS_MSG_STATE_UUID: ret = core_handle_uuid_msg (conn, recv_msg); // assert (ret >= 0); // hang on error in debug mode ret = 0; // continue waiting for state messages break; case GCS_MSG_STATE_MSG: ret = core_handle_state_msg (conn, recv_msg, &recv_act->act); assert (ret >= 0); // hang on error in debug mode assert (ret == recv_act->act.buf_len); break; case GCS_MSG_JOIN: case GCS_MSG_SYNC: case GCS_MSG_FLOW: ret = core_msg_to_action (conn, recv_msg, recv_act); assert (ret == recv_act->act.buf_len || ret <= 0); break; case GCS_MSG_CAUSAL: ret = core_msg_causal(conn, recv_msg); assert(recv_msg->sender_idx == gcs_group_my_idx(&conn->group)); assert(ret == recv_msg->size || ret <= 0); ret = 0; // continue waiting for messages break; default: // this normaly should not happen, shall we bother with // protection? 
gu_warn ("Received unsupported message type: %d, length: %d, " "sender: %d", recv_msg->type, recv_msg->size, recv_msg->sender_idx); // continue looping } } while (0 == ret); /* end of recv loop */ out: assert (ret || GCS_ACT_ERROR == recv_act->act.type); assert (ret == recv_act->act.buf_len || ret < 0); assert (recv_act->id <= 0 || recv_act->act.type == GCS_ACT_TORDERED || recv_act->act.type == GCS_ACT_STATE_REQ); // <- dirty hack assert (recv_act->sender_idx >= 0 || recv_act->act.type != GCS_ACT_TORDERED); // gu_debug ("Returning %d", ret); if (ret < 0) { assert (recv_act->id < 0); if (GCS_ACT_TORDERED == recv_act->act.type && recv_act->act.buf) { gcs_gcache_free (conn->cache, recv_act->act.buf); recv_act->act.buf = NULL; } if (-ENOTRECOVERABLE == ret) { conn->backend.close(&conn->backend); gu_abort(); } } return ret; } long gcs_core_close (gcs_core_t* core) { long ret; if (!core) return -EBADFD; if (gu_mutex_lock (&core->send_lock)) return -EBADFD; if (core->state >= CORE_CLOSED) { ret = -EBADFD; } else { ret = core->backend.close (&core->backend); } gu_mutex_unlock (&core->send_lock); return ret; } long gcs_core_destroy (gcs_core_t* core) { core_act_t* tmp; if (!core) return -EBADFD; if (gu_mutex_lock (&core->send_lock)) return -EBADFD; { if (CORE_CLOSED != core->state) { if (core->state < CORE_CLOSED) gu_error ("Calling destroy() before close()."); gu_mutex_unlock (&core->send_lock); return -EBADFD; } if (core->backend.conn) { gu_debug ("Calling backend.destroy()"); core->backend.destroy (&core->backend); } core->state = CORE_DESTROYED; } gu_mutex_unlock (&core->send_lock); /* at this point all send attempts are isolated */ /* after that we must be able to destroy mutexes */ while (gu_mutex_destroy (&core->send_lock)); /* now noone will interfere */ while ((tmp = (core_act_t*)gcs_fifo_lite_get_head (core->fifo))) { // whatever is in tmp.action is allocated by app., just forget it. 
gcs_fifo_lite_pop_head (core->fifo); } gcs_fifo_lite_destroy (core->fifo); gcs_group_free (&core->group); /* free buffers */ gu_free (core->recv_msg.buf); gu_free (core->send_buf); #ifdef GCS_CORE_TESTING gu_lock_step_destroy (&core->ls); #endif gu_free (core); return 0; } gcs_proto_t gcs_core_group_protocol_version (const gcs_core_t* conn) { return conn->proto_ver; } int gcs_core_set_pkt_size (gcs_core_t* core, int const pkt_size) { if (core->state >= CORE_CLOSED) { gu_error ("Attempt to set packet size on a closed connection."); return -EBADFD; } int const hdr_size(gcs_act_proto_hdr_size(core->proto_ver)); if (hdr_size < 0) return hdr_size; int const min_msg_size(hdr_size + 1); int msg_size(core->backend.msg_size(&core->backend, pkt_size)); if (msg_size < min_msg_size) { gu_warn ("Requested packet size %d is too small, " "using smallest possible: %d", pkt_size, pkt_size + (min_msg_size - msg_size)); msg_size = min_msg_size; } /* even if backend may not support limiting packet size force max message * size at this level */ msg_size = std::min(std::max(min_msg_size, pkt_size), msg_size); gu_info ("Changing maximum packet size to %d, resulting msg size: %d", pkt_size, msg_size); int ret(msg_size - hdr_size); // message payload assert(ret > 0); if (core->send_buf_len == (size_t)msg_size) return ret; if (gu_mutex_lock (&core->send_lock)) abort(); { if (core->state != CORE_DESTROYED) { void* new_send_buf(gu_realloc(core->send_buf, msg_size)); if (new_send_buf) { core->send_buf = new_send_buf; core->send_buf_len = msg_size; memset (core->send_buf, 0, hdr_size); // to pacify valgrind gu_debug ("Message payload (action fragment size): %d", ret); } else { ret = -ENOMEM; } } else { ret = -EBADFD; } } gu_mutex_unlock (&core->send_lock); return ret; } static inline long core_send_seqno (gcs_core_t* core, gcs_seqno_t seqno, gcs_msg_type_t msg_type) { gcs_seqno_t const htogs = gcs_seqno_htog (seqno); ssize_t ret = core_msg_send_retry (core, &htogs, sizeof(htogs), msg_type); if (ret > 0) { assert(ret == sizeof(seqno)); ret = 0; } return ret; } long gcs_core_set_last_applied (gcs_core_t* core, gcs_seqno_t seqno) { return core_send_seqno (core, seqno, GCS_MSG_LAST); } long gcs_core_send_join (gcs_core_t* core, gcs_seqno_t seqno) { return core_send_seqno (core, seqno, GCS_MSG_JOIN); } long gcs_core_send_sync (gcs_core_t* core, gcs_seqno_t seqno) { return core_send_seqno (core, seqno, GCS_MSG_SYNC); } long gcs_core_send_fc (gcs_core_t* core, const void* fc, size_t fc_size) { ssize_t ret; ret = core_msg_send_retry (core, fc, fc_size, GCS_MSG_FLOW); if (ret == (ssize_t)fc_size) { ret = 0; } return ret; } gcs_seqno_t gcs_core_caused(gcs_core_t* core) { long ret; gcs_seqno_t act_id = GCS_SEQNO_ILL; gu_mutex_t mtx; gu_cond_t cond; causal_act_t act = {&act_id, &mtx, &cond}; gu_mutex_init (&mtx, NULL); gu_cond_init (&cond, NULL); gu_mutex_lock (&mtx); { ret = core_msg_send_retry (core, &act, sizeof(act), GCS_MSG_CAUSAL); if (ret == sizeof(act)) { gu_cond_wait (&cond, &mtx); } else { assert (ret < 0); act_id = ret; } } gu_mutex_unlock (&mtx); gu_mutex_destroy (&mtx); gu_cond_destroy (&cond); return act_id; } long gcs_core_param_set (gcs_core_t* core, const char* key, const char* value) { if (core->backend.conn) { return core->backend.param_set (&core->backend, key, value); } else { return 1; } } const char* gcs_core_param_get (gcs_core_t* core, const char* key) { if (core->backend.conn) { return core->backend.param_get (&core->backend, key); } else { return NULL; } } void gcs_core_get_status(gcs_core_t* core, 
gu::Status& status) { if (gu_mutex_lock(&core->send_lock)) gu_throw_fatal << "could not lock mutex"; if (core->state < CORE_CLOSED) { gcs_group_get_status(&core->group, status); core->backend.status_get(&core->backend, status); } gu_mutex_unlock(&core->send_lock); } #ifdef GCS_CORE_TESTING gcs_backend_t* gcs_core_get_backend (gcs_core_t* core) { return &core->backend; } void gcs_core_send_lock_step (gcs_core_t* core, bool enable) { gu_lock_step_enable (&core->ls, enable); } long gcs_core_send_step (gcs_core_t* core, long timeout_ms) { return gu_lock_step_cont (&core->ls, timeout_ms); } void gcs_core_set_state_uuid (gcs_core_t* core, const gu_uuid_t* uuid) { core->state_uuid = *uuid; } const gcs_group_t* gcs_core_get_group (const gcs_core_t* core) { return &core->group; } gcs_fifo_lite_t* gcs_core_get_fifo (gcs_core_t* core) { return core->fifo; } #endif /* GCS_CORE_TESTING */ galera-3-25.3.20/gcs/src/gcs_backend.hpp0000644000015300001660000001364713042054732017450 0ustar jenkinsjenkins/* * Copyright (C) 2008-2014 Codership Oy * * $Id$ */ /* * This header defines GC backend interface. * Since we can't know backend context in advance, * we have to use type void*. Kind of unsafe. */ #ifndef _gcs_backend_h_ #define _gcs_backend_h_ #include "gcs.hpp" #include "gcs_recv_msg.hpp" #include #include #include typedef struct gcs_backend_conn gcs_backend_conn_t; typedef struct gcs_backend gcs_backend_t; /* * The macros below are declarations of backend functions * (kind of function signatures) */ /*! Registers configuration parameters with config */ #define GCS_BACKEND_REGISTER_FN(fn) \ bool fn (gu_config_t* cnf) /*! Allocates backend context and sets up the backend structure */ #define GCS_BACKEND_CREATE_FN(fn) \ long fn (gcs_backend_t* backend, \ const char* const addr, \ gu_config_t* const cnf) /*! Deallocates backend context */ #define GCS_BACKEND_DESTROY_FN(fn) \ long fn (gcs_backend_t* backend) /*! Puts backend handle into operating state */ #define GCS_BACKEND_OPEN_FN(fn) \ long fn (gcs_backend_t* backend, \ const char* const channel, \ bool const bootstrap) /*! Puts backend handle into non-operating state */ #define GCS_BACKEND_CLOSE_FN(fn) \ long fn (gcs_backend_t* backend) /*! * Send a message from the backend. * * @param backend * a pointer to the backend handle * @param buf * a buffer to copy the message to * @param len * length of the supplied buffer * @param msg_type * type of the message * @return * negative error code in case of error * OR * amount of bytes sent */ #define GCS_BACKEND_SEND_FN(fn) \ long fn (gcs_backend_t* const backend, \ const void* const buf, \ size_t const len, \ gcs_msg_type_t const msg_type) /*! * Receive a message from the backend. * * @param backend * a pointer to the backend object * @param buf * a buffer to copy the message to * @param len * length of the supplied buffer * @param msg_type * type of the message * @param sender_id * unique sender ID in this configuration * @param timeout * absolute timeout date in nanoseconds * @return * negative error code in case of error * OR * the length of the message, so if it is bigger * than len, it has to be reread with a bigger buffer */ #define GCS_BACKEND_RECV_FN(fn) \ long fn (gcs_backend_t* const backend, \ gcs_recv_msg_t* const msg, \ long long const timeout) /* for lack of better place define it here */ static const long GCS_SENDER_NONE = -1; /** When there's no sender */ /*! Returns symbolic name of the backend */ #define GCS_BACKEND_NAME_FN(fn) \ const char* fn (void) /*! 
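 * (A note on the declaration macros above: they are signature templates
 *  only.  A concrete backend defines its entry points with them -- the
 *  unit tests, for instance, declare
 *      GCS_BACKEND_CREATE_FN (gcs_test_create) { ... }
 *      GCS_BACKEND_NAME_FN   (gcs_test_name)   { ... }
 *  -- and publishes the resulting functions through the gcs_backend_t
 *  vtable defined further below.)
 *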
* Returns the size of the message such that resulting network packet won't * exceed given value (basically, pkt_size - headers). * * @param backend * backend handle * @param pkt_size * desired size of a network packet * @return * - message size coresponding to the desired network packet size OR * - maximum message size the backend supports if requested packet size * is too big OR * - negative amount by which the packet size must be increased in order * to send at least 1 byte. */ #define GCS_BACKEND_MSG_SIZE_FN(fn) \ long fn (gcs_backend_t* const backend, \ long const pkt_size) /*! * @param backend * backend handle * @param key * parameter name * @param value * parameter value * @return 1 if parameter not recognized, 0 in case of success and negative * error code in case of error */ #define GCS_BACKEND_PARAM_SET_FN(fn) \ long fn (gcs_backend_t* backend, \ const char* key, \ const char* value) /*! * @param backend * backend handle * @param key * parameter name * @return NULL if parameter not recognized */ #define GCS_BACKEND_PARAM_GET_FN(fn) \ const char* fn (gcs_backend_t* backend, \ const char* key) /*! * @param backend * backend handle * @param status * reference to status variable map */ #define GCS_BACKEND_STATUS_GET_FN(fn) \ void fn(gcs_backend_t* backend, \ gu::Status& status) typedef GCS_BACKEND_CREATE_FN ((*gcs_backend_create_t)); typedef GCS_BACKEND_DESTROY_FN ((*gcs_backend_destroy_t)); typedef GCS_BACKEND_OPEN_FN ((*gcs_backend_open_t)); typedef GCS_BACKEND_CLOSE_FN ((*gcs_backend_close_t)); typedef GCS_BACKEND_SEND_FN ((*gcs_backend_send_t)); typedef GCS_BACKEND_RECV_FN ((*gcs_backend_recv_t)); typedef GCS_BACKEND_NAME_FN ((*gcs_backend_name_t)); typedef GCS_BACKEND_MSG_SIZE_FN ((*gcs_backend_msg_size_t)); typedef GCS_BACKEND_PARAM_SET_FN ((*gcs_backend_param_set_t)); typedef GCS_BACKEND_PARAM_GET_FN ((*gcs_backend_param_get_t)); typedef GCS_BACKEND_STATUS_GET_FN ((*gcs_backend_status_get_t)); struct gcs_backend { gcs_backend_conn_t* conn; gcs_backend_open_t open; gcs_backend_close_t close; gcs_backend_destroy_t destroy; gcs_backend_send_t send; gcs_backend_recv_t recv; gcs_backend_name_t name; gcs_backend_msg_size_t msg_size; gcs_backend_param_set_t param_set; gcs_backend_param_get_t param_get; gcs_backend_status_get_t status_get; }; /*! * Registers backends' parameters with config. */ bool gcs_backend_register(gu_config_t* conf); /*! 
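 * (The create function registered by a backend is expected to fill in the
 *  vtable above.  As a minimal illustration, the bundled unit tests do no
 *  more than
 *      GCS_BACKEND_CREATE_FN (gcs_test_create)
 *      {
 *          backend->name = gcs_test_name;
 *          return 0;
 *      }
 *  whereas a real backend would also set conn, open, close, send, recv,
 *  msg_size and the parameter/status callbacks.)
 *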
* Initializes preallocated backend object and opens backend connection * (sort of like 'new') */ long gcs_backend_init (gcs_backend_t* bk, const char* uri, gu_config_t* cnf); #endif /* _gcs_backend_h_ */ galera-3-25.3.20/gcs/src/unit_tests/0000755000015300001660000000000013042054732016702 5ustar jenkinsjenkinsgalera-3-25.3.20/gcs/src/unit_tests/gcs_proto_test.cpp0000644000015300001660000000744113042054732022452 0ustar jenkinsjenkins/* * Copyright (C) 2008 Codership Oy * * $Id$ */ #include #include #include #include "../gcs_act_proto.hpp" #include "gcs_proto_test.hpp" static long frgcmp (gcs_act_frag_t* f1, gcs_act_frag_t* f2) { if ( (f1->act_id == f2->act_id) && (f1->act_size == f2->act_size) && (f1->act_type == f2->act_type) && (f1->frag_len == f2->frag_len) && // expect to point (f1->frag == f2->frag) // at the same buffer here ) return 0; else return -1; } START_TEST (gcs_proto_test) { const char act_send[] = "Test action smuction"; const char* act_send_ptr = act_send; char act_recv[] = "owoeijrvfokpvfcsdnfvkmk;l"; char* act_recv_ptr = act_recv; const size_t buf_len = 32; char buf[buf_len]; gcs_act_frag_t frg_send, frg_recv; long ret; frg_send.act_id = getpid(); frg_send.act_size = strlen (act_send); frg_send.frag = NULL; frg_send.frag_len = 0; frg_send.frag_no = 0; frg_send.act_type = (gcs_act_type_t)0; frg_send.proto_ver = 0; // set up action header ret = gcs_act_proto_write (&frg_send, buf, buf_len); fail_if (ret, "error code: %d", ret); fail_if (frg_send.frag == NULL); fail_if (frg_send.frag_len == 0); fail_if (strlen(act_send) < frg_send.frag_len, "Expected fragmentation, but action seems to fit in buffer" " - increase send action length"); // write action to the buffer, it should not fit strncpy ((char*)frg_send.frag, act_send_ptr, frg_send.frag_len); act_send_ptr += frg_send.frag_len; // message was sent and received, now parse the header ret = gcs_act_proto_read (&frg_recv, buf, buf_len); fail_if (ret, "error code: %d", ret); fail_if (frg_recv.frag == NULL); fail_if (frg_recv.frag_len == 0); fail_if (frgcmp (&frg_send, &frg_recv), "Sent and recvd headers are not identical"); fail_if (frg_send.frag_no != frg_recv.frag_no, "Fragment numbers are not identical: %d %d", frg_send.frag_no, frg_recv.frag_no); // read the fragment into receiving action buffer // FIXME: this works by sheer luck - only because strncpy() pads // the remaining buffer space with 0 strncpy (act_recv_ptr, (const char*)frg_recv.frag, frg_recv.frag_len); act_recv_ptr += frg_recv.frag_len; // send the second fragment. 
Increment the fragment counter gcs_act_proto_inc (buf); // should be 1 now // write action to the buffer, it should fit now strncpy ((char*)frg_send.frag, act_send_ptr, frg_send.frag_len); // act_send_ptr += frg_send.frag_len; // message was sent and received, now parse the header ret = gcs_act_proto_read (&frg_recv, buf, buf_len); fail_if (ret, "error code: %d", ret); fail_if (frgcmp (&frg_send, &frg_recv), "Sent and recvd headers are not identical"); fail_if (frg_send.frag_no + 1 != frg_recv.frag_no, "Fragment numbers are not sequential: %d %d", frg_send.frag_no, frg_recv.frag_no); // read the fragment into receiving action buffer // FIXME: this works by sheer luck - only because strncpy() pads // the remaining buffer space with 0 strncpy (act_recv_ptr, (const char*)frg_recv.frag, frg_recv.frag_len); fail_if (strlen(act_recv_ptr) >= frg_send.frag_len, "Fragment does not seem to fit in buffer: '%s'(%d)", strlen(act_recv_ptr), act_recv_ptr); // check that actions are identical fail_if (strcmp(act_send, act_recv), "Actions don't match: '%s' -- '%s'", act_send, act_recv); } END_TEST Suite *gcs_proto_suite(void) { Suite *suite = suite_create("GCS core protocol"); TCase *tcase = tcase_create("gcs_proto"); suite_add_tcase (suite, tcase); tcase_add_test (tcase, gcs_proto_test); return suite; } galera-3-25.3.20/gcs/src/unit_tests/gcs_comp_test.hpp0000644000015300001660000000030313042054732022240 0ustar jenkinsjenkins/* * Copyright (C) 2008 Codership Oy * * $Id$ */ #ifndef __gcs_comp_test__ #define __gcs_comp_test__ extern Suite *gcs_comp_suite(void); #endif /* __gu_comp_test__ */ galera-3-25.3.20/gcs/src/unit_tests/gcs_backend_test.cpp0000644000015300001660000000462413042054732022676 0ustar jenkinsjenkins/* * Copyright (C) 2008-2014 Codership Oy * * $Id$ */ #include #include #include #include #include #include #include "../gcs_backend.hpp" #include "gcs_backend_test.hpp" // Fake backend definitons. 
Must be global for gcs_backend.c to see GCS_BACKEND_NAME_FN(gcs_test_name) { return "DUMMIEEEE!"; } GCS_BACKEND_CREATE_FN(gcs_test_create) { backend->name = gcs_test_name; return 0; } GCS_BACKEND_NAME_FN(gcs_spread_name) { return "SPREAT"; } GCS_BACKEND_CREATE_FN(gcs_spread_create) { backend->name = gcs_spread_name; return 0; } GCS_BACKEND_NAME_FN(gcs_vs_name) { return "vsssssssss"; } GCS_BACKEND_CREATE_FN(gcs_vs_create) { backend->name = gcs_vs_name; return 0; } GCS_BACKEND_NAME_FN(gcs_gcomm_name) { return "gCOMMMMM!!!"; } GCS_BACKEND_CREATE_FN(gcs_gcomm_create) { backend->name = gcs_gcomm_name; return 0; } START_TEST (gcs_backend_test) { gcs_backend_t backend; long ret; gu_config_t* config = gu_config_create (); fail_if (config == NULL); ret = gcs_backend_init (&backend, "wrong://kkk", config); fail_if (ret != -ESOCKTNOSUPPORT); ret = gcs_backend_init (&backend, "spread:", config); fail_if (ret != -EINVAL); ret = gcs_backend_init (&backend, "dummy://", config); fail_if (ret != 0, "ret = %d (%s)", ret, strerror(-ret)); // fail_if (backend.name != gcs_test_name); this test is broken since we can // no longer use global gcs_dummy_create() symbol because linking with real // gcs_dummy.o ret = gcs_backend_init (&backend, "gcomm://0.0.0.0:4567", config); #ifdef GCS_USE_GCOMM fail_if (ret != 0, "ret = %d (%s)", ret, strerror(-ret)); fail_if (backend.name != gcs_gcomm_name); #else fail_if (ret != -ESOCKTNOSUPPORT); #endif // ret = gcs_backend_init (&backend, "vsbes://kkk"); // fail_if (ret != 0, "ret = %d (%s)", ret, strerror(-ret)); // fail_if (backend.name != gcs_vs_name); // ret = gcs_backend_init (&backend, "spread://"); // fail_if (ret != 0, "ret = %d (%s)", ret, strerror(-ret)); // fail_if (backend.name != gcs_spread_name); } END_TEST Suite *gcs_backend_suite(void) { Suite *suite = suite_create("GCS backend interface"); TCase *tcase = tcase_create("gcs_backend"); suite_add_tcase (suite, tcase); tcase_add_test (tcase, gcs_backend_test); return suite; } galera-3-25.3.20/gcs/src/unit_tests/gcs_group_test.hpp0000644000015300001660000000030713042054732022442 0ustar jenkinsjenkins/* * Copyright (C) 2008 Codership Oy * * $Id$ */ #ifndef __gcs_group_test__ #define __gcs_group_test__ extern Suite *gcs_group_suite(void); #endif /* __gu_group_test__ */ galera-3-25.3.20/gcs/src/unit_tests/gcs_node_test.hpp0000644000015300001660000000030313042054732022227 0ustar jenkinsjenkins/* * Copyright (C) 2008 Codership Oy * * $Id$ */ #ifndef __gcs_node_test__ #define __gcs_node_test__ extern Suite *gcs_node_suite(void); #endif /* __gu_node_test__ */ galera-3-25.3.20/gcs/src/unit_tests/gcs_memb_test.hpp0000644000015300001660000000030413042054732022223 0ustar jenkinsjenkins/* * Copyright (C) 2011 Codership Oy * * $Id$ */ #ifndef __gcs_memb_test__ #define __gcs_memb_test__ extern Suite *gcs_memb_suite(void); #endif /* __gu_group_test__ */ galera-3-25.3.20/gcs/src/unit_tests/gcs_state_msg_test.cpp0000644000015300001660000005370413042054732023300 0ustar jenkinsjenkins// Copyright (C) 2007-2013 Codership Oy // $Id$ #include #include #include "gcs_state_msg_test.hpp" #define GCS_STATE_MSG_ACCESS #include "../gcs_state_msg.hpp" static int const QUORUM_VERSION = 4; START_TEST (gcs_state_msg_test_basic) { ssize_t send_len, ret; gu_uuid_t state_uuid; gu_uuid_t group_uuid; gu_uuid_t prim_uuid; gcs_state_msg_t* send_state; gcs_state_msg_t* recv_state; gu_uuid_generate (&state_uuid, NULL, 0); gu_uuid_generate (&group_uuid, NULL, 0); gu_uuid_generate (&prim_uuid, NULL, 0); send_state = gcs_state_msg_create (&state_uuid, 
&group_uuid, &prim_uuid, 457, // prim_seqno 3465, // last received seq. 2345, // last cached seq. 5, // prim_joined GCS_NODE_STATE_JOINED, // prim_state GCS_NODE_STATE_NON_PRIM, // current_state "My Name", // name "192.168.0.1:2345", // inc_addr 0, // gcs_proto_ver 1, // repl_proto_ver 1, // appl_proto_ver 0, // desync_count GCS_STATE_FREP // flags ); fail_if (NULL == send_state); send_len = gcs_state_msg_len (send_state); fail_if (send_len < 0, "gcs_state_msg_len() returned %zd (%s)", send_len, strerror (-send_len)); { uint8_t send_buf[send_len]; ret = gcs_state_msg_write (send_buf, send_state); fail_if (ret != send_len, "Return value does not match send_len: " "expected %zd, got %zd", send_len, ret); recv_state = gcs_state_msg_read (send_buf, send_len); fail_if (NULL == recv_state); } fail_if (send_state->flags != recv_state->flags); fail_if (send_state->gcs_proto_ver != recv_state->gcs_proto_ver); fail_if (send_state->repl_proto_ver != recv_state->repl_proto_ver); fail_if (send_state->appl_proto_ver != recv_state->appl_proto_ver); fail_if (recv_state->appl_proto_ver != 1, "appl_proto_ver: %d", recv_state->appl_proto_ver); fail_if (send_state->received != recv_state->received, "Last received seqno: sent %lld, recv %lld", send_state->received, recv_state->received); fail_if (send_state->cached != recv_state->cached, "Last cached seqno: sent %lld, recv %lld", send_state->cached, recv_state->cached); fail_if (send_state->prim_seqno != recv_state->prim_seqno); fail_if (send_state->current_state != recv_state->current_state); fail_if (send_state->prim_state != recv_state->prim_state); fail_if (send_state->prim_joined != recv_state->prim_joined); fail_if (gu_uuid_compare (&recv_state->state_uuid, &state_uuid)); fail_if (gu_uuid_compare (&recv_state->group_uuid, &group_uuid)); fail_if (gu_uuid_compare (&recv_state->prim_uuid, &prim_uuid)); fail_if (strcmp(send_state->name, recv_state->name)); fail_if (strcmp(send_state->inc_addr, recv_state->inc_addr)); { size_t str_len = 1024; char send_str[str_len]; char recv_str[str_len]; fail_if (gcs_state_msg_snprintf (send_str, str_len, send_state) <= 0); fail_if (gcs_state_msg_snprintf (recv_str, str_len, recv_state) <= 0); // no longer true fail_if (strncmp (send_str, recv_str, str_len)); } gcs_state_msg_destroy (send_state); gcs_state_msg_destroy (recv_state); } END_TEST START_TEST (gcs_state_msg_test_quorum_inherit) { gcs_state_msg_t* st[3] = { NULL, }; gu_uuid_t state_uuid; gu_uuid_t group1_uuid, group2_uuid; gu_uuid_t prim1_uuid, prim2_uuid; gu_uuid_generate (&state_uuid, NULL, 0); gu_uuid_generate (&group1_uuid, NULL, 0); gu_uuid_generate (&group2_uuid, NULL, 0); gu_uuid_generate (&prim1_uuid, NULL, 0); gu_uuid_generate (&prim2_uuid, NULL, 0); gcs_seqno_t prim1_seqno = 123; gcs_seqno_t prim2_seqno = 834; gcs_seqno_t act1_seqno = 345; gcs_seqno_t act2_seqno = 239472508908LL; gcs_state_quorum_t quorum; mark_point(); /* First just nodes from different groups and configurations, none JOINED */ st[0] = gcs_state_msg_create (&state_uuid, &group2_uuid, &prim2_uuid, prim2_seqno - 1, act2_seqno - 1, act2_seqno-1, 5, GCS_NODE_STATE_PRIM, GCS_NODE_STATE_PRIM, "node0", "", 0, 1, 1, 0, 0); fail_if(NULL == st[0]); st[1] = gcs_state_msg_create (&state_uuid, &group1_uuid, &prim1_uuid, prim1_seqno, act1_seqno, act1_seqno - 1, 3, GCS_NODE_STATE_PRIM, GCS_NODE_STATE_PRIM, "node1", "", 0, 1, 0, 0, 0); fail_if(NULL == st[1]); st[2] = gcs_state_msg_create (&state_uuid, &group2_uuid, &prim2_uuid, prim2_seqno, act2_seqno, act2_seqno - 2, 5, GCS_NODE_STATE_PRIM, 
GCS_NODE_STATE_PRIM, "node2", "", 0, 1, 1, 0, 1); fail_if(NULL == st[2]); gu_info (" Inherited 1"); int ret = gcs_state_msg_get_quorum ((const gcs_state_msg_t**)st, sizeof(st)/sizeof(gcs_state_msg_t*), &quorum); fail_if (0 != ret); fail_if (QUORUM_VERSION != quorum.version); fail_if (false != quorum.primary); fail_if (0 != gu_uuid_compare(&quorum.group_uuid, &GU_UUID_NIL)); fail_if (GCS_SEQNO_ILL != quorum.act_id); fail_if (GCS_SEQNO_ILL != quorum.conf_id); fail_if (-1 != quorum.gcs_proto_ver); fail_if (-1 != quorum.repl_proto_ver); fail_if (-1 != quorum.appl_proto_ver); /* now make node1 inherit PC */ gcs_state_msg_destroy (st[1]); st[1] = gcs_state_msg_create (&state_uuid, &group1_uuid, &prim1_uuid, prim1_seqno, act1_seqno, act1_seqno - 3, 3, GCS_NODE_STATE_JOINED, GCS_NODE_STATE_DONOR, "node1", "", 0, 1, 0, 0, 0); fail_if(NULL == st[1]); gu_info (" Inherited 2"); ret = gcs_state_msg_get_quorum ((const gcs_state_msg_t**)st, sizeof(st)/sizeof(gcs_state_msg_t*), &quorum); fail_if (0 != ret); fail_if (QUORUM_VERSION != quorum.version); fail_if (true != quorum.primary); fail_if (0 != gu_uuid_compare(&quorum.group_uuid, &group1_uuid)); fail_if (act1_seqno != quorum.act_id); fail_if (prim1_seqno != quorum.conf_id); fail_if (0 != quorum.gcs_proto_ver); fail_if (1 != quorum.repl_proto_ver); fail_if (0 != quorum.appl_proto_ver); /* now make node0 inherit PC (should yield conflicting uuids) */ gcs_state_msg_destroy (st[0]); st[0] = gcs_state_msg_create (&state_uuid, &group2_uuid, &prim2_uuid, prim2_seqno - 1, act2_seqno - 1, -1, 5, GCS_NODE_STATE_SYNCED, GCS_NODE_STATE_SYNCED, "node0", "", 0, 1, 1, 0, 0); fail_if(NULL == st[0]); gu_info (" Inherited 3"); ret = gcs_state_msg_get_quorum ((const gcs_state_msg_t**)st, sizeof(st)/sizeof(gcs_state_msg_t*), &quorum); fail_if (0 != ret); fail_if (QUORUM_VERSION != quorum.version); fail_if (false != quorum.primary); fail_if (0 != gu_uuid_compare(&quorum.group_uuid, &GU_UUID_NIL)); fail_if (GCS_SEQNO_ILL != quorum.act_id); fail_if (GCS_SEQNO_ILL != quorum.conf_id); fail_if (-1 != quorum.gcs_proto_ver); fail_if (-1 != quorum.repl_proto_ver); fail_if (-1 != quorum.appl_proto_ver); /* now make node1 non-joined again: group2 should win */ gcs_state_msg_destroy (st[1]); st[1] = gcs_state_msg_create (&state_uuid, &group1_uuid, &prim1_uuid, prim1_seqno, act1_seqno, act1_seqno -3, 3, GCS_NODE_STATE_JOINED, GCS_NODE_STATE_PRIM, "node1", "", 0, 1, 0, 0, 0); fail_if(NULL == st[1]); gu_info (" Inherited 4"); ret = gcs_state_msg_get_quorum ((const gcs_state_msg_t**)st, sizeof(st)/sizeof(gcs_state_msg_t*), &quorum); fail_if (0 != ret); fail_if (QUORUM_VERSION != quorum.version); fail_if (true != quorum.primary); fail_if (0 != gu_uuid_compare(&quorum.group_uuid, &group2_uuid)); fail_if (act2_seqno - 1 != quorum.act_id); fail_if (prim2_seqno - 1 != quorum.conf_id); fail_if (0 != quorum.gcs_proto_ver); fail_if (1 != quorum.repl_proto_ver); fail_if (0 != quorum.appl_proto_ver); /* now make node2 joined: it should become a representative */ gcs_state_msg_destroy (st[2]); st[2] = gcs_state_msg_create (&state_uuid, &group2_uuid, &prim2_uuid, prim2_seqno, act2_seqno, act2_seqno - 2, 5, GCS_NODE_STATE_SYNCED, GCS_NODE_STATE_SYNCED, "node2", "", 0, 1, 1, 0, 0); fail_if(NULL == st[2]); gu_info (" Inherited 5"); ret = gcs_state_msg_get_quorum ((const gcs_state_msg_t**)st, sizeof(st)/sizeof(gcs_state_msg_t*), &quorum); fail_if (0 != ret); fail_if (QUORUM_VERSION != quorum.version); fail_if (true != quorum.primary); fail_if (0 != gu_uuid_compare(&quorum.group_uuid, &group2_uuid)); 
fail_if (act2_seqno != quorum.act_id); fail_if (prim2_seqno != quorum.conf_id); fail_if (0 != quorum.gcs_proto_ver); fail_if (1 != quorum.repl_proto_ver); fail_if (0 != quorum.appl_proto_ver); gcs_state_msg_destroy (st[0]); gcs_state_msg_destroy (st[1]); gcs_state_msg_destroy (st[2]); } END_TEST START_TEST (gcs_state_msg_test_quorum_remerge) { gcs_state_msg_t* st[3] = { NULL, }; gu_uuid_t state_uuid; gu_uuid_t group1_uuid, group2_uuid; gu_uuid_t prim0_uuid, prim1_uuid, prim2_uuid; gu_uuid_generate (&state_uuid, NULL, 0); gu_uuid_generate (&group1_uuid, NULL, 0); gu_uuid_generate (&group2_uuid, NULL, 0); gu_uuid_generate (&prim0_uuid, NULL, 0); gu_uuid_generate (&prim1_uuid, NULL, 0); gu_uuid_generate (&prim2_uuid, NULL, 0); gcs_seqno_t prim1_seqno = 123; gcs_seqno_t prim2_seqno = 834; gcs_seqno_t act1_seqno = 345; gcs_seqno_t act2_seqno = 239472508908LL; gcs_state_quorum_t quorum; mark_point(); /* First just nodes from different groups and configurations, none JOINED */ st[0] = gcs_state_msg_create (&state_uuid, &group2_uuid, &prim0_uuid, prim2_seqno - 1, act2_seqno - 1,act2_seqno -2, 5, GCS_NODE_STATE_JOINER,GCS_NODE_STATE_NON_PRIM, "node0", "", 0, 1, 1, 0, 0); fail_if(NULL == st[0]); st[1] = gcs_state_msg_create (&state_uuid, &group1_uuid, &prim1_uuid, prim1_seqno, act1_seqno, act1_seqno - 3, 3, GCS_NODE_STATE_JOINER,GCS_NODE_STATE_NON_PRIM, "node1", "", 0, 1, 0, 0, 0); fail_if(NULL == st[1]); st[2] = gcs_state_msg_create (&state_uuid, &group2_uuid, &prim2_uuid, prim2_seqno, act2_seqno, -1, 5, GCS_NODE_STATE_JOINER,GCS_NODE_STATE_NON_PRIM, "node2", "", 0, 1, 1, 0, 1); fail_if(NULL == st[2]); gu_info (" Remerged 1"); int ret = gcs_state_msg_get_quorum ((const gcs_state_msg_t**)st, sizeof(st)/sizeof(gcs_state_msg_t*), &quorum); fail_if (0 != ret); fail_if (QUORUM_VERSION != quorum.version); fail_if (false != quorum.primary); fail_if (0 != gu_uuid_compare(&quorum.group_uuid, &GU_UUID_NIL)); fail_if (GCS_SEQNO_ILL != quorum.act_id); fail_if (GCS_SEQNO_ILL != quorum.conf_id); fail_if (-1 != quorum.gcs_proto_ver); fail_if (-1 != quorum.repl_proto_ver); fail_if (-1 != quorum.appl_proto_ver); /* Now make node0 to be joined at least once */ gcs_state_msg_destroy (st[0]); st[0] = gcs_state_msg_create (&state_uuid, &group2_uuid, &prim0_uuid, prim2_seqno - 1, act2_seqno - 1, -1, 5, GCS_NODE_STATE_DONOR, GCS_NODE_STATE_NON_PRIM, "node0", "", 0, 1, 1, 3, 0); fail_if(NULL == st[0]); fail_if(3 != gcs_state_msg_get_desync_count(st[0])); gu_info (" Remerged 2"); ret = gcs_state_msg_get_quorum ((const gcs_state_msg_t**)st, sizeof(st)/sizeof(gcs_state_msg_t*), &quorum); fail_if (0 != ret); fail_if (QUORUM_VERSION != quorum.version); fail_if (true != quorum.primary); fail_if (0 != gu_uuid_compare(&quorum.group_uuid, &group2_uuid)); fail_if (act2_seqno - 1 != quorum.act_id); fail_if (prim2_seqno - 1 != quorum.conf_id); fail_if (0 != quorum.gcs_proto_ver); fail_if (1 != quorum.repl_proto_ver); fail_if (0 != quorum.appl_proto_ver); /* Now make node2 to be joined too */ gcs_state_msg_destroy (st[2]); st[2] = gcs_state_msg_create (&state_uuid, &group2_uuid, &prim2_uuid, prim2_seqno, act2_seqno, act2_seqno - 3, 5, GCS_NODE_STATE_JOINED,GCS_NODE_STATE_NON_PRIM, "node2", "", 0, 1, 1, 0, 1); fail_if(NULL == st[2]); gu_info (" Remerged 3"); ret = gcs_state_msg_get_quorum ((const gcs_state_msg_t**)st, sizeof(st)/sizeof(gcs_state_msg_t*), &quorum); fail_if (0 != ret); fail_if (QUORUM_VERSION != quorum.version); fail_if (true != quorum.primary); fail_if (0 != gu_uuid_compare(&quorum.group_uuid, &group2_uuid)); fail_if 
(act2_seqno != quorum.act_id); fail_if (prim2_seqno != quorum.conf_id); fail_if (0 != quorum.gcs_proto_ver); fail_if (1 != quorum.repl_proto_ver); fail_if (0 != quorum.appl_proto_ver); /* now make node1 joined too: conflict */ gcs_state_msg_destroy (st[1]); st[1] = gcs_state_msg_create (&state_uuid, &group1_uuid, &prim1_uuid, prim1_seqno, act1_seqno, act1_seqno, 3, GCS_NODE_STATE_SYNCED,GCS_NODE_STATE_NON_PRIM, "node1", "", 0, 1, 0, 0, 0); fail_if(NULL == st[1]); gu_info (" Remerged 4"); ret = gcs_state_msg_get_quorum ((const gcs_state_msg_t**)st, sizeof(st)/sizeof(gcs_state_msg_t*), &quorum); fail_if (0 != ret); fail_if (QUORUM_VERSION != quorum.version); fail_if (false != quorum.primary); fail_if (0 != gu_uuid_compare(&quorum.group_uuid, &GU_UUID_NIL)); fail_if (GCS_SEQNO_ILL != quorum.act_id); fail_if (GCS_SEQNO_ILL != quorum.conf_id); fail_if (-1 != quorum.gcs_proto_ver); fail_if (-1 != quorum.repl_proto_ver); fail_if (-1 != quorum.appl_proto_ver); /* now make node1 current joiner: should be ignored */ gcs_state_msg_destroy (st[1]); st[1] = gcs_state_msg_create (&state_uuid, &group1_uuid, &prim1_uuid, prim1_seqno, act1_seqno, act1_seqno - 2, 3, GCS_NODE_STATE_SYNCED, GCS_NODE_STATE_JOINER, "node1", "", 0, 1, 0, 0, 0); fail_if(NULL == st[1]); gu_info (" Remerged 5"); ret = gcs_state_msg_get_quorum ((const gcs_state_msg_t**)st, sizeof(st)/sizeof(gcs_state_msg_t*), &quorum); fail_if (0 != ret); fail_if (QUORUM_VERSION != quorum.version); fail_if (true != quorum.primary); fail_if (0 != gu_uuid_compare(&quorum.group_uuid, &group2_uuid)); fail_if (act2_seqno != quorum.act_id); fail_if (prim2_seqno != quorum.conf_id); fail_if (0 != quorum.gcs_proto_ver); fail_if (1 != quorum.repl_proto_ver); fail_if (0 != quorum.appl_proto_ver); gcs_state_msg_destroy (st[0]); gcs_state_msg_destroy (st[1]); gcs_state_msg_destroy (st[2]); } END_TEST START_TEST(gcs_state_msg_test_gh24) { gcs_state_msg_t* st[7] = { NULL, }; gu_uuid_t state_uuid, group_uuid; gu_uuid_generate(&state_uuid, NULL, 0); gu_uuid_generate(&group_uuid, NULL, 0); gu_uuid_t prim_uuid1, prim_uuid2; gu_uuid_generate(&prim_uuid1, NULL, 0); gu_uuid_generate(&prim_uuid2, NULL, 0); gcs_seqno_t prim_seqno1 = 37; int prim_joined1 = 3; gcs_seqno_t prim_seqno2 = 35; int prim_joined2 = 6; gcs_seqno_t received = 0; gcs_seqno_t cached = 0; gcs_state_quorum_t quorum; // first three are 35. st[0] = gcs_state_msg_create(&state_uuid, &group_uuid, &prim_uuid2, prim_seqno2, received, cached, prim_joined2, GCS_NODE_STATE_SYNCED, GCS_NODE_STATE_NON_PRIM, "home0", "", 0, 4, 2, 0, 2); fail_unless(st[0] != 0); st[1] = gcs_state_msg_create(&state_uuid, &group_uuid, &prim_uuid2, prim_seqno2, received, cached, prim_joined2, GCS_NODE_STATE_SYNCED, GCS_NODE_STATE_NON_PRIM, "home1", "", 0, 4, 2, 0, 2); fail_unless(st[1] != 0); st[2] = gcs_state_msg_create(&state_uuid, &group_uuid, &prim_uuid2, prim_seqno2, received, cached, prim_joined2, GCS_NODE_STATE_SYNCED, GCS_NODE_STATE_NON_PRIM, "home2", "", 0, 4, 2, 0, 2); fail_unless(st[2] != 0); // last four are 37. 
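    // With all seven states present the quorum computation is expected to
    // settle on the newer configuration (prim_seqno1 == 37), which is what
    // the assertions at the end of this test verify.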
st[3] = gcs_state_msg_create(&state_uuid, &group_uuid, &prim_uuid1, prim_seqno1, received, cached, prim_joined1, GCS_NODE_STATE_SYNCED, GCS_NODE_STATE_NON_PRIM, "home3", "", 0, 4, 2, 0, 3); fail_unless(st[3] != 0); st[4] = gcs_state_msg_create(&state_uuid, &group_uuid, &prim_uuid1, prim_seqno1, received, cached, prim_joined1, GCS_NODE_STATE_SYNCED, GCS_NODE_STATE_NON_PRIM, "home4", "", 0, 4, 2, 0, 2); fail_unless(st[4] != 0); st[5] = gcs_state_msg_create(&state_uuid, &group_uuid, &prim_uuid1, prim_seqno1, received, cached, prim_joined1, GCS_NODE_STATE_SYNCED, GCS_NODE_STATE_NON_PRIM, "home5", "", 0, 4, 2, 0, 2); fail_unless(st[5] != 0); st[6] = gcs_state_msg_create(&state_uuid, &group_uuid, &prim_uuid1, prim_seqno1, received, cached, prim_joined1, GCS_NODE_STATE_PRIM, GCS_NODE_STATE_NON_PRIM, "home6", "", 0, 4, 2, 0, 2); fail_unless(st[6] != 0); int ret = gcs_state_msg_get_quorum((const gcs_state_msg_t**)st, 7, &quorum); fail_unless(ret == 0); fail_unless(quorum.primary == true); fail_unless(quorum.conf_id == prim_seqno1); // // but we just have first five nodes, we don't have prim. // // because prim_joined=3 but there are only 2 joined nodes. // ret = gcs_state_msg_get_quorum((const gcs_state_msg_t**)st, 5, // &quorum); // fail_unless(ret == 0); // fail_unless(quorum.primary == false); for(int i=0;i<7;i++) gcs_state_msg_destroy(st[i]); } END_TEST Suite *gcs_state_msg_suite(void) { Suite *s = suite_create("GCS state message"); TCase *tc_basic = tcase_create("gcs_state_msg_basic"); TCase *tc_inherit = tcase_create("gcs_state_msg_inherit"); TCase *tc_remerge = tcase_create("gcs_state_msg_remerge"); suite_add_tcase (s, tc_basic); tcase_add_test (tc_basic, gcs_state_msg_test_basic); suite_add_tcase (s, tc_inherit); tcase_add_test (tc_inherit, gcs_state_msg_test_quorum_inherit); suite_add_tcase (s, tc_remerge); tcase_add_test (tc_remerge, gcs_state_msg_test_quorum_remerge); tcase_add_test (tc_remerge, gcs_state_msg_test_gh24); return s; } galera-3-25.3.20/gcs/src/unit_tests/gcs_defrag_test.cpp0000644000015300001660000001162113042054732022532 0ustar jenkinsjenkins/* * Copyright (C) 2008 Codership Oy * * $Id$ */ #include #include #include #include #include "gcs_defrag_test.hpp" #include "../gcs_defrag.hpp" #define TRUE (0 == 0) #define FALSE (!TRUE) static void defrag_check_init (gcs_defrag_t* defrag) { fail_if (defrag->sent_id != GCS_SEQNO_ILL); fail_if (defrag->head != NULL); fail_if (defrag->tail != NULL); fail_if (defrag->size != 0); fail_if (defrag->received != 0); fail_if (defrag->frag_no != 0); } START_TEST (gcs_defrag_test) { ssize_t ret; // The Action char act_buf[] = "Test action smuction"; size_t act_len = sizeof (act_buf); // lengths of three fragments of the action size_t frag1_len = act_len / 3; size_t frag2_len = frag1_len; size_t frag3_len = act_len - frag1_len - frag2_len; // pointer to the three fragments of the action const char* frag1 = act_buf; const char* frag2 = frag1 + frag1_len; const char* frag3 = frag2 + frag2_len; // recv fragments gcs_act_frag_t frg1, frg2, frg3, frg4; gcs_defrag_t defrag; struct gcs_act recv_act; void* tail; mark_point(); #ifndef NDEBUG // debug build breaks this test due to asserts return; #endif // Initialize message parameters frg1.act_id = getpid(); frg1.act_size = act_len; frg1.frag = frag1; frg1.frag_len = frag1_len; frg1.frag_no = 0; frg1.act_type = GCS_ACT_TORDERED; frg1.proto_ver = 0; // normal fragments frg2 = frg3 = frg1; frg2.frag = frag2; frg2.frag_len = frag2_len; frg2.frag_no = frg1.frag_no + 1; frg3.frag = frag3; frg3.frag_len = 
frag3_len; frg3.frag_no = frg2.frag_no + 1; // bad fragmets to be tried instead of frg2 frg4 = frg2; frg4.frag = "junk"; frg4.frag_len = strlen("junk"); frg4.act_id = frg2.act_id + 1; // wrong action id mark_point(); // ready for the first fragment gcs_defrag_init (&defrag, NULL); defrag_check_init (&defrag); mark_point(); // 1. Try fragment that is not the first ret = gcs_defrag_handle_frag (&defrag, &frg3, &recv_act, FALSE); fail_if (ret != -EPROTO); mark_point(); defrag_check_init (&defrag); // should be no changes // 2. Try first fragment ret = gcs_defrag_handle_frag (&defrag, &frg1, &recv_act, FALSE); fail_if (ret != 0); fail_if (defrag.head == NULL); fail_if (defrag.received != frag1_len); fail_if (defrag.tail != defrag.head + defrag.received); tail = defrag.tail; #define TRY_WRONG_2ND_FRAGMENT(frag) \ ret = gcs_defrag_handle_frag (&defrag, &frag, &recv_act, FALSE); \ if (defrag.frag_no < frag.frag_no) fail_if (ret != -EPROTO); \ else fail_if (ret != 0); \ fail_if (defrag.received != frag1_len); \ fail_if (defrag.tail != tail); // 3. Try first fragment again TRY_WRONG_2ND_FRAGMENT(frg1); // 4. Try third fragment TRY_WRONG_2ND_FRAGMENT(frg3); // 5. Try fouth fragment TRY_WRONG_2ND_FRAGMENT(frg4); // 6. Try second fragment ret = gcs_defrag_handle_frag (&defrag, &frg2, &recv_act, FALSE); fail_if (ret != 0); fail_if (defrag.received != frag1_len + frag2_len); fail_if (defrag.tail != defrag.head + defrag.received); // 7. Try third fragment, last one ret = gcs_defrag_handle_frag (&defrag, &frg3, &recv_act, FALSE); fail_if (ret != (long)act_len); // 8. Check the action fail_if (recv_act.buf_len != (long)act_len); fail_if (strncmp((const char*)recv_act.buf, act_buf, act_len), "Action received: '%s', expected '%s'",recv_act.buf,act_buf); defrag_check_init (&defrag); // should be empty // memleak in recv_act.buf ! // 9. Try the same with local action ret = gcs_defrag_handle_frag (&defrag, &frg1, &recv_act, TRUE); fail_if (ret != 0); // fail_if (defrag.head != NULL); (and now we may allocate it for cache) ret = gcs_defrag_handle_frag (&defrag, &frg2, &recv_act, TRUE); fail_if (ret != 0); // fail_if (defrag.head != NULL); (and now we may allocate it for cache) ret = gcs_defrag_handle_frag (&defrag, &frg3, &recv_act, TRUE); fail_if (ret != (long)act_len); // fail_if (defrag.head != NULL); (and now we may allocate it for cache) // 10. Check the action fail_if (recv_act.buf_len != (long)act_len); // fail_if (recv_act.buf != NULL); (and now we may allocate it for cache) defrag_check_init (&defrag); // should be empty // memleack in recv_act.buf ! 
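    // (Steps 9-10 above repeat the three-fragment assembly for a local
    //  action; the head/buf pointer checks from the remote case are left
    //  commented out because the buffer may now be allocated for the cache.)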
} END_TEST Suite *gcs_defrag_suite(void) { Suite *suite = suite_create("GCS defragmenter"); TCase *tcase = tcase_create("gcs_defrag"); suite_add_tcase (suite, tcase); tcase_add_test (tcase, gcs_defrag_test); return suite; } galera-3-25.3.20/gcs/src/unit_tests/gcs_core_test.hpp0000644000015300001660000000030313042054732022232 0ustar jenkinsjenkins/* * Copyright (C) 2008 Codership Oy * * $Id$ */ #ifndef __gcs_core_test__ #define __gcs_core_test__ extern Suite *gcs_core_suite(void); #endif /* __gu_core_test__ */ galera-3-25.3.20/gcs/src/unit_tests/gcs_proto_test.hpp0000644000015300001660000000030713042054732022451 0ustar jenkinsjenkins/* * Copyright (C) 2008 Codership Oy * * $Id$ */ #ifndef __gcs_proto_test__ #define __gcs_proto_test__ extern Suite *gcs_proto_suite(void); #endif /* __gu_proto_test__ */ galera-3-25.3.20/gcs/src/unit_tests/gcs_tests.cpp0000644000015300001660000000414213042054732021405 0ustar jenkinsjenkins/* * Copyright (C) 2008-2014 Codership Oy * * $Id$ */ #include // printf() #include // strcmp() #include // EXIT_SUCCESS | EXIT_FAILURE #include #include #include "gcs_comp_test.hpp" #include "gcs_sm_test.hpp" #include "gcs_state_msg_test.hpp" #include "gcs_fifo_test.hpp" #include "gcs_proto_test.hpp" #include "gcs_defrag_test.hpp" #include "gcs_node_test.hpp" #include "gcs_memb_test.hpp" #include "gcs_group_test.hpp" #include "gcs_backend_test.hpp" #include "gcs_core_test.hpp" #include "gcs_fc_test.hpp" typedef Suite *(*suite_creator_t)(void); static suite_creator_t suites[] = { gcs_comp_suite, gcs_send_monitor_suite, gcs_state_msg_suite, gcs_fifo_suite, gcs_proto_suite, gcs_defrag_suite, gcs_node_suite, gcs_memb_suite, gcs_group_suite, gcs_backend_suite, gcs_core_suite, gcs_fc_suite, NULL }; int main(int argc, char* argv[]) { bool const nofork(((argc > 1) && !strcmp(argv[1], "nofork")) ? true : false); int i = 0; int failed = 0; FILE* log_file = NULL; if (!nofork) { log_file = fopen ("gcs_tests.log", "w"); if (!log_file) return EXIT_FAILURE; gu_conf_set_log_file (log_file); } gu_conf_debug_on(); gu_conf_self_tstamp_on(); while (suites[i]) { SRunner* sr = srunner_create(suites[i]()); gu_info ("#########################"); gu_info ("Test %d.", i); gu_info ("#########################"); if (nofork) srunner_set_fork_status(sr, CK_NOFORK); srunner_run_all (sr, CK_NORMAL); failed += srunner_ntests_failed (sr); srunner_free (sr); i++; } if (log_file) fclose (log_file); printf ("Total test failed: %d\n", failed); return (failed == 0) ? 
EXIT_SUCCESS : EXIT_FAILURE; } /* When the suite compiled in debug mode, returns number of allocated bytes */ ssize_t gcs_tests_get_allocated() { ssize_t total; ssize_t allocs; ssize_t reallocs; ssize_t deallocs; void gu_mem_stats (ssize_t*, ssize_t*, ssize_t*, ssize_t*); gu_mem_stats (&total, &allocs, &reallocs, &deallocs); return total; } galera-3-25.3.20/gcs/src/unit_tests/SConscript0000644000015300001660000000530013042054732020712 0ustar jenkinsjenkins Import('check_env') env = check_env.Clone() # Include paths env.Append(CPPPATH = Split(''' #/galerautils/src #/gcache/src #/gcs/src ''')) # For C-style logging env.Append(CPPFLAGS = ' -DGALERA_LOG_H_ENABLE_CXX -Wno-variadic-macros') # Disable old style cast warns until code is fixed env.Append(CPPFLAGS = ' -Wno-old-style-cast') # Allow zero sized arrays env.Replace(CCFLAGS = env['CCFLAGS'].replace('-pedantic', '')) env.Append(CPPFLAGS = ' -Wno-missing-field-initializers') env.Append(CPPFLAGS = ' -Wno-effc++') gcs_tests_sources = Split(''' gcs_tests.cpp gcs_fifo_test.cpp ../gcs_fifo_lite.cpp gcs_sm_test.cpp ../gcs_sm.cpp gcs_comp_test.cpp ../gcs_comp_msg.cpp gcs_state_msg_test.cpp ../gcs_state_msg.cpp gcs_backend_test.cpp ../gcs_backend.cpp gcs_proto_test.cpp ../gcs_act_proto.cpp gcs_defrag_test.cpp ../gcs_defrag.cpp gcs_node_test.cpp ../gcs_node.cpp gcs_group_test.cpp gcs_memb_test.cpp ../gcs_group.cpp gcs_core_test.cpp ../gcs_core.cpp ../gcs_dummy.cpp ../gcs_msg_type.cpp ../gcs.cpp ../gcs_params.cpp gcs_fc_test.cpp ../gcs_fc.cpp ''') #env.Append(CPPFLAGS = ' -DGCS_USE_GCOMM -DGCS_CORE_TESTING -DGCS_DUMMY_TESTING') env.Append(CPPFLAGS = ' -DGCS_CORE_TESTING -DGCS_DUMMY_TESTING') env.Append(LIBS = File('#/gcache/src/libgcache.a')) env.Append(LIBS = File('#/gcomm/src/libgcomm.a')) env.Append(LIBS = File('#/galerautils/src/libgalerautils++.a')) env.Append(LIBS = File('#/galerautils/src/libgalerautils.a')) env.Append(LIBS = ['m', 'ssl', 'crypto']) gcs_tests = env.Program(target = 'gcs_tests', source = gcs_tests_sources, OBJPREFIX = 'gcs-tests-', LINK = env['CXX']) env.Test("gcs_tests.passed", gcs_tests) env.Alias("test", "gcs_tests.passed") Clean(gcs_tests, '#/gcs_tests.log') galera-3-25.3.20/gcs/src/unit_tests/gcs_backend_test.hpp0000644000015300001660000000031713042054732022676 0ustar jenkinsjenkins/* * Copyright (C) 2008 Codership Oy * * $Id$ */ #ifndef __gcs_backend_test__ #define __gcs_backend_test__ extern Suite *gcs_backend_suite(void); #endif /* __gu_backend_test__ */ galera-3-25.3.20/gcs/src/unit_tests/gcs_state_msg_test.hpp0000644000015300001660000000031013042054732023266 0ustar jenkinsjenkins// Copyright (C) 2007 Codership Oy // $Id$ #ifndef __gcs_state_msg_test__ #define __gcs_state_msg_test__ Suite *gcs_state_msg_suite(void); #endif /* __gcs_state_msg_test__ */ galera-3-25.3.20/gcs/src/unit_tests/gcs_sm_test.hpp0000644000015300001660000000026613042054732021731 0ustar jenkinsjenkins// Copyright (C) 2007 Codership Oy // $Id$ #ifndef __gcs_sm_test__ #define __gcs_sm_test__ Suite *gcs_send_monitor_suite(void); #endif /* __gcs_sm_test__ */ galera-3-25.3.20/gcs/src/unit_tests/gcs_core_test.cpp0000644000015300001660000010173513042054732022240 0ustar jenkinsjenkins/* * Copyright (C) 2008-2013 Codership Oy * * $Id$ */ /* * @file * * Defines unit tests for gcs_core (and as a result tests gcs_group and * a dummy backend which gcs_core depends on) * * Most of the checks require independent sending and receiving threads. 
* Approach 1 is to start separate threads for both sending and receiving * and use the current thread of execution to sychronize between them: * * CORE_RECV_START(act_r) * CORE_SEND_START(act_s) * while (gcs_core_send_step(Core)) { // step through action fragments * (do something) * }; * CORE_SEND_END(act_s, ret) // check return code * CORE_RECV_END(act_r, size, type) // makes checks against size and type * * A simplified approach 2 is: * * CORE_SEND_START(act_s) * while (gcs_core_send_step(Core)) { // step through action fragments * (do something) * }; * CORE_SEND_END(act_s, ret) // check return code * CORE_RECV_ACT(act_r, size, type) // makes checks agains size and type * * In the first approach group messages will be received concurrently. * In the second approach messages will wait in queue and be fetched afterwards * */ #define GCS_STATE_MSG_ACCESS #include "../gcs_core.hpp" #include "../gcs_dummy.hpp" #include "../gcs_seqno.hpp" #include "../gcs_state_msg.hpp" #include #include #include #include #include "gcs_core_test.hpp" extern ssize_t gcs_tests_get_allocated(); static const long UNKNOWN_SIZE = 1234567890; // some unrealistic number static gcs_core_t* Core = NULL; static gcs_backend_t* Backend = NULL; static gcs_seqno_t Seqno = 0; typedef struct action { const struct gu_buf* in; void* out; const void* local; ssize_t size; gcs_act_type_t type; gcs_seqno_t seqno; gu_thread_t thread; action() { } action(const struct gu_buf* a_in, void* a_out, const void* a_local, ssize_t a_size, gcs_act_type_t a_type, gcs_seqno_t a_seqno, gu_thread_t a_thread) : in (a_in), out (a_out), local (a_local), size (a_size), type (a_type), seqno (a_seqno), thread (a_thread) { } } action_t; //static struct action_t RecvAct; static const ssize_t FRAG_SIZE = 4; // desirable action fragment size // 1-fragment action static const char act1_str[] = "101"; static const struct gu_buf act1[1] = { { act1_str, sizeof(act1_str) } }; // 2-fragment action, with buffers aligned with FRAG_SIZE static const char act2_str[] = "202122"; static const struct gu_buf act2[2] = { { "2021", 4 }, { "22", 3 } /* 4 + 3 = 7 = sizeof(act2_str) */ }; // 3-fragment action, with unaligned buffers static const char act3_str[] = "3031323334"; static const struct gu_buf act3[] = { { "303", 3 }, { "13", 2 }, { "23", 2 }, { "334", 4 } /* 3 + 2 + 2 + 4 = 11 = sizeof(act3_str) */ }; // action receive thread, returns after first action received, stores action // in the passed action_t object, uses global Core to receive static void* core_recv_thread (void* arg) { action_t* act = (action_t*)arg; // @todo: refactor according to new gcs_act types struct gcs_act_rcvd recv_act; act->size = gcs_core_recv (Core, &recv_act, GU_TIME_ETERNITY); act->out = (void*)recv_act.act.buf; act->local = recv_act.local; act->type = recv_act.act.type; act->seqno = recv_act.id; return (NULL); } // this macro logs errors from within a function #define FAIL_IF(expr, format, ...) \ if (expr) { \ gu_fatal ("FAIL: "format, __VA_ARGS__, NULL); \ fail_if (true, format, __VA_ARGS__, NULL); \ return true; \ } /* * Huge macros which follow below cannot be functions for the purpose * of correct line reporting. 
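 *
 * (For orientation: with FRAG_SIZE = 4 the test actions defined above
 *  need (act_size - 1)/FRAG_SIZE + 1 fragments, i.e. 1 fragment for act1
 *  (4 bytes), 2 for act2 (7 bytes) and 3 for act3 (11 bytes) -- exactly
 *  the counts the send-step loops in the tests below count down.)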
*/ // Start a thread to receive an action // args: action_t object static inline bool CORE_RECV_START(action_t* act) { return (0 != gu_thread_create (&act->thread, NULL, core_recv_thread, act)); } static bool COMMON_RECV_CHECKS(action_t* act, const char* buf, ssize_t size, gcs_act_type_t type, gcs_seqno_t* seqno) { FAIL_IF (size != UNKNOWN_SIZE && size != act->size, "gcs_core_recv(): expected %lld, returned %zd (%s)", (long long) size, act->size, strerror (-act->size)); FAIL_IF (act->type != type, "type does not match: expected %d, got %d", type, act->type); FAIL_IF (act->size > 0 && act->out == NULL, "null buffer received with positive size: %zu", act->size); if (act->type == GCS_ACT_STATE_REQ) return false; // action is ordered only if it is of type GCS_ACT_TORDERED and not an error if (act->seqno > 0) { FAIL_IF (GCS_ACT_TORDERED != act->type, "GCS_ACT_TORDERED != act->type (%d), while act->seqno: %lld", act->type, (long long)act->seqno); FAIL_IF ((*seqno + 1) != act->seqno, "expected seqno %lld, got %lld", (long long)(*seqno + 1), (long long)act->seqno); *seqno = *seqno + 1; } if (NULL != buf) { if (GCS_ACT_TORDERED == act->type) { // local action buffer should not be copied FAIL_IF (act->local != act->in, "Received buffer ptr is not the same as sent: %p != %p", act->in, act->local, NULL); FAIL_IF (memcmp (buf, act->out, act->size), "Received buffer contents is not the same as sent: " "'%s' != '%s'", buf, (char*)act->out); } else { FAIL_IF (act->local == buf, "Received the same buffer ptr as sent", NULL); FAIL_IF (memcmp (buf, act->out, act->size), "Received buffer contents is not the same as sent", NULL); } } return false; } // Wait for recv thread to complete, perform required checks // args: action_t, expected size, expected type static bool CORE_RECV_END(action_t* act, const void* buf, ssize_t size, gcs_act_type_t type) { { int ret = gu_thread_join (act->thread, NULL); act->thread = (gu_thread_t)-1; FAIL_IF(0 != ret, "Failed to join recv thread: %ld (%s)", ret, strerror (ret)); } return COMMON_RECV_CHECKS (act, (const char*)buf, size, type, &Seqno); } // Receive action in one call, perform required checks // args: pointer to action_t, expected size, expected type static bool CORE_RECV_ACT (action_t* act, const void* buf, // single buffer action repres. 
ssize_t size, gcs_act_type_t type) { struct gcs_act_rcvd recv_act; act->size = gcs_core_recv (Core, &recv_act, GU_TIME_ETERNITY); act->out = (void*)recv_act.act.buf; act->local = recv_act.local; act->type = recv_act.act.type; act->seqno = recv_act.id; return COMMON_RECV_CHECKS (act, (const char*)buf, size, type, &Seqno); } // Sending always needs to be done via separate thread (uses lock-stepping) void* core_send_thread (void* arg) { action_t* act = (action_t*)arg; // use seqno field to pass the return code, it is signed 8-byte integer act->seqno = gcs_core_send (Core, act->in, act->size, act->type); return (NULL); } // Start a thread to send an action // args: action_t object static bool CORE_SEND_START(action_t* act) { return (0 != gu_thread_create (&act->thread, NULL, core_send_thread, act)); } // Wait for send thread to complete, perform required checks // args: action_t, expected return code static bool CORE_SEND_END(action_t* act, long ret) { { long _ret = gu_thread_join (act->thread, NULL); act->thread = (gu_thread_t)-1; FAIL_IF (0 != _ret, "Failed to join recv thread: %ld (%s)", _ret, strerror (_ret)); } FAIL_IF (ret != act->seqno, "gcs_core_send(): expected %lld, returned %lld (%s)", (long long) ret, (long long) act->seqno, strerror (-act->seqno)); return false; } // check if configuration is the one that we expected static long core_test_check_conf (const gcs_act_conf_t* conf, bool prim, long my_idx, long memb_num) { long ret = 0; if ((conf->conf_id >= 0) != prim) { gu_error ("Expected %s conf, received %s", prim ? "PRIMARY" : "NON-PRIMARY", (conf->conf_id >= 0) ? "PRIMARY" : "NON-PRIMARY"); ret = -1; } if (conf->my_idx != my_idx) { gu_error ("Expected my_idx = %ld, got %ld", my_idx, conf->my_idx); ret = -1; } if (conf->my_idx != my_idx) { gu_error ("Expected my_idx = %ld, got %ld", my_idx, conf->my_idx); ret = -1; } return ret; } static long core_test_set_payload_size (ssize_t s) { long ret; const ssize_t arbitrary_pkt_size = s + 64; // big enough for payload to fit ret = gcs_core_set_pkt_size (Core, arbitrary_pkt_size); if (ret <= 0) { gu_error("set_pkt_size(%zd) returned: %ld (%s)", arbitrary_pkt_size, ret, strerror (-ret)); return ret; } ret = gcs_core_set_pkt_size (Core, arbitrary_pkt_size - ret + s); if (ret != s) { gu_error("set_pkt_size() returned: %ld instead of %zd", ret, s); return ret; } return 0; } // Initialises core and backend objects + some common tests static inline void core_test_init (bool bootstrap = true, const char* name = "core_test") { long ret; action_t act; mark_point(); gu_config_t* config = gu_config_create (); fail_if (config == NULL); Core = gcs_core_create (config, NULL, name, "aaa.bbb.ccc.ddd:xxxx", 0, 0); fail_if (NULL == Core); Backend = gcs_core_get_backend (Core); fail_if (NULL == Backend); Seqno = 0; // reset seqno ret = core_test_set_payload_size (FRAG_SIZE); fail_if (-EBADFD != ret, "Expected -EBADFD, got: %ld (%s)", ret, strerror(-ret)); ret = gcs_core_open (Core, "yadda-yadda", "owkmevc", 1); fail_if (-EINVAL != ret, "Expected -EINVAL, got %ld (%s)", ret, strerror(-ret)); ret = gcs_core_open (Core, "yadda-yadda", "dummy://", bootstrap); fail_if (0 != ret, "Failed to open core connection: %ld (%s)", ret, strerror(-ret)); if (!bootstrap) { gcs_core_send_lock_step (Core, true); mark_point(); return; } // receive first configuration message fail_if (CORE_RECV_ACT (&act, NULL, UNKNOWN_SIZE, GCS_ACT_CONF)); fail_if (core_test_check_conf((const gcs_act_conf_t*)act.out, bootstrap, 0, 1)); free (act.out); // this will configure backend to have 
desired fragment size ret = core_test_set_payload_size (FRAG_SIZE); fail_if (0 != ret, "Failed to set up the message payload size: %ld (%s)", ret, strerror(-ret)); // try to send an action to check that everything's alright ret = gcs_core_send (Core, act1, sizeof(act1_str), GCS_ACT_TORDERED); fail_if (ret != sizeof(act1_str), "Expected %d, got %d (%s)", sizeof(act1_str), ret, strerror (-ret)); gu_warn ("Next CORE_RECV_ACT fails under valgrind"); act.in = act1; fail_if (CORE_RECV_ACT (&act, act1_str, sizeof(act1_str),GCS_ACT_TORDERED)); ret = gcs_core_send_join (Core, Seqno); fail_if (ret != 0, "gcs_core_send_join(): %ld (%s)", ret, strerror(-ret)); // no action to be received (we're joined already) ret = gcs_core_send_sync (Core, Seqno); fail_if (ret != 0, "gcs_core_send_sync(): %ld (%s)", ret, strerror(-ret)); fail_if (CORE_RECV_ACT(&act,NULL,sizeof(gcs_seqno_t),GCS_ACT_SYNC)); fail_if (Seqno != gcs_seqno_gtoh(*(gcs_seqno_t*)act.out)); gcs_core_send_lock_step (Core, true); mark_point(); } // cleans up core and backend objects static inline void core_test_cleanup () { long ret; char tmp[1]; action_t act; fail_if (NULL == Core); fail_if (NULL == Backend); // to fetch self-leave message fail_if (CORE_RECV_START (&act)); ret = gcs_core_close (Core); fail_if (0 != ret, "Failed to close core: %ld (%s)", ret, strerror (-ret)); ret = CORE_RECV_END (&act, NULL, UNKNOWN_SIZE, GCS_ACT_CONF); fail_if (ret, "ret: %ld (%s)", ret, strerror(-ret)); free (act.out); // check that backend is closed too ret = Backend->send (Backend, tmp, sizeof(tmp), GCS_MSG_ACTION); fail_if (ret != -EBADFD); ret = gcs_core_destroy (Core); fail_if (0 != ret, "Failed to destroy core: %ld (%s)", ret, strerror (-ret)); { ssize_t allocated; allocated = gcs_tests_get_allocated(); fail_if (0 != allocated, "Expected 0 allocated bytes, found %zd", allocated); } } // just a smoke test for core API START_TEST (gcs_core_test_api) { core_test_init (); fail_if (NULL == Core); fail_if (NULL == Backend); long ret; long tout = 100; // 100 ms timeout const struct gu_buf* act = act3; const void* act_buf = act3_str; size_t act_size = sizeof(act3_str); action_t act_s(act, NULL, NULL, act_size, GCS_ACT_TORDERED, -1, (gu_thread_t)-1); action_t act_r(act, NULL, NULL, -1, (gcs_act_type_t)-1, -1, (gu_thread_t)-1); long i = 5; // test basic fragmentaiton while (i--) { long frags = (act_size - 1)/FRAG_SIZE + 1; gu_info ("Iteration %ld: act: %s, size: %zu, frags: %ld", i, act, act_size, frags); fail_if (CORE_SEND_START (&act_s)); while ((ret = gcs_core_send_step (Core, 3*tout)) > 0) { frags--; gu_info ("frags: %ld", frags); // usleep (1000); } fail_if (ret != 0, "gcs_core_send_step() returned: %ld (%s)", ret, strerror(-ret)); fail_if (frags != 0, "frags = %ld, instead of 0", frags); fail_if (CORE_SEND_END (&act_s, act_size)); fail_if (CORE_RECV_ACT (&act_r, act_buf, act_size, GCS_ACT_TORDERED)); ret = gcs_core_set_last_applied (Core, Seqno); fail_if (ret != 0, "gcs_core_set_last_applied(): %ld (%s)", ret, strerror(-ret)); fail_if (CORE_RECV_ACT (&act_r, NULL, sizeof(gcs_seqno_t), GCS_ACT_COMMIT_CUT)); fail_if (Seqno != gcs_seqno_gtoh(*(gcs_seqno_t*)act_r.out)); free (act_r.out); } // send fake flow control action, its contents is not important gcs_core_send_fc (Core, act, act_size); fail_if (ret != 0, "gcs_core_send_fc(): %ld (%s)", ret, strerror(-ret)); fail_if (CORE_RECV_ACT(&act_r, act, act_size, GCS_ACT_FLOW)); core_test_cleanup (); } END_TEST // do a single send step, compare with the expected result static inline bool CORE_SEND_STEP (gcs_core_t* 
core, long timeout, long ret) { long err = gcs_core_send_step (core, timeout); FAIL_IF (err < 0, "gcs_core_send_step(): %ld (%s)", err, strerror (-err)); if (ret >= 0) { FAIL_IF (err != ret, "gcs_core_send_step(): expected %ld, got %ld", ret, err); } return false; } static bool DUMMY_INJECT_COMPONENT (gcs_backend_t* backend, const gcs_comp_msg_t* comp) { long ret = gcs_dummy_inject_msg (Backend, comp, gcs_comp_msg_size(comp), GCS_MSG_COMPONENT, GCS_SENDER_NONE); FAIL_IF (ret <= 0, "gcs_dummy_inject_msg(): %ld (%s)", ret, strerror(ret)); return false; } static bool DUMMY_INSTALL_COMPONENT (gcs_backend_t* backend, const gcs_comp_msg_t* comp) { bool primary = gcs_comp_msg_primary (comp); long my_idx = gcs_comp_msg_self (comp); long members = gcs_comp_msg_num (comp); action_t act; FAIL_IF (gcs_dummy_set_component(Backend, comp), "", NULL); FAIL_IF (DUMMY_INJECT_COMPONENT (Backend, comp), "", NULL); FAIL_IF (CORE_RECV_ACT (&act, NULL, UNKNOWN_SIZE, GCS_ACT_CONF), "", NULL); FAIL_IF (core_test_check_conf((const gcs_act_conf_t*)act.out, primary, my_idx, members), "", NULL); free (act.out); return false; } START_TEST (gcs_core_test_own) { long const tout = 1000; // 100 ms timeout const struct gu_buf* act = act2; const void* act_buf = act2_str; size_t act_size = sizeof(act2_str); action_t act_s(act, NULL, NULL, act_size, GCS_ACT_TORDERED, -1, (gu_thread_t)-1); action_t act_r(act, NULL, NULL, -1, (gcs_act_type_t)-1, -1, (gu_thread_t)-1); // Create primary and non-primary component messages gcs_comp_msg_t* prim = gcs_comp_msg_new (true, false, 0, 1, 0); gcs_comp_msg_t* non_prim = gcs_comp_msg_new (false, false, 0, 1, 0); fail_if (NULL == prim); fail_if (NULL == non_prim); gcs_comp_msg_add (prim, "node1", 0); gcs_comp_msg_add (non_prim, "node1", 1); core_test_init (); ///////////////////////////////////////////// /// check behaviour in transitional state /// ///////////////////////////////////////////// fail_if (CORE_RECV_START (&act_r)); fail_if (CORE_SEND_START (&act_s)); fail_if (CORE_SEND_STEP (Core, tout, 1)); // 1st frag usleep (10000); // resolve race between sending and setting transitional gcs_dummy_set_transitional (Backend); fail_if (CORE_SEND_STEP (Core, tout, 1)); // 2nd frag fail_if (CORE_SEND_STEP (Core, tout, 0)); // no frags left fail_if (NULL != act_r.out); // should not have received anything fail_if (gcs_dummy_set_component (Backend, prim)); // return to PRIM state fail_if (CORE_SEND_END (&act_s, act_size)); fail_if (CORE_RECV_END (&act_r, act_buf, act_size, GCS_ACT_TORDERED)); /* * TEST CASE 1: Action was sent successfully, but NON_PRIM component * happened before any fragment could be delivered. * EXPECTED OUTCOME: action is received with -ENOTCONN instead of global * seqno */ fail_if (DUMMY_INJECT_COMPONENT (Backend, non_prim)); fail_if (CORE_SEND_START (&act_s)); fail_if (CORE_SEND_STEP (Core, tout, 1)); // 1st frag fail_if (CORE_SEND_STEP (Core, tout, 1)); // 2nd frag fail_if (CORE_SEND_END (&act_s, act_size)); fail_if (gcs_dummy_set_component(Backend, non_prim)); fail_if (CORE_RECV_ACT (&act_r, NULL, UNKNOWN_SIZE, GCS_ACT_CONF)); fail_if (core_test_check_conf((const gcs_act_conf_t*)act_r.out, false, 0, 1)); free (act_r.out); fail_if (CORE_RECV_ACT (&act_r, act_buf, act_size, GCS_ACT_TORDERED)); fail_if (-ENOTCONN != act_r.seqno, "Expected -ENOTCONN, received %ld (%s)", act_r.seqno, strerror (-act_r.seqno)); /* * TEST CASE 2: core in NON_PRIM state. There is attempt to send an * action. * EXPECTED OUTCOME: CORE_SEND_END should return -ENOTCONN after 1st * fragment send fails. 
*/ fail_if (CORE_SEND_START (&act_s)); fail_if (CORE_SEND_STEP (Core, tout, 1)); // 1st frag fail_if (CORE_SEND_STEP (Core, tout, 0)); // bail out after 1st frag fail_if (CORE_SEND_END (&act_s, -ENOTCONN)); /* * TEST CASE 3: Backend in NON_PRIM state. There is attempt to send an * action. * EXPECTED OUTCOME: CORE_SEND_END should return -ENOTCONN after 1st * fragment send fails. */ fail_if (DUMMY_INSTALL_COMPONENT (Backend, prim)); fail_if (gcs_dummy_set_component(Backend, non_prim)); fail_if (DUMMY_INJECT_COMPONENT (Backend, non_prim)); fail_if (CORE_SEND_START (&act_s)); fail_if (CORE_SEND_STEP (Core, tout, 1)); // 1st frag fail_if (CORE_SEND_END (&act_s, -ENOTCONN)); fail_if (CORE_RECV_ACT (&act_r, NULL, UNKNOWN_SIZE, GCS_ACT_CONF)); fail_if (core_test_check_conf((const gcs_act_conf_t*)act_r.out, false, 0, 1)); free (act_r.out); /* * TEST CASE 4: Action was sent successfully, but NON_PRIM component * happened in between delivered fragments. * EXPECTED OUTCOME: action is received with -ENOTCONN instead of global * seqno. */ fail_if (DUMMY_INSTALL_COMPONENT (Backend, prim)); fail_if (CORE_SEND_START (&act_s)); fail_if (CORE_SEND_STEP (Core, tout, 1)); // 1st frag fail_if (DUMMY_INJECT_COMPONENT (Backend, non_prim)); fail_if (CORE_SEND_STEP (Core, tout, 1)); // 2nd frag fail_if (CORE_SEND_END (&act_s, act_size)); fail_if (CORE_RECV_ACT (&act_r, NULL, UNKNOWN_SIZE, GCS_ACT_CONF)); fail_if (core_test_check_conf((const gcs_act_conf_t*)act_r.out, false, 0, 1)); free (act_r.out); fail_if (CORE_RECV_ACT (&act_r, act_buf, act_size, GCS_ACT_TORDERED)); fail_if (-ENOTCONN != act_r.seqno, "Expected -ENOTCONN, received %ld (%s)", act_r.seqno, strerror (-act_r.seqno)); /* * TEST CASE 5: Action is being sent and received concurrently. In between * two fragments recv thread receives NON_PRIM and then PRIM components. * EXPECTED OUTCOME: CORE_RECV_ACT should receive the action with -ERESTART * instead of seqno. */ fail_if (DUMMY_INSTALL_COMPONENT (Backend, prim)); fail_if (CORE_SEND_START (&act_s)); fail_if (CORE_SEND_STEP (Core, tout, 1)); // 1st frag usleep (100000); // make sure 1st fragment gets in before new component fail_if (DUMMY_INSTALL_COMPONENT (Backend, non_prim)); fail_if (DUMMY_INSTALL_COMPONENT (Backend, prim)); fail_if (CORE_SEND_STEP (Core, tout, 1)); // 2nd frag fail_if (CORE_SEND_END (&act_s, act_size)); fail_if (CORE_RECV_ACT (&act_r, act_buf, act_size, GCS_ACT_TORDERED)); fail_if (-ERESTART != act_r.seqno, "Expected -ERESTART, received %ld (%s)", act_r.seqno, strerror (-act_r.seqno)); /* * TEST CASE 6: Action has 3 fragments, 2 were sent successfully but the * 3rd failed because backend is in NON_PRIM. In addition NON_PRIM component * happened in between delivered fragments. * subcase 1: new component received first * subcase 2: 3rd fragment is sent first * EXPECTED OUTCOME: CORE_SEND_END should return -ENOTCONN after 3rd * fragment send fails. 
*/ act = act3; act_buf = act3_str; act_size = sizeof(act3_str); act_s.in = act; act_s.size = act_size; // subcase 1 fail_if (DUMMY_INSTALL_COMPONENT (Backend, prim)); fail_if (CORE_SEND_START (&act_s)); fail_if (CORE_SEND_STEP (Core, tout, 1)); // 1st frag fail_if (DUMMY_INJECT_COMPONENT (Backend, non_prim)); fail_if (CORE_SEND_STEP (Core, tout, 1)); // 2nd frag usleep (500000); // fail_if_seq fail_if (gcs_dummy_set_component(Backend, non_prim)); fail_if (CORE_RECV_ACT (&act_r, NULL, UNKNOWN_SIZE, GCS_ACT_CONF)); fail_if (core_test_check_conf((const gcs_act_conf_t*)act_r.out, false, 0, 1)); free (act_r.out); fail_if (CORE_SEND_STEP (Core, tout, 1)); // 3rd frag fail_if (CORE_SEND_END (&act_s, -ENOTCONN)); // subcase 2 fail_if (DUMMY_INSTALL_COMPONENT (Backend, prim)); fail_if (CORE_SEND_START (&act_s)); fail_if (CORE_SEND_STEP (Core, tout, 1)); // 1st frag fail_if (DUMMY_INJECT_COMPONENT (Backend, non_prim)); fail_if (CORE_SEND_STEP (Core, tout, 1)); // 2nd frag usleep (1000000); fail_if (gcs_dummy_set_component(Backend, non_prim)); fail_if (CORE_SEND_STEP (Core, 4*tout, 1)); // 3rd frag fail_if (CORE_RECV_ACT (&act_r, NULL, UNKNOWN_SIZE, GCS_ACT_CONF)); fail_if (core_test_check_conf((const gcs_act_conf_t*)act_r.out, false, 0, 1)); free (act_r.out); fail_if (CORE_SEND_END (&act_s, -ENOTCONN)); gu_free (prim); gu_free (non_prim); core_test_cleanup (); } END_TEST /* * Disabled test because it is too slow and timeouts on crowded * build systems like e.g. build.opensuse.org START_TEST (gcs_core_test_gh74) { core_test_init(true, "node1"); // set frag size large enough to avoid fragmentation. gu_info ("set payload size = 1024"); core_test_set_payload_size(1024); // new primary comp message. gcs_comp_msg_t* prim = gcs_comp_msg_new (true, false, 0, 2, 0); fail_if (NULL == prim); gcs_comp_msg_add(prim, "node1", 0); gcs_comp_msg_add(prim, "node2", 1); // construct state transform request. static const char* req_ptr = "12345"; static const size_t req_size = 6; static const char* donor = ""; // from *any* static const size_t donor_len = strlen(donor) + 1; size_t act_size = req_size + donor_len; char* act_ptr = 0; act_ptr = (char*)gu_malloc(act_size); memcpy(act_ptr, donor, donor_len); memcpy(act_ptr + donor_len, req_ptr, req_size); // serialize request into message. gcs_act_frag_t frg; frg.proto_ver = gcs_core_group_protocol_version(Core); frg.frag_no = 0; frg.act_id = 1; frg.act_size = act_size; frg.act_type = GCS_ACT_STATE_REQ; char msg_buf[1024]; fail_if(gcs_act_proto_write(&frg, msg_buf, sizeof(msg_buf))); memcpy(const_cast(frg.frag), act_ptr, act_size); size_t msg_size = act_size + gcs_act_proto_hdr_size(frg.proto_ver); // gu_free(act_ptr); // state exchange message. gu_uuid_t state_uuid; gu_uuid_generate(&state_uuid, NULL, 0); gcs_core_set_state_uuid(Core, &state_uuid); // construct uuid message from node1. 
size_t uuid_len = sizeof(state_uuid); char uuid_buf[uuid_len]; memcpy(uuid_buf, &state_uuid, uuid_len); gcs_state_msg_t* state_msg = NULL; const gcs_group_t* group = gcs_core_get_group(Core); // state exchange message from node1 state_msg = gcs_group_get_state(group); state_msg->state_uuid = state_uuid; size_t state_len = gcs_state_msg_len (state_msg); char state_buf[state_len]; gcs_state_msg_write (state_buf, state_msg); gcs_state_msg_destroy (state_msg); // state exchange message from node2 state_msg = gcs_state_msg_create(&state_uuid, &GU_UUID_NIL, &GU_UUID_NIL, GCS_SEQNO_ILL, GCS_SEQNO_ILL, GCS_SEQNO_ILL, 0, GCS_NODE_STATE_NON_PRIM, GCS_NODE_STATE_PRIM, "node2", "127.0.0.1", group->gcs_proto_ver, group->repl_proto_ver, group->appl_proto_ver, 0); size_t state_len2 = gcs_state_msg_len (state_msg); char state_buf2[state_len2]; gcs_state_msg_write (state_buf2, state_msg); gcs_state_msg_destroy (state_msg); action_t act_r(NULL, NULL, NULL, -1, (gcs_act_type_t)-1, -1, (gu_thread_t)-1); // ========== from node1's view ========== fail_if (gcs_dummy_set_component(Backend, prim)); fail_if (DUMMY_INJECT_COMPONENT(Backend, prim)); gu_free(prim); CORE_RECV_START(&act_r); // we have to start another thread here. // otherwise messages to node1 can not be in right order. for(;;) { usleep(10000); // make sure node1 already changed its status to WAIT_STATE_MSG if (gcs_group_state(group) == GCS_GROUP_WAIT_STATE_MSG) { break; } } // then STR sneaks before new configuration is delivered. fail_if (gcs_dummy_inject_msg(Backend, msg_buf, msg_size, GCS_MSG_ACTION, 1) != (int)msg_size); // then state exchange message from node2. fail_if (gcs_dummy_inject_msg(Backend, state_buf2, state_len2, GCS_MSG_STATE_MSG, 1) != (int)state_len2); // expect STR is lost here. fail_if (CORE_RECV_END(&act_r, NULL, UNKNOWN_SIZE, GCS_ACT_CONF)); fail_if (core_test_check_conf((const gcs_act_conf_t*)act_r.out, true, 0, 2)); free(act_r.out); core_test_cleanup(); // ========== from node2's view ========== core_test_init(false, "node2"); // set frag size large enough to avoid fragmentation. gu_info ("set payload size = 1024"); core_test_set_payload_size(1024); prim = gcs_comp_msg_new (true, false, 1, 2, 0); fail_if (NULL == prim); gcs_comp_msg_add(prim, "node1", 0); gcs_comp_msg_add(prim, "node2", 1); // node1 and node2 joins. // now node2's status == GCS_NODE_STATE_PRIM fail_if (gcs_dummy_set_component(Backend, prim)); fail_if (DUMMY_INJECT_COMPONENT(Backend, prim)); gu_free(prim); fail_if (gcs_dummy_inject_msg(Backend, uuid_buf, uuid_len, GCS_MSG_STATE_UUID, 0) != (int)uuid_len); fail_if (gcs_dummy_inject_msg(Backend, state_buf, state_len, GCS_MSG_STATE_MSG, 0) != (int)state_len); fail_if (CORE_RECV_ACT(&act_r, NULL, UNKNOWN_SIZE, GCS_ACT_CONF)); fail_if (core_test_check_conf((const gcs_act_conf_t*)act_r.out, true, 1, 2)); free(act_r.out); // then node3 joins. prim = gcs_comp_msg_new (true, false, 1, 3, 0); fail_if (NULL == prim); gcs_comp_msg_add(prim, "node1", 0); gcs_comp_msg_add(prim, "node2", 1); gcs_comp_msg_add(prim, "node3", 2); fail_if (gcs_dummy_set_component(Backend, prim)); fail_if (DUMMY_INJECT_COMPONENT(Backend, prim)); gu_free(prim); // generate a new state uuid. 
gu_uuid_generate(&state_uuid, NULL, 0); memcpy(uuid_buf, &state_uuid, uuid_len); // state exchange message from node3 group = gcs_core_get_group(Core); state_msg = gcs_state_msg_create(&state_uuid, &GU_UUID_NIL, &GU_UUID_NIL, GCS_SEQNO_ILL, GCS_SEQNO_ILL, GCS_SEQNO_ILL, 0, GCS_NODE_STATE_NON_PRIM, GCS_NODE_STATE_PRIM, "node3", "127.0.0.1", group->gcs_proto_ver, group->repl_proto_ver, group->appl_proto_ver, 0); size_t state_len3 = gcs_state_msg_len (state_msg); char state_buf3[state_len3]; gcs_state_msg_write (state_buf3, state_msg); gcs_state_msg_destroy (state_msg); // updating state message from node1. group = gcs_core_get_group(Core); state_msg = gcs_group_get_state(group); state_msg->flags = GCS_STATE_FREP | GCS_STATE_FCLA; state_msg->prim_state = GCS_NODE_STATE_JOINED; state_msg->current_state = GCS_NODE_STATE_SYNCED; state_msg->state_uuid = state_uuid; state_msg->name = "node1"; gcs_state_msg_write(state_buf, state_msg); gcs_state_msg_destroy(state_msg); fail_if (gcs_dummy_inject_msg(Backend, uuid_buf, uuid_len, GCS_MSG_STATE_UUID, 0) != (int)uuid_len); fail_if (gcs_dummy_inject_msg(Backend, state_buf, state_len, GCS_MSG_STATE_MSG, 0) != (int)state_len); // STR sneaks. // we have to make same message exists in sender queue too. // otherwise we will get following log // "FIFO violation: queue empty when local action received" const struct gu_buf act = {act_ptr, (ssize_t)act_size}; action_t act_s(&act, NULL, NULL, act_size, GCS_ACT_STATE_REQ, -1, (gu_thread_t)-1); CORE_SEND_START(&act_s); for(;;) { usleep(10000); gcs_fifo_lite_t* fifo = gcs_core_get_fifo(Core); void* item = gcs_fifo_lite_get_head(fifo); if (item) { gcs_fifo_lite_release(fifo); break; } } fail_if (gcs_dummy_inject_msg(Backend, msg_buf, msg_size, GCS_MSG_ACTION, 1) != (int)msg_size); fail_if (gcs_dummy_inject_msg(Backend, state_buf3, state_len3, GCS_MSG_STATE_MSG, 2) != (int)state_len3); // expect STR and id == -EAGAIN. 
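    // Why -EAGAIN is expected below (an inference from this scenario, not a
    // statement about gcs internals): the STR from node2 was queued under the
    // old configuration, and by the time it is delivered the group is in the
    // middle of the membership/state exchange triggered by node3's join, so
    // the request cannot be ordered yet and comes back with seqno -EAGAIN,
    // presumably meaning the joiner should simply resend it once the new
    // configuration has settled.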
fail_if (CORE_RECV_ACT(&act_r, act_ptr, act_size, GCS_ACT_STATE_REQ)); fail_if (act_r.seqno != -EAGAIN); free(act_r.out); fail_if (CORE_RECV_ACT(&act_r, NULL, UNKNOWN_SIZE, GCS_ACT_CONF)); fail_if (core_test_check_conf((const gcs_act_conf_t*)act_r.out, true, 1, 3)); free(act_r.out); // core_test_cleanup(); // ========== gu_free(act_ptr); } END_TEST */ #if 0 // requires multinode support from gcs_dummy START_TEST (gcs_core_test_foreign) { core_test_init (); core_test_cleanup (); } END_TEST #endif // 0 Suite *gcs_core_suite(void) { Suite *suite = suite_create("GCS core context"); TCase *tcase = tcase_create("gcs_core"); suite_add_tcase (suite, tcase); tcase_set_timeout(tcase, 60); bool skip = false; if (skip == false) { tcase_add_test (tcase, gcs_core_test_api); tcase_add_test (tcase, gcs_core_test_own); // tcase_add_test (tcase, gcs_core_test_foreign); // tcase_add_test (tcase, gcs_core_test_gh74); } return suite; } galera-3-25.3.20/gcs/src/unit_tests/gcs_group_test.cpp0000644000015300001660000005176413042054732022452 0ustar jenkinsjenkins/* * Copyright (C) 2008-2014 Codership Oy * * $Id$ */ #include #include #include #include #include "../gcs_group.hpp" #include "../gcs_act_proto.hpp" #include "../gcs_comp_msg.hpp" #include #include "gcs_group_test.hpp" #define TRUE (0 == 0) #define FALSE (!TRUE) /* * header will be written to buf from frg, act_len of payload will be copied * from act, msg structure will be filled in */ static void msg_write (gcs_recv_msg_t* msg, gcs_act_frag_t* frg, char* buf, size_t buf_len, const char* data, size_t data_len, long sender_idx, gcs_msg_type_t type) { long ret; ret = gcs_act_proto_write (frg, buf, buf_len); fail_if (ret, "error code: %d", ret); fail_if (frg->frag == NULL); fail_if (frg->frag_len < data_len, "Resulting frag_len %lu is less than required act_len %lu\n" "Refactor the test and increase buf_len.", frg->frag_len,data_len); memcpy ((void*)frg->frag, data, data_len); msg->buf = buf; msg->buf_len = buf_len; msg->size = (buf_len - frg->frag_len + data_len); msg->sender_idx = sender_idx; msg->type = type; } static long new_component (gcs_group_t* group, const gcs_comp_msg_t* comp) { long ret = gcs_group_handle_comp_msg (group, comp); // modelling real state exchange is really tedious here, just fake it // group->state = GCS_GROUP_PRIMARY; return ret; } // just pretend we received SYNC message //#define RECEIVE_SYNC() group.new_memb = FALSE; #define RECEIVE_SYNC() #define LOCALHOST "localhost" #define REMOTEHOST "remotehost" #define DISTANTHOST "distanthost" // This tests tests configuration changes START_TEST (gcs_group_configuration) { ssize_t ret; gcs_group_t group; gcs_seqno_t seqno = 1; // The Action const char act_buf[] = "Test action smuction"; ssize_t act_len = sizeof (act_buf); // lengths of three fragments of the action long frag1_len = act_len / 3; long frag2_len = frag1_len; long frag3_len = act_len - frag1_len - frag2_len; // pointer to the three fragments of the action const char* frag1 = act_buf; const char* frag2 = frag1 + frag1_len; const char* frag3 = frag2 + frag2_len; // message buffers const long buf_len = 64; char buf1[buf_len], buf2[buf_len], buf3[buf_len], buf4[buf_len], buf5[buf_len]; // recv message structures gcs_recv_msg_t msg1, msg2, msg3, msg4, msg5; gcs_act_frag_t frg1, frg2, frg3, frg4, frg5, frg; struct gcs_act_rcvd r_act; struct gcs_act* act = &r_act.act; gcs_comp_msg_t* comp; mark_point(); #ifndef NDEBUG // debug build breaks the test due to asserts return; #endif // Initialize message parameters frg1.act_id = 
getpid(); frg1.act_size = act_len; frg1.frag = NULL; frg1.frag_len = 0; frg1.frag_no = 0; frg1.act_type = GCS_ACT_TORDERED; frg1.proto_ver = 0; // normal fragments frg2 = frg3 = frg1; frg2.frag_no = frg1.frag_no + 1; frg3.frag_no = frg2.frag_no + 1; // bad fragmets to be tried instead of frg2 frg4 = frg5 = frg2; frg4.act_id = frg2.act_id + 1; // wrong action id frg5.act_type = GCS_ACT_SERVICE; // wrong action type mark_point(); msg_write (&msg1, &frg1, buf1, buf_len, frag1, frag1_len, 0,GCS_MSG_ACTION); msg_write (&msg2, &frg2, buf2, buf_len, frag2, frag2_len, 0,GCS_MSG_ACTION); msg_write (&msg3, &frg3, buf3, buf_len, frag3, frag3_len, 0,GCS_MSG_ACTION); msg_write (&msg4, &frg4, buf4, buf_len, "4444", 4, 0, GCS_MSG_ACTION); msg_write (&msg5, &frg5, buf5, buf_len, "55555", 5, 0, GCS_MSG_ACTION); mark_point(); // ready gcs_group_init (&group, NULL, "my node", "my addr", 0, 0, 0); fail_if (gcs_group_is_primary(&group)); fail_if (group.num != 1); // Prepare first primary component message containing only one node comp = gcs_comp_msg_new (TRUE, false, 0, 1, 0); fail_if (comp == NULL); fail_if (gcs_comp_msg_add (comp, LOCALHOST, 0)); ret = new_component (&group, comp); fail_if (ret < 0); // fail_if (!gcs_group_is_primary(&group)); // fail_if (!gcs_group_new_members(&group)); RECEIVE_SYNC(); #define TRY_MESSAGE(msg) \ ret = gcs_act_proto_read (&frg, (msg).buf, (msg).size); \ ret = gcs_group_handle_act_msg (&group, &frg, &(msg), &r_act, true); // 1. Try fragment that is not the first memset (&r_act, 0, sizeof(r_act)); // ret = gcs_group_handle_act_msg (&group, &frg, &msg3, &r_act); TRY_MESSAGE(msg3); fail_if (ret != -EPROTO); fail_if (act->buf != NULL); fail_if (act->buf_len != 0); mark_point(); // 2. Try first fragment // ret = gcs_group_handle_act_msg (&group, &msg1, &r_act); TRY_MESSAGE(msg1); fail_if (ret != 0); fail_if (act->buf != NULL); fail_if (act->buf_len != 0); #define TRY_WRONG_2ND_FRAGMENT(frag) \ /*ret = gcs_group_handle_act_msg (&group, &frag, &r_act);*/ \ TRY_MESSAGE(frag); \ fail_if (ret != -EPROTO); \ fail_if (act->buf_len != 0); // 3. Try first fragment again gu_debug (""); TRY_WRONG_2ND_FRAGMENT(msg1); gu_debug (""); // 4. Try third fragment TRY_WRONG_2ND_FRAGMENT(msg3); // 5. Try fouth fragment TRY_WRONG_2ND_FRAGMENT(msg4); // 6. Try fifth fragment TRY_WRONG_2ND_FRAGMENT(msg5); // 7. Try correct second fragment // ret = gcs_group_handle_act_msg (&group, &msg2, &r_act); TRY_MESSAGE(msg2); fail_if (ret != 0); fail_if (act->buf != NULL); act->buf = (void*)0x12354; // shall be NULLed fail_if (act->buf_len != 0); // 8. Try third fragment, last one // ret = gcs_group_handle_act_msg (&group, &msg3, &r_act); TRY_MESSAGE(msg3); fail_if (ret != act_len); fail_if (r_act.sender_idx != 0); fail_if (act->buf != NULL); // local action, must be fetched from local fifo fail_if (act->buf_len != act_len); fail_if (r_act.id != seqno, "Expected seqno %llu, found %llu", seqno, r_act.id); seqno++; // cleanup memset (&r_act, 0, sizeof(r_act)); // 10. New component message gcs_comp_msg_delete (comp); comp = gcs_comp_msg_new (TRUE, false, 1, 2, 0); fail_if (comp == NULL); fail_if (gcs_comp_msg_add (comp, REMOTEHOST, 1) < 0); fail_if (gcs_comp_msg_add (comp, LOCALHOST, 0) < 0); ret = new_component (&group, comp); fail_if (ret < 0); // fail_if (!gcs_group_is_primary(&group)); // fail_if (!gcs_group_new_members(&group)); RECEIVE_SYNC(); // 11. 
Try the same with foreign action (now my index is 1, sender is 0) // ret = gcs_group_handle_act_msg (&group, &msg1, &r_act); TRY_MESSAGE(msg1); fail_if (ret != 0); fail_if (act->buf_len != 0); fail_if (act->buf != NULL); // ret = gcs_group_handle_act_msg (&group, &msg2, &r_act); TRY_MESSAGE(msg2); fail_if (ret != 0); fail_if (act->buf_len != 0); fail_if (act->buf != NULL); // ret = gcs_group_handle_act_msg (&group, &msg3, &r_act); TRY_MESSAGE(msg3); fail_if (ret != act_len, "Expected ret = %zd, got %zd", act_len, ret); fail_if (act->buf_len != act_len); fail_if (act->buf == NULL); fail_if (strncmp((const char*)act->buf, act_buf, act_len), "Action received: '%s', expected '%s'", act_buf); fail_if (r_act.sender_idx != 0); fail_if (act->type != GCS_ACT_TORDERED); fail_if (r_act.id != seqno, "Expected seqno %llu, found %llu", seqno, r_act.id); seqno++; // cleanup free ((void*)act->buf); memset (&r_act, 0, sizeof(r_act)); // 12. Try foreign action with a new node joined in the middle. gcs_comp_msg_delete (comp); comp = gcs_comp_msg_new (TRUE, false, 1, 3, 0); fail_if (comp == NULL); fail_if (gcs_comp_msg_add (comp, REMOTEHOST, 1) < 0); fail_if (gcs_comp_msg_add (comp, LOCALHOST, 0) < 0); fail_if (gcs_comp_msg_add (comp, DISTANTHOST,2) < 0); // ret = gcs_group_handle_act_msg (&group, &msg1, &r_act); TRY_MESSAGE(msg1); fail_if (ret != 0); fail_if (act->buf_len != 0); fail_if (act->buf != NULL); ret = new_component (&group, comp); fail_if (ret < 0); // fail_if (!gcs_group_is_primary(&group)); // fail_if (!gcs_group_new_members(&group)); RECEIVE_SYNC(); // now I must be able to resend the action from scratch // ret = gcs_group_handle_act_msg (&group, &msg1, &r_act); TRY_MESSAGE(msg1); fail_if (ret != 0); fail_if (act->buf_len != 0); fail_if (act->buf != NULL); // ret = gcs_group_handle_act_msg (&group, &msg2, &r_act); TRY_MESSAGE(msg2); fail_if (ret != 0); fail_if (act->buf_len != 0); fail_if (act->buf != NULL); // ret = gcs_group_handle_act_msg (&group, &msg3, &r_act); TRY_MESSAGE(msg3); fail_if (ret != act_len); fail_if (act->buf_len != act_len); fail_if (act->buf == NULL); fail_if (strncmp((const char*)act->buf, act_buf, act_len), "Action received: '%s', expected '%s'", act_buf); fail_if (r_act.sender_idx != 0); fail_if (act->type != GCS_ACT_TORDERED); fail_if (r_act.id != seqno, "Expected seqno %llu, found %llu", seqno, r_act.id); seqno++; // cleanup free ((void*)act->buf); memset (&r_act, 0, sizeof(r_act)); // 13. 
Try to send an action with one node disappearing in the middle // and order of nodes changed // 13.1 Each node sends a message // ret = gcs_group_handle_act_msg (&group, &msg1, &r_act); TRY_MESSAGE(msg1); fail_if (ret != 0); fail_if (act->buf_len != 0); fail_if (act->buf != NULL); msg_write (&msg1, &frg1, buf1, buf_len, frag1, frag1_len, 1,GCS_MSG_ACTION); // ret = gcs_group_handle_act_msg (&group, &msg1, &r_act); TRY_MESSAGE(msg1); fail_if (ret != 0); fail_if (act->buf_len != 0); fail_if (act->buf != NULL); msg_write (&msg1, &frg1, buf1, buf_len, frag1, frag1_len, 2,GCS_MSG_ACTION); // ret = gcs_group_handle_act_msg (&group, &msg1, &r_act); TRY_MESSAGE(msg1); fail_if (ret != 0); fail_if (act->buf_len != 0); fail_if (act->buf != NULL); // 13.2 configuration changes, one node disappears // (REMOTEHOST, LOCALHOST, DISTANTHOST) -> (LOCALHOST, REMOTEHOST) gcs_comp_msg_delete (comp); comp = gcs_comp_msg_new (TRUE, false, 0, 2, 0); fail_if (comp == NULL); fail_if (gcs_comp_msg_add (comp, LOCALHOST, 0) < 0); fail_if (gcs_comp_msg_add (comp, REMOTEHOST,1) < 0); ret = new_component (&group, comp); fail_if (ret < 0); // fail_if (!gcs_group_is_primary(&group)); // fail_if (gcs_group_new_members(&group), "Nodes: %d: node0 - '%s', " // "node1 - '%s'", group.num, // group.nodes[0].id, group.nodes[1].id); RECEIVE_SYNC(); gcs_comp_msg_delete (comp); return; // 13.3 now I just continue sending messages // ret = gcs_group_handle_act_msg (&group, &msg2, &r_act); // local TRY_MESSAGE(msg2); fail_if (ret != 0, "%d (%s)", ret, strerror(-ret)); fail_if (act->buf_len != 0); fail_if (act->buf != NULL); msg_write (&msg2, &frg2, buf2, buf_len, frag2, frag2_len, 1,GCS_MSG_ACTION); // ret = gcs_group_handle_act_msg (&group, &msg2, &r_act); // foreign TRY_MESSAGE(msg2); fail_if (ret != 0); fail_if (act->buf_len != 0); fail_if (act->buf != NULL); act->buf = (void*)0x11111; // shall be NULLed below when local act is recvd // ret = gcs_group_handle_act_msg (&group, &msg3, &r_act); // local TRY_MESSAGE(msg3); fail_if (ret != act_len); fail_if (act->buf_len != act_len); fail_if (act->buf != NULL); fail_if (r_act.sender_idx != 0); fail_if (act->type != GCS_ACT_TORDERED); fail_if (r_act.id != seqno, "Expected seqno %llu, found %llu", seqno, r_act.id); seqno++; msg_write (&msg3, &frg3, buf3, buf_len, frag3, frag3_len, 1,GCS_MSG_ACTION); // ret = gcs_group_handle_act_msg (&group, &msg3, &r_act); // foreign TRY_MESSAGE(msg3); fail_if (ret != act_len); fail_if (act->buf_len != act_len); fail_if (act->buf == NULL); fail_if (strncmp((const char*)act->buf, act_buf, act_len), "Action received: '%s', expected '%s'", act_buf); fail_if (r_act.sender_idx != 1); fail_if (act->type != GCS_ACT_TORDERED); fail_if (r_act.id != seqno, "Expected seqno %llu, found %llu", seqno, r_act.id); seqno++; // cleanup free ((void*)act->buf); memset (&r_act, 0, sizeof(r_act)); // Leave group comp = gcs_comp_msg_new (FALSE, false, -1, 0, 0); fail_if (comp == NULL); ret = new_component (&group, comp); fail_if (ret < 0); // fail_if (gcs_group_is_primary(&group)); // comment until implemented: fail_if (!gcs_group_new_members(&group)); RECEIVE_SYNC(); } END_TEST static inline void group_set_last_msg (gcs_recv_msg_t* msg, gcs_seqno_t seqno) { *(gcs_seqno_t*)(msg->buf) = gcs_seqno_htog (seqno); } static inline gcs_seqno_t group_get_last_msg (gcs_recv_msg_t* msg) { return gcs_seqno_gtoh(*(gcs_seqno_t*)(msg->buf)); } // This tests last applied functionality START_TEST(gcs_group_last_applied) { long ret; gcs_group_t group; gcs_comp_msg_t* comp; gcs_recv_msg_t msg0, 
msg1, msg2, msg3; uint8_t buf0[sizeof(gcs_seqno_t)]; uint8_t buf1[sizeof(gcs_seqno_t)]; uint8_t buf2[sizeof(gcs_seqno_t)]; uint8_t buf3[sizeof(gcs_seqno_t)]; // set up message structures msg0.type = GCS_MSG_LAST; msg0.buf_len = sizeof(gcs_seqno_t); msg0.size = sizeof(gcs_seqno_t); msg1 = msg2 = msg3 = msg0; msg0.buf = buf0; msg1.buf = buf1; msg2.buf = buf2; msg3.buf = buf3; msg0.sender_idx = 0; msg1.sender_idx = 1; msg2.sender_idx = 2; msg3.sender_idx = 3; // Create 4-node component comp = gcs_comp_msg_new (TRUE, false, 0, 4, 0); fail_if (comp == NULL); fail_if (gcs_comp_msg_add (comp, LOCALHOST, 0) < 0); fail_if (gcs_comp_msg_add (comp, REMOTEHOST, 1) < 0); fail_if (gcs_comp_msg_add (comp, DISTANTHOST"1",2) < 0); fail_if (gcs_comp_msg_add (comp, DISTANTHOST"2",2) < 0); fail_if (gcs_comp_msg_add (comp, DISTANTHOST"2",2) >= 0); gcs_group_init(&group, NULL, "", "", 0, 0, 1); mark_point(); ret = new_component (&group, comp); fail_if (ret < 0); // fail_if (!gcs_group_is_primary(&group)); // fail_if (!gcs_group_new_members(&group)); RECEIVE_SYNC(); // 0, 0, 0, 0 fail_if (group.last_applied != 0); group_set_last_msg (&msg0, 1); fail_if (1 != group_get_last_msg(&msg0)); gcs_group_handle_last_msg (&group, &msg0); // 1, 0, 0, 0 fail_if (group.last_applied != 0); // smallest is still 0 group_set_last_msg (&msg1, 2); gcs_group_handle_last_msg (&group, &msg1); // 1, 2, 0, 0 fail_if (group.last_applied != 0); // smallest is still 0 group_set_last_msg (&msg2, 3); gcs_group_handle_last_msg (&group, &msg2); // 1, 2, 3, 0 fail_if (group.last_applied != 0); // smallest is still 0 group_set_last_msg (&msg3, 4); gcs_group_handle_last_msg (&group, &msg3); // 1, 2, 3, 4 fail_if (group.last_applied != 1); // now must be 1 group_set_last_msg (&msg1, 6); gcs_group_handle_last_msg (&group, &msg1); // 1, 6, 3, 4 fail_if (group.last_applied != 1); // now must still be 1 group_set_last_msg (&msg0, 7); gcs_group_handle_last_msg (&group, &msg0); // 7, 6, 3, 4 fail_if (group.last_applied != 3); // now must be 3 group_set_last_msg (&msg3, 8); gcs_group_handle_last_msg (&group, &msg3); // 7, 6, 3, 8 fail_if (group.last_applied != 3); // must still be 3 // remove the lagging node gcs_comp_msg_delete(comp); comp = gcs_comp_msg_new (TRUE, false, 0, 3, 0); fail_if (comp == NULL); fail_if (gcs_comp_msg_add (comp, LOCALHOST, 0) < 0); fail_if (gcs_comp_msg_add (comp, REMOTEHOST, 1) < 0); fail_if (gcs_comp_msg_add (comp, DISTANTHOST"2",2) < 0); ret = new_component (&group, comp); fail_if (ret < 0); // fail_if (!gcs_group_is_primary(&group)); // fail_if (gcs_group_new_members(&group)); RECEIVE_SYNC(); // 7, 6, 8 fail_if (group.last_applied != 6, "Expected %u, got %llu\nGroup: %d: %s, %s, %s", 6, group.last_applied, group.num, group.nodes[0].id, group.nodes[1].id,group.nodes[2].id); // add new node gcs_comp_msg_delete(comp); comp = gcs_comp_msg_new (TRUE, false, 0, 4, 0); fail_if (comp == NULL); fail_if (gcs_comp_msg_add (comp, LOCALHOST, 0) < 0); fail_if (gcs_comp_msg_add (comp, REMOTEHOST, 1) < 0); fail_if (gcs_comp_msg_add (comp, DISTANTHOST"2",2) < 0); fail_if (gcs_comp_msg_add (comp, DISTANTHOST"1",2) < 0); ret = new_component (&group, comp); fail_if (ret < 0); // fail_if (!gcs_group_is_primary(&group)); // fail_if (!gcs_group_new_members(&group)); // 7, 6, 8, 0 fail_if (group.last_applied != 0); } END_TEST START_TEST(test_gcs_group_find_donor) { gcs_group_t group; gcs_group_init(&group, NULL, "", "", 0, 0, 0); const char* s_group_uuid = "0d0d0d0d-0d0d-0d0d-0d0d-0d0d0d0d0d0d"; gu_uuid_scan(s_group_uuid, strlen(s_group_uuid), 
&group.group_uuid); gu_uuid_t* group_uuid = &group.group_uuid; gu_uuid_t empty_uuid; memset(&empty_uuid, 0, sizeof(empty_uuid)); // five nodes // idx name segment seqno // 0th home0 0 90 // 1th home1 0 95 // 2th home2 0 105 // 3th home3 0(joiner)100 // 4th home4 1 90 // 5th home5 1 95 // 6th home6 1 105 const int number = 7; group.nodes = (gcs_node_t*)malloc(sizeof(gcs_node_t) * number); group.num = number; const gcs_seqno_t seqnos[] = {90, 95, 105, 100, 90, 95, 105}; gcs_node_t* nodes = group.nodes; const int joiner = 3; const gcs_seqno_t ist_seqno = 100; for(int i = 0; i < number; i++) { char name[32]; snprintf(name, sizeof(name), "home%d", i); gcs_node_init(&nodes[i], NULL, name, name, "", 0, 0, 0, i > joiner ? 1 : 0); nodes[i].status = GCS_NODE_STATE_SYNCED; nodes[i].state_msg = gcs_state_msg_create( &empty_uuid, &empty_uuid, &empty_uuid, 0, 0, seqnos[i], 0, GCS_NODE_STATE_SYNCED, GCS_NODE_STATE_SYNCED, "", "", 0, 0, 0, 0, 0); } group.quorum.act_id = 0; // in safe range. fail_if (group.quorum.gcs_proto_ver != -1); fail_if (group.gcs_proto_ver != 0); int donor = -1; const int sv = 2; // str version. #define SARGS(s) s, strlen(s) //========== sst ========== donor = gcs_group_find_donor(&group, sv, joiner, SARGS("home3"), &empty_uuid, GCS_SEQNO_ILL); fail_if(donor != -EHOSTDOWN); donor = gcs_group_find_donor(&group, sv, joiner, SARGS("home1,home2"), &empty_uuid, GCS_SEQNO_ILL); fail_if(donor != 1); nodes[1].status = GCS_NODE_STATE_JOINER; donor = gcs_group_find_donor(&group, sv, joiner, SARGS("home1,home2"), &empty_uuid, GCS_SEQNO_ILL); fail_if(donor != 2); nodes[1].status = GCS_NODE_STATE_SYNCED; // handle dangling comma. donor = gcs_group_find_donor(&group, sv, joiner, SARGS("home3,"), &empty_uuid, GCS_SEQNO_ILL); fail_if(donor != 0); // ========== ist ========== // by name. donor = gcs_group_find_donor(&group, sv, joiner, SARGS("home0,home1,home2"), group_uuid, ist_seqno); fail_if(donor != 1); group.quorum.act_id = 1498; // not in safe range. donor = gcs_group_find_donor(&group, sv, joiner, SARGS("home2"), group_uuid, ist_seqno); fail_if(donor != 2); group.quorum.act_id = 1497; // in safe range. in segment. donor = gcs_group_find_donor(&group, sv, joiner, SARGS("home2"), group_uuid, ist_seqno); fail_if(donor != 1); group.quorum.act_id = 1497; // in safe range. cross segment. 
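    /*
     * A note on the IST donor-selection sub-cases around this point, inferred
     * only from the assertions in this test and not from the internals of
     * gcs_group_find_donor(): the joiner asks for IST from seqno 100, and as
     * long as group.quorum.act_id stays within a "safe range" of that
     * position (1497 is still accepted above, 1498 is not), the search
     * appears free to prefer an IST-capable SYNCED donor even outside the
     * requested name list, taking the joiner's own segment first (home1) and
     * crossing segments (home5) below only once all same-segment candidates
     * have been turned into JOINERs. Outside the safe range it falls back to
     * the explicitly named donor (home2), as in the earlier sub-case.
     */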
nodes[0].status = GCS_NODE_STATE_JOINER; nodes[1].status = GCS_NODE_STATE_JOINER; nodes[2].status = GCS_NODE_STATE_JOINER; donor = gcs_group_find_donor(&group, sv, joiner, SARGS("home2"), group_uuid, ist_seqno); fail_if(donor != 5); nodes[0].status = GCS_NODE_STATE_SYNCED; nodes[1].status = GCS_NODE_STATE_SYNCED; nodes[2].status = GCS_NODE_STATE_SYNCED; #undef SARGS // todo: free for(int i = 0; i < number; i++) { gcs_state_msg_destroy((gcs_state_msg_t*)nodes[i].state_msg); } free(nodes); } END_TEST Suite *gcs_group_suite(void) { Suite *suite = suite_create("GCS group context"); TCase *tcase = tcase_create("gcs_group"); TCase *tcase_ignore = tcase_create("gcs_group"); suite_add_tcase (suite, tcase); tcase_add_test (tcase_ignore, gcs_group_configuration); tcase_add_test (tcase_ignore, gcs_group_last_applied); tcase_add_test (tcase, test_gcs_group_find_donor); return suite; } galera-3-25.3.20/gcs/src/unit_tests/gcs_fifo_test.cpp0000644000015300001660000000572413042054732022234 0ustar jenkinsjenkins// Copyright (C) 2007 Codership Oy // $Id$ #include #include "gcs_fifo_test.hpp" #include "../gcs_fifo_lite.hpp" #define FIFO_LENGTH 10 START_TEST (gcs_fifo_lite_test) { gcs_fifo_lite_t* fifo; long ret; long i; long* item; fifo = gcs_fifo_lite_create (0, 1); fail_if (fifo != NULL); fifo = gcs_fifo_lite_create (1, 0); fail_if (fifo != NULL); fifo = gcs_fifo_lite_create (1, 1); fail_if (fifo == NULL); ret = gcs_fifo_lite_destroy (fifo); fail_if (ret != 0, "gcs_fifo_lite_destroy() returned %d", ret); fifo = gcs_fifo_lite_create (FIFO_LENGTH, sizeof(i)); fail_if (fifo == NULL); fail_if (fifo->used != 0, "fifo->used is %z for an empty FIFO", fifo->used); gcs_fifo_lite_open (fifo); // fill FIFO for (i = 1; i <= FIFO_LENGTH; i++) { item = (long*)gcs_fifo_lite_get_tail (fifo); fail_if (NULL == item, "gcs_fifo_lite_get_tail() returned NULL"); *item = i; gcs_fifo_lite_push_tail (fifo); } fail_if (fifo->used != FIFO_LENGTH, "fifo->used is %zu, expected %zu", fifo->used, FIFO_LENGTH); // test remove for (i = 1; i <= FIFO_LENGTH; i++) { ret = gcs_fifo_lite_remove (fifo); fail_if (0 == ret, "gcs_fifo_lite_remove() failed, i = %ld", i); } fail_if (fifo->used != 0, "fifo->used is %zu, expected %zu", fifo->used, 0); // try remove on empty queue ret = gcs_fifo_lite_remove (fifo); fail_if (0 != ret, "gcs_fifo_lite_remove() from empty FIFO returned true"); // it should be possible to fill FIFO again for (i = 1; i <= FIFO_LENGTH; i++) { item = (long*)gcs_fifo_lite_get_tail (fifo); fail_if (NULL == item, "gcs_fifo_lite_get_tail() returned NULL"); *item = i; gcs_fifo_lite_push_tail (fifo); } fail_if (fifo->used != FIFO_LENGTH, "fifo->used is %zu, expected %zu", fifo->used, FIFO_LENGTH); // test get for (i = 1; i <= FIFO_LENGTH; i++) { item = (long*)gcs_fifo_lite_get_head (fifo); fail_if (NULL == item, "gcs_fifo_lite_get_head() returned NULL"); fail_if (*item != i, "gcs_fifo_lite_get_head() returned %ld, " "expected %ld", *item, i); gcs_fifo_lite_release (fifo); item = (long*)gcs_fifo_lite_get_head (fifo); fail_if (NULL == item, "gcs_fifo_lite_get_head() returned NULL"); fail_if (*item != i, "gcs_fifo_lite_get_head() returned %ld, " "expected %ld", *item, i); gcs_fifo_lite_pop_head (fifo); } fail_if (fifo->used != 0, "fifo->used for empty queue is %ld", fifo->used); ret = gcs_fifo_lite_destroy (fifo); fail_if (ret != 0, "gcs_fifo_lite_destroy() failed: %d", ret); } END_TEST Suite *gcs_fifo_suite(void) { Suite *s = suite_create("GCS FIFO functions"); TCase *tc = tcase_create("gcs_fifo"); suite_add_tcase (s, tc); 
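    /*
     * Minimal usage sketch of the gcs_fifo_lite_t API exercised by
     * gcs_fifo_lite_test above, kept under #if 0 so it is not compiled into
     * the suite. It uses only calls that appear in the test; FIFO_LENGTH is
     * the constant defined at the top of this file, and the semantics noted
     * in the comments are inferred from the test's assertions.
     */
#if 0
    static void gcs_fifo_lite_usage_sketch (void)
    {
        gcs_fifo_lite_t* f = gcs_fifo_lite_create (FIFO_LENGTH, sizeof(long));
        if (NULL == f) return;

        gcs_fifo_lite_open (f);                 // open for use, as in the test

        /* producer side: reserve a slot, fill it in place, then commit it */
        long* slot = (long*)gcs_fifo_lite_get_tail (f);
        if (slot != NULL) {
            *slot = 42;
            gcs_fifo_lite_push_tail (f);
        }

        /* consumer side: get_head() returns the oldest item; release()
         * leaves it queued, pop_head() consumes it */
        long* head = (long*)gcs_fifo_lite_get_head (f);
        if (head != NULL) {
            gcs_fifo_lite_pop_head (f);
        }

        gcs_fifo_lite_destroy (f);
    }
#endif // 0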
tcase_add_test (tc, gcs_fifo_lite_test); return s; } galera-3-25.3.20/gcs/src/unit_tests/gcs_sm_test.cpp0000644000015300001660000004013113042054732021717 0ustar jenkinsjenkins// Copyright (C) 2010-2016 Codership Oy // $Id$ #include #include "../gcs_sm.hpp" #include // fabs #include #include #include "gcs_sm_test.hpp" #define TEST_USLEEP 10000 /* we can't use pthread functions for waiting for certain conditions */ #define WAIT_FOR(cond) \ { int count = 1000; while (--count && !(cond)) { usleep (1000); }} START_TEST (gcs_sm_test_basic) { int ret; gcs_sm_t* sm = gcs_sm_create(2, 1); fail_if(!sm); gu_cond_t cond; gu_cond_init (&cond, NULL); int i; for (i = 1; i < 5; i++) { ret = gcs_sm_enter(sm, &cond, false, true); fail_if(ret, "gcs_sm_enter() failed: %d (%s)", ret, strerror(-ret)); fail_if(sm->users != 1, "users = %ld, expected 1", sm->users); fail_if(sm->entered != 1, "entered = %d, expected 1", sm->entered); gcs_sm_leave(sm); fail_if(sm->entered != 0, "entered = %d, expected %d", sm->entered, 0); } ret = gcs_sm_close(sm); fail_if(ret); gcs_sm_destroy(sm); gu_cond_destroy(&cond); } END_TEST volatile long simple_ret; static void* simple_thread(void* arg) { gcs_sm_t* sm = (gcs_sm_t*) arg; gu_cond_t cond; gu_cond_init (&cond, NULL); if (0 == (simple_ret = gcs_sm_enter (sm, &cond, false, true))) { usleep(1000); gcs_sm_leave (sm); } gu_cond_destroy (&cond); return NULL; } START_TEST (gcs_sm_test_simple) { int ret; gcs_sm_t* sm = gcs_sm_create(4, 1); fail_if(!sm); gu_cond_t cond; gu_cond_init (&cond, NULL); ret = gcs_sm_enter(sm, &cond, false, true); fail_if(ret, "gcs_sm_enter() failed: %d (%s)", ret, strerror(-ret)); fail_if(sm->users != 1, "users = %ld, expected 1", sm->users); fail_if(sm->entered != true, "entered = %d, expected %d", sm->users, true); gu_thread_t t1, t2, t3, t4; gu_thread_create (&t1, NULL, simple_thread, sm); gu_thread_create (&t2, NULL, simple_thread, sm); gu_thread_create (&t3, NULL, simple_thread, sm); WAIT_FOR ((long)sm->wait_q_len == sm->users); fail_if((long)sm->wait_q_len != sm->users, "wait_q_len = %lu, users = %ld", sm->wait_q_len, sm->users); gu_thread_create (&t4, NULL, simple_thread, sm); mark_point(); gu_thread_join (t4, NULL); // there's no space in the queue fail_if (simple_ret != -EAGAIN); fail_if (0 != sm->wait_q_tail, "wait_q_tail = %lu, expected 0", sm->wait_q_tail); fail_if (1 != sm->wait_q_head, "wait_q_head = %lu, expected 1", sm->wait_q_head); fail_if (4 != sm->users, "users = %lu, expected 4", sm->users); gu_info ("Calling gcs_sm_leave()"); gcs_sm_leave(sm); fail_unless(4 > sm->users, "users = %lu, expected 4", sm->users); gu_info ("Calling gcs_sm_close()"); ret = gcs_sm_close(sm); fail_if(ret); gu_thread_join(t1, NULL); gu_thread_join(t2, NULL); gu_thread_join(t3, NULL); gcs_sm_destroy(sm); gu_cond_destroy(&cond); } END_TEST static volatile int order = 0; // global variable to trac the order of events static void* closing_thread (void* data) { gcs_sm_t* sm = (gcs_sm_t*)data; fail_if(order != 0, "order is %d, expected 0", order); order = 1; int ret = gcs_sm_close(sm); fail_if(ret); fail_if(order != 2, "order is %d, expected 2", order); gcs_sm_destroy(sm); return NULL; } START_TEST (gcs_sm_test_close) { order = 0; gcs_sm_t* sm = gcs_sm_create(2, 1); fail_if(!sm); gu_cond_t cond; gu_cond_init (&cond, NULL); int ret = gcs_sm_enter(sm, &cond, false, true); fail_if(ret, "gcs_sm_enter() failed: %d (%s)", ret, strerror(-ret)); fail_if(sm->users != 1, "users = %ld, expected 1", sm->users); fail_if(order != 0); fail_if(1 != sm->wait_q_head, "wait_q_head = %lu, 
expected 1", sm->wait_q_head); fail_if(1 != sm->wait_q_tail, "wait_q_tail = %lu, expected 1", sm->wait_q_tail); gu_thread_t thr; gu_thread_create (&thr, NULL, closing_thread, sm); WAIT_FOR(1 == order); fail_if(order != 1, "order is %d, expected 1", order); usleep(TEST_USLEEP); // make sure closing_thread() blocks in gcs_sm_close() fail_if(sm->users != 2, "users = %ld, expected 2", sm->users); gu_info ("Started close thread, users = %ld", sm->users); fail_if(1 != sm->wait_q_head, "wait_q_head = %lu, expected 1", sm->wait_q_head); fail_if(0 != sm->wait_q_tail, "wait_q_tail = %lu, expected 0", sm->wait_q_tail); fail_if(1 != sm->entered); order = 2; gcs_sm_leave(sm); mark_point(); gu_thread_join(thr, NULL); gu_cond_destroy(&cond); } END_TEST static volatile int pause_order = 0; static void* pausing_thread (void* data) { gu_info ("pausing_thread start, pause_order = %d", pause_order); gcs_sm_t* sm = (gcs_sm_t*)data; gu_cond_t cond; gu_cond_init (&cond, NULL); gcs_sm_schedule (sm); gu_info ("pausing_thread scheduled, pause_order = %d", pause_order); fail_if (pause_order != 0, "pause_order = %d, expected 0"); pause_order = 1; gcs_sm_enter (sm, &cond, true, true); gu_info ("pausing_thread entered, pause_order = %d", pause_order); fail_if (pause_order != 2, "pause_order = %d, expected 2"); pause_order = 3; usleep(TEST_USLEEP); gcs_sm_leave (sm); mark_point(); gu_cond_destroy(&cond); gu_info ("pausing_thread exit, pause_order = %d", pause_order); return NULL; } static double const EPS = 1.0e-15; // double precision START_TEST (gcs_sm_test_pause) { int q_len; int q_len_max; int q_len_min; double q_len_avg; long long paused_ns; double paused_avg; gcs_sm_t* sm = gcs_sm_create(4, 1); fail_if(!sm); fail_if(1 != sm->wait_q_head, "wait_q_head = %lu, expected 1", sm->wait_q_head); gu_cond_t cond; gu_cond_init (&cond, NULL); gu_thread_t thr; gcs_sm_stats_get (sm, &q_len, &q_len_max, &q_len_min, &q_len_avg, &paused_ns, &paused_avg); fail_if (paused_ns != 0, "paused_ns: expected 0, got %lld", paused_ns); fail_if (fabs(paused_avg) > EPS, "paused_avg: expected <= %e, got %e", EPS, fabs(paused_avg)); fail_if (fabs(q_len_avg) > EPS, "q_len_avg: expected <= %e, got %e", EPS, fabs(q_len_avg)); fail_if (q_len != 0); fail_if (q_len_max != 0); fail_if (q_len_min != 0); // Test attempt to enter paused monitor pause_order = 0; gcs_sm_pause (sm); gu_thread_create (&thr, NULL, pausing_thread, sm); WAIT_FOR(1 == pause_order); fail_if (pause_order != 1, "pause_order = %d, expected 1"); usleep(TEST_USLEEP); // make sure pausing_thread blocked in gcs_sm_enter() pause_order = 2; // testing taking stats in the middle of the pause pt. 1 gcs_sm_stats_get (sm, &q_len, &q_len_max, &q_len_min, &q_len_avg, &paused_ns, &paused_avg); fail_if (paused_ns <= 0.0); fail_if (paused_avg <= 0.0); fail_if (fabs(q_len_avg) > EPS, "q_len_avg: expected <= %e, got %e", EPS, fabs(q_len_avg)); gu_info ("Calling gcs_sm_continue()"); gcs_sm_continue (sm); gu_thread_join (thr, NULL); fail_if (pause_order != 3, "pause_order = %d, expected 3"); fail_if(2 != sm->wait_q_head, "wait_q_head = %lu, expected 2", sm->wait_q_head); fail_if(1 != sm->wait_q_tail, "wait_q_tail = %lu, expected 1", sm->wait_q_tail); // testing taking stats in the middle of the pause pt. 
2 long long tmp; gcs_sm_stats_get (sm, &q_len, &q_len_max, &q_len_min, &q_len_avg, &tmp, &paused_avg); fail_if (tmp <= paused_ns); paused_ns = tmp; fail_if (paused_avg <= 0.0); fail_if (fabs(q_len_avg) > EPS, "q_len_avg: expected <= %e, got %e", EPS, fabs(q_len_avg)); gcs_sm_stats_flush(sm); // Testing scheduling capability gcs_sm_schedule (sm); fail_if(2 != sm->wait_q_tail, "wait_q_tail = %lu, expected 2", sm->wait_q_tail); gu_thread_create (&thr, NULL, pausing_thread, sm); usleep (TEST_USLEEP); // no changes in pause_order fail_if (pause_order != 3, "pause_order = %d, expected 3"); pause_order = 0; int ret = gcs_sm_enter(sm, &cond, true, true); fail_if (ret, "gcs_sm_enter() failed: %d (%s)", ret, strerror(-ret)); // released monitor lock, thr should continue and schedule, // set pause_order to 1 WAIT_FOR(1 == pause_order); fail_if (pause_order != 1, "pause_order = %d, expected 1"); fail_if (sm->users != 2, "users = %ld, expected 2", sm->users); fail_if(2 != sm->wait_q_head, "wait_q_head = %lu, expected 2", sm->wait_q_head); fail_if(3 != sm->wait_q_tail, "wait_q_tail = %lu, expected 3", sm->wait_q_tail); gcs_sm_stats_get (sm, &q_len, &q_len_max, &q_len_min, &q_len_avg, &tmp, &paused_avg); fail_if (tmp < paused_ns); paused_ns = tmp; fail_if (fabs(paused_avg) > EPS, "paused_avg: expected <= %e, got %e", EPS, fabs(paused_avg)); fail_if (q_len != sm->users, "found q_len %d, expected = %d", q_len, sm->users); fail_if (q_len_max != q_len, "found q_len_max %d, expected = %d", q_len_max, q_len); fail_if (q_len_min != 0, "found q_len_min %d, expected = 0", q_len_min); fail_if (fabs(q_len_avg - 0.5) > EPS, "q_len_avg: expected <= %e, got %e", EPS, fabs(q_len_avg)); gcs_sm_stats_flush(sm); gu_info ("Started pause thread, users = %ld", sm->users); // Now test pausing when monitor is in entered state pause_order = 2; gcs_sm_pause (sm); usleep (TEST_USLEEP); gcs_sm_continue (sm); // nothing should continue, since monitor is entered usleep (TEST_USLEEP); fail_if (pause_order != 2, "pause_order = %d, expected 2"); fail_if (sm->entered != 1, "entered = %ld, expected 1", sm->entered); // Now test pausing when monitor is left gcs_sm_pause (sm); fail_if (sm->users != 2, "users = %ld, expected 2", sm->users); gcs_sm_leave (sm); fail_if (sm->users != 1, "users = %ld, expected 1", sm->users); fail_if (sm->entered != 0, "entered = %ld, expected 1", sm->entered); fail_if(3 != sm->wait_q_head, "wait_q_head = %lu, expected 3", sm->wait_q_head); fail_if(3 != sm->wait_q_tail, "wait_q_tail = %lu, expected 3", sm->wait_q_tail); usleep (TEST_USLEEP); // nothing should change, since monitor is paused fail_if (pause_order != 2, "pause_order = %d, expected 2"); fail_if (sm->entered != 0, "entered = %ld, expected 0", sm->entered); fail_if (sm->users != 1, "users = %ld, expected 1", sm->users); gcs_sm_continue (sm); // paused thread should continue WAIT_FOR(3 == pause_order); fail_if (pause_order != 3, "pause_order = %d, expected 3"); gcs_sm_stats_get (sm, &q_len, &q_len_max, &q_len_min, &q_len_avg, &tmp, &paused_avg); fail_if (tmp <= paused_ns); paused_ns = tmp; fail_if (paused_avg <= 0.0); fail_if (fabs(q_len_avg) > EPS, "q_len_avg: expected <= %e, got %e", EPS, fabs(q_len_avg)); gcs_sm_enter (sm, &cond, false, true); // by now paused thread exited monitor fail_if (sm->entered != 1, "entered = %ld, expected 1", sm->entered); fail_if (sm->users != 1, "users = %ld, expected 1", sm->users); fail_if(0 != sm->wait_q_head, "wait_q_head = %lu, expected 0", sm->wait_q_head); fail_if(0 != sm->wait_q_tail, "wait_q_tail = %lu, 
expected 0", sm->wait_q_tail); gcs_sm_leave (sm); fail_if(1 != sm->wait_q_head, "wait_q_head = %lu, expected 1", sm->wait_q_head); mark_point(); gu_cond_destroy(&cond); gcs_sm_close (sm); mark_point(); gu_thread_join(thr, NULL); gcs_sm_destroy (sm); } END_TEST static volatile long global_handle = 0; static volatile long global_ret = 0; static void* interrupt_thread(void* arg) { gcs_sm_t* sm = (gcs_sm_t*) arg; global_handle = gcs_sm_schedule (sm); if (global_handle >= 0) { pthread_cond_t cond; pthread_cond_init (&cond, NULL); if (0 == (global_ret = gcs_sm_enter (sm, &cond, true, true))) { gcs_sm_leave (sm); } pthread_cond_destroy (&cond); } return NULL; } #define TEST_CREATE_THREAD(thr, tail, h, u) \ global_handle = -1; \ gu_thread_create (thr, NULL, interrupt_thread, sm); \ WAIT_FOR(global_handle == h); \ fail_if (sm->wait_q_tail != tail, "wait_q_tail = %lu, expected %lu", \ sm->wait_q_tail, tail); \ fail_if (global_handle != h, "global_handle = %ld, expected %ld", \ global_handle, h); \ fail_if (sm->users != u, "users = %ld, expected %ld", sm->users, u); #define TEST_INTERRUPT_THREAD(h, t) \ ret = gcs_sm_interrupt (sm, (h)); \ fail_if (ret != 0); \ gu_thread_join ((t), NULL); \ fail_if (global_ret != -EINTR, "global_ret = %ld, expected %ld (-EINTR)", \ global_ret, -EINTR); START_TEST (gcs_sm_test_interrupt) { gcs_sm_t* sm = gcs_sm_create(4, 1); fail_if(!sm); gu_cond_t cond; gu_cond_init (&cond, NULL); gu_thread_t thr1; gu_thread_t thr2; gu_thread_t thr3; long handle = gcs_sm_schedule (sm); fail_if (handle != 0, "handle = %ld, expected 0"); fail_if (sm->wait_q_tail != 1, "wait_q_tail = %lu, expected 1", sm->wait_q_tail); long ret = gcs_sm_enter (sm, &cond, true, true); fail_if (ret != 0); /* 1. Test interrupting blocked by previous thread */ TEST_CREATE_THREAD(&thr1, 2, 3, 2); TEST_CREATE_THREAD(&thr2, 3, 4, 3); TEST_INTERRUPT_THREAD(3, thr1); gcs_sm_leave (sm); // this should let 2nd enter monitor gu_thread_join (thr2, NULL); fail_if (global_ret != 0, "global_ret = %ld, expected 0", global_ret); fail_if (sm->users != 0, "users = %ld, expected 0", sm->users); ret = gcs_sm_interrupt (sm, 4); // try to interrupt 2nd which has exited fail_if (ret != -ESRCH); /* 2. Test interrupting blocked by pause */ gcs_sm_pause (sm); TEST_CREATE_THREAD(&thr1, 0, 1, 1); TEST_INTERRUPT_THREAD(1, thr1); TEST_CREATE_THREAD(&thr2, 1, 2, 2); /* test queueing after interrupted */ TEST_CREATE_THREAD(&thr3, 2, 3, 3); TEST_INTERRUPT_THREAD(3, thr3); /* test interrupting last waiter */ gcs_sm_continue (sm); gu_thread_join (thr2, NULL); fail_if (global_ret != 0, "global_ret = %ld, expected 0", global_ret); /* 3. 
Unpausing totally interrupted monitor */ gcs_sm_pause (sm); TEST_CREATE_THREAD(&thr1, 3, 4, 1); TEST_INTERRUPT_THREAD(4, thr1); TEST_CREATE_THREAD(&thr1, 0, 1, 2); TEST_INTERRUPT_THREAD(1, thr1); gcs_sm_continue (sm); /* check that monitor is still functional */ ret = gcs_sm_enter (sm, &cond, false, true); fail_if (ret != 0); fail_if(1 != sm->wait_q_head, "wait_q_head = %lu, expected 1", sm->wait_q_head); fail_if(1 != sm->wait_q_tail, "wait_q_tail = %lu, expected 1", sm->wait_q_tail); fail_if (sm->users != 1, "users = %ld, expected 1", sm->users); TEST_CREATE_THREAD(&thr1, 2, 3, 2); gu_info ("Calling gcs_sm_leave()"); gcs_sm_leave (sm); pthread_join (thr1, NULL); fail_if (global_ret != 0, "global_ret = %ld, expected 0", global_ret); pthread_cond_destroy (&cond); gcs_sm_close (sm); gcs_sm_destroy (sm); } END_TEST Suite *gcs_send_monitor_suite(void) { Suite *s = suite_create("GCS send monitor"); TCase *tc = tcase_create("gcs_sm"); suite_add_tcase (s, tc); tcase_add_test (tc, gcs_sm_test_basic); tcase_add_test (tc, gcs_sm_test_simple); tcase_add_test (tc, gcs_sm_test_close); tcase_add_test (tc, gcs_sm_test_pause); tcase_add_test (tc, gcs_sm_test_interrupt); return s; } galera-3-25.3.20/gcs/src/unit_tests/gcs_comp_test.cpp0000644000015300001660000001124313042054732022240 0ustar jenkinsjenkins/* * Copyright (C) 2008 Codership Oy * * $Id$ */ #include #include #include #include #include #include #define GCS_COMP_MSG_ACCESS #include "../gcs_comp_msg.hpp" #include "gcs_comp_test.hpp" static gcs_comp_memb_t const members[] = { { "0", 0 }, { "88888888", 1 }, { "1", 5 }, { "7777777", 1 }, { "22", 3 }, { "666666", 4 }, { "333", 5 }, { "55555", 5 }, { "4444", 0 } }; static char long_id[] = "just make it longer when the test starts to fail because of increased limit"; static void check_msg_identity (const gcs_comp_msg_t* m, const gcs_comp_msg_t* n) { long i; fail_if (n->primary != m->primary); fail_if (n->my_idx != m->my_idx); fail_if (n->memb_num != m->memb_num); for (i = 0; i < m->memb_num; i++) { fail_if (strlen(n->memb[i].id) != strlen(m->memb[i].id), "member %d id len does not match: %d vs %d", i, strlen(n->memb[i].id), strlen(m->memb[i].id)); fail_if (strncmp (n->memb[i].id, m->memb[i].id, GCS_COMP_MEMB_ID_MAX_LEN), "member %d IDs don't not match: got '%s', should be '%s'", i, members[i], m->memb[i].id); fail_if (n->memb[i].segment != m->memb[i].segment, "member %d segments don't not match: got '%d', should be '%d'", i, (int)members[i].segment, (int)m->memb[i].segment); } } START_TEST (gcs_comp_test) { long memb_num = sizeof(members)/sizeof(members[0]); long my_idx = getpid() % memb_num; long prim = my_idx % 2; gcs_comp_msg_t* m = gcs_comp_msg_new (prim, false, my_idx, memb_num, 0); gcs_comp_msg_t* n = NULL; size_t buf_len = gcs_comp_msg_size (m); char buf[buf_len]; long i, j; long ret; fail_if (NULL == m); fail_if (memb_num != gcs_comp_msg_num (m)); fail_if (my_idx != gcs_comp_msg_self (m)); // add members except for the last for (i = 0; i < memb_num - 1; i++) { ret = gcs_comp_msg_add (m, members[i].id, members[i].segment); fail_if (ret != i, "gcs_comp_msg_add() returned %d, expected %d", ret, i); } // try to add a id that was added already if (my_idx < i) { j = my_idx; } else { j = i - 1; } ret = gcs_comp_msg_add (m, members[j].id, members[j].segment); fail_if (ret != -ENOTUNIQ, "gcs_comp_msg_add() returned %d, expected " "-ENOTUNIQ (%d)", ret, -ENOTUNIQ); // try to add empty id ret = gcs_comp_msg_add (m, "", 0); fail_if (ret != -EINVAL, "gcs_comp_msg_add() returned %d, expected " "-EINVAL (%d)", 
ret, -EINVAL); // try to add id that is too long ret = gcs_comp_msg_add (m, long_id, 3); fail_if (ret != -ENAMETOOLONG, "gcs_comp_msg_add() returned %d, expected " "-ENAMETOOLONG (%d)", ret, -ENAMETOOLONG); // add final id ret = gcs_comp_msg_add (m, members[i].id, members[i].segment); fail_if (ret != i, "gcs_comp_msg_add() returned %d, expected %d", ret, i); // check that all added correctly for (i = 0; i < memb_num; i++) { const char* const id = gcs_comp_msg_member(m, i)->id; fail_if (strcmp (members[i].id, id), "Memeber %ld (%s) recorded as %s", i, members[i].id, id); } // check that memcpy preserves the message // (it can be treated just as a byte array) memcpy (buf, m, buf_len); n = (gcs_comp_msg_t*) buf; check_msg_identity (m, n); gcs_comp_msg_delete (m); mark_point(); // check that gcs_comp_msg_copy() works m = gcs_comp_msg_copy (n); fail_if (NULL == m); check_msg_identity (m, n); gcs_comp_msg_delete (m); // test gcs_comp_msg_member() fail_unless (NULL == gcs_comp_msg_member (n, -1)); for (i = 0; i < memb_num; i++) { const char* id = gcs_comp_msg_member (n, i)->id; fail_if (NULL == id); fail_if (strcmp(members[i].id, id)); } fail_unless (NULL == gcs_comp_msg_member (n, i)); // test gcs_comp_msg_idx() fail_if (-1 != gcs_comp_msg_idx (n, "")); fail_if (-1 != gcs_comp_msg_idx (n, long_id)); for (i = 0; i < memb_num; i++) fail_if (i != gcs_comp_msg_idx (n, members[i].id)); // test gcs_comp_msg_primary() fail_if (n->primary != gcs_comp_msg_primary(n)); } END_TEST Suite *gcs_comp_suite(void) { Suite *suite = suite_create("GCS component message"); TCase *tcase = tcase_create("gcs_comp"); suite_add_tcase (suite, tcase); tcase_add_test (tcase, gcs_comp_test); return suite; } galera-3-25.3.20/gcs/src/unit_tests/gcs_fc_test.hpp0000644000015300001660000000030013042054732021667 0ustar jenkinsjenkins// Copyright (C) 2010 Codership Oy // $Id$ #ifndef __gcs_fc_test__ #define __gcs_fc_test__ #include Suite *gcs_fc_suite(void); #endif /* __gcs_fc_test__ */ galera-3-25.3.20/gcs/src/unit_tests/gcs_fc_test.cpp0000644000015300001660000001005513042054732021672 0ustar jenkinsjenkins// Copyright (C) 2010 Codership Oy // $Id$ #include "gcs_fc_test.hpp" #include "../gcs_fc.hpp" #include #include START_TEST(gcs_fc_test_limits) { gcs_fc_t fc; int ret; ret = gcs_fc_init (&fc, 16, 0.5, 0.1); fail_if (ret != 0); ret = gcs_fc_init (&fc, -1, 0.5, 0.1); fail_if (ret != -EINVAL); ret = gcs_fc_init (&fc, 16, 1.0, 0.1); fail_if (ret != -EINVAL); ret = gcs_fc_init (&fc, 16, 0.5, 1.0); fail_if (ret != -EINVAL); } END_TEST /* This is a macro to preserve line numbers in fail_if() output */ #define SKIP_N_ACTIONS(fc_,n_) \ { \ int i; \ for (i = 0; i < n_; ++i) \ { \ long long ret = gcs_fc_process (fc_, 0); \ fail_if (ret != 0, "0-sized action #%d returned %d (%s)", \ i, ret, strerror(-ret)); \ } \ } START_TEST(gcs_fc_test_basic) { gcs_fc_t fc; int ret; long long pause; ret = gcs_fc_init (&fc, 16, 0.5, 0.1); fail_if (ret != 0); gcs_fc_reset (&fc, 8); usleep (1000); SKIP_N_ACTIONS(&fc, 7); /* Here we exceed soft limit almost instantly, which should give a very high * data rate and as a result a need to sleep */ pause = gcs_fc_process (&fc, 7); fail_if(pause <= 0, "Soft limit trip returned %lld (%s)", pause, strerror(-pause)); gcs_fc_reset (&fc, 7); usleep (1000); SKIP_N_ACTIONS(&fc, 7); /* Here we reach soft limit almost instantly, which should give a very high * data rate, but soft limit is not exceeded, so no sleep yet. 
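     * (Reading the numbers in this test, not the gcs_fc internals: with
     * gcs_fc_init (&fc, 16, 0.5, 0.1) the hard limit is 16, so the soft
     * limit is presumably 16 * 0.5 = 8. After gcs_fc_reset (&fc, 7) and
     * seven 0-sized actions the queue stands at 7, and the 1-byte action
     * below brings it exactly to 8, touching the soft limit without
     * exceeding it, hence no pause. The later 7-byte action takes it to 15
     * and does trip the soft limit, and one further byte reaches the hard
     * limit of 16, which is reported as -ENOMEM.)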
*/ pause = gcs_fc_process (&fc, 1); fail_if(pause != 0, "Soft limit touch returned %lld (%s)", pause, strerror(-pause)); SKIP_N_ACTIONS(&fc, 7); usleep (1000); pause = gcs_fc_process (&fc, 7); fail_if(pause <= 0, "Soft limit trip returned %lld (%s)", pause, strerror(-pause)); /* hard limit excess should be detected instantly */ pause = gcs_fc_process (&fc, 1); fail_if(pause != -ENOMEM, "Hard limit trip returned %lld (%s)", pause, strerror(-pause)); } END_TEST static inline bool double_equals (double a, double b) { static double const eps = 0.001; double diff = (a - b) / (a + b); // roughly relative difference return !(diff > eps || diff < -eps); } START_TEST(gcs_fc_test_precise) { gcs_fc_t fc; long long ret; struct timespec p10ms = {0, 10000000 }; // 10 ms ret = gcs_fc_init (&fc, 2000, 0.5, 0.5); fail_if (ret != 0); gcs_fc_reset (&fc, 500); SKIP_N_ACTIONS(&fc, 7); nanosleep (&p10ms, NULL); ret = gcs_fc_process (&fc, 1000); fail_if(ret <= 0, "Soft limit trip returned %d (%s)", ret, strerror(-ret)); // measured data rate should be ~100000 b/s // slave queue length should be half-way between soft limit and hard limit // desired rate should be half between 1.0 and 0.5 of full rate -> 75000 b/s // excess over soft limit is 500 and corresponding interval: 5ms // (500/5ms == 100000 b/s) // additional sleep must be 1.6667 ms (500/(5 + 1.6667) ~ 75000 b/s) double const correction = 100000.0/fc.max_rate; // due to imprecise sleep double const expected_sleep = 0.001666667*correction; double sleep = ((double)ret)*1.0e-9; fail_if(!double_equals(sleep, expected_sleep), "Sleep: %f, expected %f", sleep, expected_sleep); } END_TEST Suite *gcs_fc_suite(void) { Suite *s = suite_create("GCS state transfer FC"); TCase *tc = tcase_create("gcs_fc"); suite_add_tcase (s, tc); tcase_add_test (tc, gcs_fc_test_limits); tcase_add_test (tc, gcs_fc_test_basic); tcase_add_test (tc, gcs_fc_test_precise); return s; } galera-3-25.3.20/gcs/src/unit_tests/gcs_fifo_test.hpp0000644000015300001660000000026413042054732022233 0ustar jenkinsjenkins// Copyright (C) 2007 Codership Oy // $Id$ #ifndef __gcs_fifo_test__ #define __gcs_fifo_test__ Suite *gcs_fifo_suite(void); #endif /* __gcs_fifo_test__ */ galera-3-25.3.20/gcs/src/unit_tests/gcs_defrag_test.hpp0000644000015300001660000000031313042054732022533 0ustar jenkinsjenkins/* * Copyright (C) 2008 Codership Oy * * $Id$ */ #ifndef __gcs_defrag_test__ #define __gcs_defrag_test__ extern Suite *gcs_defrag_suite(void); #endif /* __gu_defrag_test__ */ galera-3-25.3.20/gcs/src/unit_tests/gcs_node_test.cpp0000644000015300001660000000321613042054732022230 0ustar jenkinsjenkins/* * Copyright (C) 2008 Codership Oy * * $Id$ */ #include #include #include #include #include "gcs_node_test.hpp" #include "../gcs_node.hpp" #define NODE_ID "owpiefd[woie" #define NODE_NAME "strange name" #define NODE_ADDR "0.0.0.0:0" START_TEST (gcs_node_test) { /* this is a small unit test as node unit does almost nothing */ gcs_node_t node1, node2; static const gcs_seqno_t seqno = 333; gcs_node_init (&node1, NULL, NODE_ID, NODE_NAME, NODE_ADDR, 0, 0, 0, 0); gcs_node_init (&node2, NULL, "baka", NULL, NULL, 0, 0, 0, 0); fail_if (strcmp(node1.id, NODE_ID), "Expected node id '%s', found '%s'", NODE_ID, node1.id); fail_if (strcmp(node1.name, NODE_NAME), "Expected node name '%s', " "found '%s'", NODE_NAME, node1.name); fail_if (strcmp(node1.inc_addr, NODE_ADDR), "Expected node id '%s', " "found '%s'", NODE_ADDR, node1.inc_addr); fail_if (gcs_node_get_last_applied(&node1)); gcs_node_set_last_applied (&node1, seqno); 
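    /* The move/reset/free sequence below suggests, as an inference from this
     * test rather than documented API semantics, that gcs_node_move()
     * transfers ownership of node1's state into node2: afterwards the test
     * only gcs_node_reset()s the source and gcs_node_free()s the
     * destination, rather than freeing both. */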
mark_point(); gcs_node_move (&node2, &node1); fail_if (seqno != gcs_node_get_last_applied (&node2), "move didn't preserve last_applied"); fail_if (strcmp(node2.id, NODE_ID), "Expected node id '%s', found '%s'", NODE_ID, node2.id); gcs_node_reset (&node1); mark_point(); gcs_node_free (&node2); } END_TEST Suite *gcs_node_suite(void) { Suite *suite = suite_create("GCS node context"); TCase *tcase = tcase_create("gcs_node"); suite_add_tcase (suite, tcase); tcase_add_test (tcase, gcs_node_test); return suite; } galera-3-25.3.20/gcs/src/unit_tests/gcs_memb_test.cpp0000644000015300001660000003714213042054732022230 0ustar jenkinsjenkins/* * Copyright (C) 2011-2013 Codership Oy * * $Id$ */ #include "../gcs_group.hpp" #include "../gcs_comp_msg.hpp" #include #include #include #include "gcs_memb_test.hpp" struct node { gcs_group_t group; char id[GCS_COMP_MEMB_ID_MAX_LEN + 1]; /// ID assigned by the backend }; #define MAX_NODES 10 struct group { struct node* nodes[MAX_NODES]; int nodes_num; }; /* delivers new component message to all memebers */ static long deliver_component_msg (struct group* group, bool prim) { int i; for (i = 0; i < group->nodes_num; i++) { gcs_comp_msg_t* msg = gcs_comp_msg_new (prim, false, i, group->nodes_num, 0); if (msg) { int j; for (j = 0; j < group->nodes_num; j++) { const struct node* const node = group->nodes[j]; long ret = gcs_comp_msg_add (msg, node->id, j); fail_if (j != ret, "Failed to add %d member: %ld (%s)", j, ret, strerror(-ret)); } /* check component message */ fail_if (i != gcs_comp_msg_self(msg)); fail_if (group->nodes_num != gcs_comp_msg_num(msg)); for (j = 0; j < group->nodes_num; j++) { const char* const src_id = group->nodes[j]->id; const char* const dst_id = gcs_comp_msg_member(msg, j)->id; fail_if (strcmp(src_id, dst_id), "%d node id %s, recorded in comp msg as %s", j, src_id, dst_id); // gcs_segment_t const src_seg = group->nodes[j]->segment; gcs_segment_t const dst_seg = gcs_comp_msg_member(msg, j)->segment; fail_if (j != dst_seg, "%d node segment %d, recorded in comp msg as %d", j, j, (int)dst_seg); } gcs_group_state_t ret = gcs_group_handle_comp_msg (&(group->nodes[i]->group), msg); fail_if (ret != GCS_GROUP_WAIT_STATE_UUID); gcs_comp_msg_delete (msg); /* check that uuids are properly recorded in internal structures */ for (j = 0; j < group->nodes_num; j++) { const char* src_id = group->nodes[j]->id; const char* dst_id = group->nodes[i]->group.nodes[j].id; fail_if (strcmp(src_id, dst_id), "%d node id %s, recorded at node %d as %s", j, src_id, i, dst_id); } } else { return -ENOMEM; } } return 0; } #if 0 static long group_send_msg (struct group* group, gcs_group_t* node, const void* msg, ssize_t msg_len) { return 0; } #endif static long perform_state_exchange (struct group* group) { /* first deliver state uuid message */ gu_uuid_t state_uuid; gu_uuid_generate (&state_uuid, NULL, 0); gcs_recv_msg_t uuid_msg(&state_uuid, sizeof (state_uuid), sizeof (state_uuid), 0, GCS_MSG_STATE_UUID); gcs_group_state_t state; int i; for (i = 0; i < group->nodes_num; i++) { state = gcs_group_handle_uuid_msg (&(group->nodes[i]->group),&uuid_msg); fail_if (state != GCS_GROUP_WAIT_STATE_MSG, "Wrong group state after STATE_UUID message. 
" "Expected: %d, got: %d", GCS_GROUP_WAIT_STATE_MSG, state); } /* complete state message exchange */ for (i = 0; i < group->nodes_num; i++) { /* create state message from node i */ gcs_state_msg_t* state = gcs_group_get_state (&(group->nodes[i]->group)); fail_if (NULL == state); ssize_t state_len = gcs_state_msg_len (state); uint8_t state_buf[state_len]; gcs_state_msg_write (state_buf, state); gcs_recv_msg_t state_msg(state_buf, sizeof (state_buf), sizeof (state_buf), i, GCS_MSG_STATE_MSG); /* deliver to each of the nodes */ int j; for (j = 0; j < group->nodes_num; j++) { gcs_group_state_t ret = gcs_group_handle_state_msg (&(group->nodes[j]->group), &state_msg); if (group->nodes_num - 1 == i) { // a message from the last node fail_if (ret != GCS_GROUP_PRIMARY, "Handling state msg failed: sender %d, receiver %d", i, j); } else { fail_if (ret != GCS_GROUP_WAIT_STATE_MSG, "Handling state msg failed: sender %d, receiver %d", i, j); } } gcs_state_msg_destroy (state); } return 0; } static long group_add_node (struct group* group, struct node* node, bool new_id) { if (new_id) { gu_uuid_t node_uuid; gu_uuid_generate (&node_uuid, NULL, 0); gu_uuid_print (&node_uuid, (char*)node->id, sizeof (node->id)); gu_debug ("Node %d (%p) UUID: %s", group->nodes_num, node, node->id); } group->nodes[group->nodes_num] = node; group->nodes_num++; /* check that all node ids are different */ int i; for (i = 0; i < group->nodes_num; i++) { int j; for (j = i+1; j < group->nodes_num; j++) { fail_if (!strcmp (group->nodes[i]->id, group->nodes[j]->id), "%d (%p) and %d (%p) have the same id: %s/%s", i, group->nodes[i], j,group->nodes[j], group->nodes[i]->id, group->nodes[j]->id); } } /* deliver new component message to all nodes */ long ret = deliver_component_msg (group, true); fail_if (ret != 0, "Component message delivery failed: %d (%s)", ret, strerror(-ret)); /* deliver state exchange uuid */ ret = perform_state_exchange (group); fail_if (ret != 0, "State exchange failed: %d (%s)", ret, strerror(-ret)); return 0; } /* NOTE: this function uses simplified and determinitstic algorithm where * dropped node is always replaced by the last one in group. * For our purposes (reproduction of #465) it fits perfectly. 
*/ static struct node* group_drop_node (struct group* group, int idx) { struct node* dropped = group->nodes[idx]; group->nodes[idx] = group->nodes[group->nodes_num - 1]; group->nodes[group->nodes_num - 1] = NULL; group->nodes_num--; if (group->nodes_num > 0) { deliver_component_msg (group, true); perform_state_exchange (group); } return dropped; } static gcs_node_state_t get_node_state (struct node* node) { return node->group.nodes[node->group.my_idx].status; } /* for delivery of GCS_MSG_SYNC or GCS_MSG_JOIN msg*/ static long deliver_join_sync_msg (struct group* const group, int const src, gcs_msg_type_t type) { gcs_seqno_t seqno = group->nodes[src]->group.act_id_; gcs_recv_msg_t msg(&seqno, sizeof (seqno), sizeof (seqno), src, type); long ret = -1; int i; for (i = 0; i < group->nodes_num; i++) { gcs_group_t* const gr = &group->nodes[i]->group; switch (type) { case GCS_MSG_JOIN: ret = gcs_group_handle_join_msg(gr, &msg); mark_point(); if (i == src) { fail_if (ret != 1, "%d failed to handle own JOIN message: %d (%s)", i, ret, strerror (-ret)); } else { fail_if (ret != 0, "%d failed to handle other JOIN message: %d (%s)", i, ret, strerror (-ret)); } break; case GCS_MSG_SYNC: ret = gcs_group_handle_sync_msg(gr, &msg); if (i == src) { fail_if (ret != 1 && gr->nodes[src].status == GCS_NODE_STATE_JOINED, "%d failed to handle own SYNC message: %d (%s)", i, ret, strerror (-ret)); } else { fail_if (ret != 0, "%d failed to handle other SYNC message: %d (%s)", i, ret, strerror (-ret)); } break; default: fail ("wrong message type: %d", type); } } return ret; } static bool verify_node_state_across_group (struct group* group, int const idx, gcs_node_state_t const check) { bool ret = false; int i; for (i = 0; i < group->nodes_num; i++) { gcs_node_state_t state = group->nodes[i]->group.nodes[idx].status; if (check != state) { gu_error("At node %d node's %d status is not %d, but %d", i, idx, check, state); ret = true; } } return ret; } /* start SST on behalf of node idx (joiner) */ static long group_sst_start (struct group* group, int const src_idx, const char* donor) { ssize_t const req_len = strlen (donor) + 2; // leave one byte as sst request payload int donor_idx = -1; int i; for (i = 0; i < group->nodes_num; i++) { // sst request is expected to be dynamically allocated char* req_buf = (char*)malloc (req_len); fail_if (NULL == req_buf); sprintf (req_buf, "%s", donor); struct gcs_act_rcvd req( gcs_act(req_buf, req_len, GCS_ACT_STATE_REQ), NULL, GCS_SEQNO_ILL, src_idx); long ret; ret = gcs_group_handle_state_request (&group->nodes[i]->group, &req); if (ret < 0) { // don't fail here, we may want to test negatives gu_error (ret < 0, "Handling state request to '%s' failed: %d (%s)", donor, ret, strerror (-ret)); return ret; } if (i == src_idx) { fail_if (ret != req_len); free (req_buf); // passed to joiner } else { if (ret > 0) { if (donor_idx < 0) { fail_if (req.id != i); donor_idx = i; free (req_buf); // passed to donor } else { fail ("More than one donor selected: %d, first donor: %d", i, donor_idx); } } } } fail_if (donor_idx < 0, "Failed to select donor"); for (i = 0; i < group->nodes_num; i++) { gcs_group_t* const gr = &group->nodes[i]->group; gcs_node_t* const donor = &gr->nodes[donor_idx]; gcs_node_state_t state = donor->status; fail_if (state != GCS_NODE_STATE_DONOR, "%d is not donor at %d", donor_idx, i); int dc = donor->desync_count; fail_if (dc < 1, "donor %d at %d has desync_count %d", donor_idx, i,dc); gcs_node_t* const joiner = &gr->nodes[src_idx]; state = joiner->status; fail_if (state != 
GCS_NODE_STATE_JOINER, "%d is not joiner at %d", src_idx, i); dc = joiner->desync_count; fail_if (dc != 0, "joiner %d at %d has desync_count %d",donor_idx,i,dc); /* check that donor and joiner point at each other */ fail_if (memcmp (gr->nodes[donor_idx].joiner, gr->nodes[src_idx].id, GCS_COMP_MEMB_ID_MAX_LEN+1), "Donor points at wrong joiner: expected %s, got %s", gr->nodes[src_idx].id, gr->nodes[donor_idx].joiner); fail_if (memcmp (gr->nodes[src_idx].donor, gr->nodes[donor_idx].id, GCS_COMP_MEMB_ID_MAX_LEN+1), "Joiner points at wrong donor: expected %s, got %s", gr->nodes[donor_idx].id, gr->nodes[src_idx].donor); } return 0; } /* Thes test was specifically created to reproduce #465 */ START_TEST(gcs_memb_test_465) { struct group group; group.nodes_num = 0; struct node nodes[MAX_NODES]; int i; ssize_t ret = 0; // initialize individual node structures for (i = 0; i < MAX_NODES; i++) { int const str_len = 32; char name_str[str_len]; char addr_str[str_len]; sprintf(name_str, "node%d", i); sprintf(addr_str, "addr%d", i); gcs_group_init (&nodes[i].group, NULL, name_str, addr_str, 0, 0, 0); } gcs_node_state_t node_state; // bootstrap the cluster group_add_node (&group, &nodes[0], true); fail_if (nodes[0].group.state != GCS_GROUP_PRIMARY); node_state = get_node_state (&nodes[0]); fail_if (node_state != GCS_NODE_STATE_JOINED); deliver_join_sync_msg (&group, 0, GCS_MSG_SYNC); node_state = get_node_state (&nodes[0]); fail_if (node_state != GCS_NODE_STATE_SYNCED); group_add_node (&group, &nodes[1], true); fail_if (nodes[1].group.state != GCS_GROUP_PRIMARY); node_state = get_node_state (&nodes[1]); fail_if (node_state != GCS_NODE_STATE_PRIM); // need sst group_add_node (&group, &nodes[2], true); fail_if (nodes[2].group.state != GCS_GROUP_PRIMARY); node_state = get_node_state (&nodes[2]); fail_if (node_state != GCS_NODE_STATE_PRIM); // need sst fail_if (verify_node_state_across_group (&group, 0, GCS_NODE_STATE_SYNCED)); group_sst_start (&group, 2, nodes[0].group.nodes[0].name); mark_point(); deliver_join_sync_msg (&group, 0, GCS_MSG_JOIN); // end of donor SST deliver_join_sync_msg (&group, 0, GCS_MSG_SYNC); // donor synced deliver_join_sync_msg (&group, 2, GCS_MSG_SYNC); // joiner can't sync fail_if (verify_node_state_across_group (&group, 2, GCS_NODE_STATE_JOINER)); deliver_join_sync_msg (&group, 2, GCS_MSG_JOIN); // end of joiner SST deliver_join_sync_msg (&group, 2, GCS_MSG_SYNC); // joiner synced fail_if (verify_node_state_across_group (&group, 0, GCS_NODE_STATE_SYNCED)); fail_if (verify_node_state_across_group (&group, 1, GCS_NODE_STATE_PRIM)); fail_if (verify_node_state_across_group (&group, 2, GCS_NODE_STATE_SYNCED)); group_sst_start (&group, 1, nodes[0].group.nodes[0].name); deliver_join_sync_msg (&group, 0, GCS_MSG_JOIN); // end of donor SST deliver_join_sync_msg (&group, 1, GCS_MSG_JOIN); // end of joiner SST struct node* dropped = group_drop_node (&group, 1); fail_if (NULL == dropped); /* After that, according to #465, node 1 shifted from SYNCED to PRIMARY */ fail_if (verify_node_state_across_group (&group, 1, GCS_NODE_STATE_SYNCED)); struct gcs_act act; int proto_ver = -1; ret = gcs_group_act_conf (&group.nodes[1]->group, &act, &proto_ver); fail_if (ret <= 0, "gcs_group_act_cnf() retruned %zd (%s)", ret, strerror (-ret)); fail_if (ret != act.buf_len); fail_if (proto_ver != 0 /* current version */, "proto_ver = %d", proto_ver); const gcs_act_conf_t* conf = (const gcs_act_conf_t*)act.buf; fail_if (NULL == conf); fail_if (conf->my_idx != 1); /* according to #465 this was GCS_NODE_STATE_PRIM */ 
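    /* i.e. with the fix the surviving node must still be reported as SYNCED
       in the delivered configuration action, not demoted back to PRIM */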
fail_if (conf->my_state != GCS_NODE_STATE_SYNCED); deliver_join_sync_msg (&group, 0, GCS_MSG_SYNC); // donor synced fail_if (verify_node_state_across_group (&group, 0, GCS_NODE_STATE_SYNCED)); } END_TEST Suite *gcs_memb_suite(void) { Suite *suite = suite_create("GCS membership changes"); TCase *tcase = tcase_create("gcs_memb"); suite_add_tcase (suite, tcase); tcase_add_test (tcase, gcs_memb_test_465); return suite; } galera-3-25.3.20/gcs/src/gcs_spread.cpp0000644000015300001660000004642313042054732017330 0ustar jenkinsjenkins/* * Copyright (C) 2008 Codership Oy * * $Id$ */ /*****************************************/ /* Implementation of Spread GC backend */ /*****************************************/ #include #include #include #include #include #include #include "gcs_spread.h" #include "gcs_comp_msg.h" #define SPREAD_MAX_GROUPS 256 #if (GCS_COMP_MEMB_ID_MAX_LEN < MAX_GROUP_NAME) #error "GCS_COMP_MEMB_ID_MAX_LEN is smaller than Spread's MAX_GROUP_NAME" #error "This can make creation of component message impossible." #endif typedef struct string_array { int32 max_strings; int32 num_strings; char strings[0][MAX_GROUP_NAME]; } string_array_t; static string_array_t* string_array_alloc (const long n) { string_array_t *ret = NULL; ret = gu_malloc (sizeof (string_array_t) + n * MAX_GROUP_NAME); if (ret) { ret->max_strings = n; ret->num_strings = 0; } return ret; } static void string_array_free (string_array_t *a) { gu_free (a); } typedef enum spread_config { SPREAD_REGULAR, SPREAD_TRANSITIONAL } spread_config_t; typedef struct gcs_backend_conn { char *socket; char *channel; char *priv_name; char *priv_group; char *sender; long msg_type; long my_id; /* process ID returned with REG_MEMB message */ long config_id; // long memb_num; string_array_t *memb; string_array_t *groups; gcs_comp_msg_t *comp_msg; spread_config_t config; /* type of configuration: regular or trans */ mailbox mbox; } spread_t; /* this function converts socket address from conventional * "addr:port" notation to Spread's "port@addr" notation */ static long gcs_to_spread_socket (const char const *socket, char **sp_socket) { char *colon = strrchr (socket, ':'); size_t addr_len = colon - socket; size_t port_len = strlen (socket) - addr_len - 1; char *sps = NULL; if (!colon) return -EADDRNOTAVAIL; sps = (char *) strdup (socket); if (!sps) return -ENOMEM; memcpy (sps, colon+1, port_len); memcpy (sps + port_len + 1, socket, addr_len); sps[port_len] = '@'; *sp_socket = sps; return 0; } static const char* spread_default_socket = "localhost:4803"; static long spread_create (spread_t** spread, const char* socket) { long err = 0; spread_t *sp = GU_CALLOC (1, spread_t); *spread = NULL; if (!sp) { err = -ENOMEM; goto out0; } if (NULL == socket || strlen(socket) == 0) socket = spread_default_socket; err = gcs_to_spread_socket (socket, &sp->socket); if (err < 0) { goto out1; } sp->priv_name = GU_CALLOC (MAX_PRIVATE_NAME, char); if (!sp->priv_name) { err = -ENOMEM; goto out3; } sp->priv_group = GU_CALLOC (MAX_GROUP_NAME, char); if (!sp->priv_group) { err = -ENOMEM; goto out4; } sp->sender = GU_CALLOC (MAX_GROUP_NAME, char); if (!sp->sender) { err = -ENOMEM; goto out5; } sp->groups = string_array_alloc (SPREAD_MAX_GROUPS); if (!sp->groups) { err = -ENOMEM; goto out6; } sp->memb = string_array_alloc (SPREAD_MAX_GROUPS); if (!sp->memb) { err = -ENOMEM; goto out7; } sp->config = SPREAD_TRANSITIONAL; sp->config_id = -1; sp->comp_msg = NULL; gu_debug ("sp->priv_group: %p", sp->priv_group); *spread = sp; return err; out7: string_array_free (sp->groups); 
out6: gu_free (sp->sender); out5: gu_free (sp->priv_group); out4: gu_free (sp->priv_name); out3: free (sp->socket); out1: gu_free (sp); out0: return err; } /* Compiles a string of MAX_PRIVATE_NAME characters out of a supplied string and a number, returns -1 if digits overflow */ long spread_priv_name (char *name, const char *string, long n) { /* must make sure that it does not overflow MAX_PRIVATE_NAME */ long max_digit = 2; long max_string = MAX_PRIVATE_NAME - max_digit; long len = snprintf (name, max_string + 1, "%s", string); if (len > max_string) len = max_string; // truncated gu_debug ("len = %d, max_string = %d, MAX_PRIVATE_NAME = %d\n", len, (int)max_string, MAX_PRIVATE_NAME); len = snprintf (name + len, max_digit + 1, "_%d", (int)n); if (len > max_digit) return -1; // overflow return 0; } static GCS_BACKEND_CLOSE_FN(spread_close) { long err = 0; spread_t *spread = backend->conn; if (!spread) return -EBADFD; err = SP_leave (spread->mbox, spread->channel); if (err) { switch (err) { case ILLEGAL_GROUP: return -EADDRNOTAVAIL; case ILLEGAL_SESSION: return -ENOTCONN; case CONNECTION_CLOSED: return -ECONNRESET; default: return -EOPNOTSUPP; } } else { return 0; } } static GCS_BACKEND_DESTROY_FN(spread_destroy) { long err = 0; spread_t *spread = backend->conn; if (!spread) return -EBADFD; err = SP_disconnect (spread->mbox); if (spread->memb) string_array_free (spread->memb); if (spread->groups) string_array_free (spread->groups); if (spread->sender) gu_free (spread->sender); if (spread->priv_name) gu_free (spread->priv_name); if (spread->priv_group) gu_free (spread->priv_group); if (spread->channel) free (spread->channel); // obtained by strdup() if (spread->socket) free (spread->socket); if (spread->comp_msg) gcs_comp_msg_delete(spread->comp_msg); gu_free (spread); backend->conn = NULL; if (err) { switch (err) { case ILLEGAL_GROUP: return -EADDRNOTAVAIL; case ILLEGAL_SESSION: return -ENOTCONN; case CONNECTION_CLOSED: return -ECONNRESET; default: return -EOPNOTSUPP; } } else { return 0; } } static GCS_BACKEND_SEND_FN(spread_send) { long ret = 0; spread_t *spread = backend->conn; if (SPREAD_TRANSITIONAL == spread->config) return -EAGAIN; /* can it be that not all of the message is sent? */ ret = SP_multicast (spread->mbox, // mailbox SAFE_MESS, // service type spread->channel, // destination group (short)msg_type, // message from application len, // message length (const char*)buf // message buffer ); if (ret != len) { if (ret > 0) return -ECONNRESET; /* Failed to send the whole message */ switch (ret) { case ILLEGAL_SESSION: return -ENOTCONN; case CONNECTION_CLOSED: return -ECONNRESET; default: return -EOPNOTSUPP; } } #ifdef GCS_DEBUG_SPREAD // gu_debug ("spread_send: message sent: %p, len: %d\n", buf, ret); #endif return ret; } /* Substitutes old member array for new (taken from groups), * creates new groups buffer. */ static inline long spread_update_memb (spread_t* spread) { string_array_t* new_groups = string_array_alloc (SPREAD_MAX_GROUPS); if (!new_groups) return -ENOMEM; string_array_free (spread->memb); spread->memb = spread->groups; spread->groups = new_groups; return 0; } /* Temporarily this is done by simple iteration through the whole list. * for a cluster of 2-3 nodes this is probably most optimal. * But it clearly needs to be improved. 
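 * (A possible improvement, not implemented here: keep an id-to-index hash map
 * that is rebuilt in spread_update_memb(), turning this linear scan into an
 * O(1) lookup.)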
*/ static inline long spread_sender_id (const spread_t* const spread, const char* const sender_name) { long id; for (id = 0; id < spread->memb->num_strings; id++) { if (!strncmp(sender_name, spread->memb->strings[id], MAX_GROUP_NAME)) return id; } return GCS_SENDER_NONE; } static gcs_comp_msg_t* spread_comp_create (long my_id, long config_id, long memb_num, char names[][MAX_GROUP_NAME]) { gcs_comp_msg_t* comp = gcs_comp_msg_new (memb_num > 0, my_id, memb_num); long ret = -ENOMEM; if (comp) { long i; for (i = 0; i < memb_num; i++) { ret = gcs_comp_msg_add (comp, names[i]); if (ret != i) { gcs_comp_msg_delete (comp); goto fatal; } } gu_debug ("Created a component message of length %d.", gcs_comp_msg_size(comp)); return comp; } fatal: gu_fatal ("Failed to allocate component message: %s", strerror(-ret)); return NULL; } /* This function actually finalizes component message delivery: * it makes sure that the caller will receive the message and only then * changes handle state (spread->config)*/ static long spread_comp_deliver (spread_t* spread, void* buf, long len, gcs_msg_type_t* msg_type) { long ret; assert (spread->comp_msg); ret = gcs_comp_msg_size (spread->comp_msg); if (ret <= len) { memcpy (buf, spread->comp_msg, ret); spread->config = SPREAD_REGULAR; gcs_comp_msg_delete (spread->comp_msg); spread->comp_msg = NULL; *msg_type = GCS_MSG_COMPONENT; gu_debug ("Component message delivered (length %ld)", ret); } else { // provided buffer is too small for a message: // simply return required size } return ret; } static GCS_BACKEND_RECV_FN(spread_recv) { long ret = 0; spread_t *spread = backend->conn; service serv_type; int16 mess_type; int32 endian_mismatch; /* in case of premature exit */ *sender_idx = GCS_SENDER_NONE; *msg_type = GCS_MSG_ERROR; if (spread->comp_msg) { /* undelivered regular component message */ return spread_comp_deliver (spread, buf, len, msg_type); } if (!len) { // Spread does not seem to tolerate 0-sized buffer return 4096; } while (1) /* Loop while we don't receive the right message */ { ret = SP_receive (spread->mbox, // mailbox/connection &serv_type, // service type: // REGULAR_MESS/MEMBERSHIP_MESS spread->sender, // private group name of a sender spread->groups->max_strings, &spread->groups->num_strings, spread->groups->strings, &mess_type, // app. defined message type &endian_mismatch, len, // maximum message length (char*)buf // message buffer ); // gcs_log ("gcs_spread_recv: SP_receive returned\n"); // gcs_log ("endian_mismatch = %d\n", endian_mismatch); // /* seems there is a bug in either libsp or spread daemon */ // if (spread->groups->num_strings < 0 && ret > 0) // ret = GROUPS_TOO_SHORT; /* First, handle errors */ if (ret < 0) { switch (ret) { case BUFFER_TOO_SHORT: { if (Is_membership_mess (serv_type)) { // Ignore this error as membership messages don't fill // the buffer. Spread seems to have a bug - it returns // BUFFER_TOO_SHORT if you pass zero-length buffer for it. gu_debug ("BUFFER_TOO_SHORT in membership message."); ret = 0; break; } /* return required buffer size to caller */ gu_debug ("Error in SP_receive: BUFFER_TOO_SHORT"); gu_debug ("Supplied buffer len: %d, required: %d", len, (int) -endian_mismatch); gu_debug ("Message type: %d, sender: %d", mess_type, spread_sender_id (spread, spread->sender)); return -endian_mismatch; } case GROUPS_TOO_SHORT: { /* reallocate groups */ size_t num_groups = -spread->groups->num_strings; gu_warn ("Error in SP_receive: GROUPS_TOO_SHORT. 
" "Expect failure."); string_array_free (spread->groups); spread->groups = string_array_alloc (num_groups); if (!spread->groups) return -ENOMEM; /* try again */ continue; } case ILLEGAL_SESSION: gu_debug ("Error in SP_receive: ILLEGAL_SESSION"); return -ECONNABORTED; case CONNECTION_CLOSED: gu_debug ("Error in SP_receive: CONNECTION_CLOSED"); return -ECONNABORTED; case ILLEGAL_MESSAGE: gu_debug ("Error in SP_receive: ILLEGAL_MESSAGE"); continue; // wait for a legal one? default: gu_fatal ("unknown error = %d", ret); return -ENOTRECOVERABLE; } } /* At this point message was successfully received * and stored in buffer. */ if (Is_regular_mess (serv_type)) { // gu_debug ("received REGULAR message of type %d\n", // mess_type); assert (endian_mismatch >= 0); /* BUFFER_TOO_SMALL * must be handled before */ if (endian_mismatch) { gu_debug ("Spread returned ENDIAN_MISMATCH. Ignored."); } *msg_type = mess_type; *sender_idx = spread_sender_id (spread, spread->sender); assert (*sender_idx >= 0); assert (*sender_idx < spread->memb->num_strings); break; } else if (Is_membership_mess (serv_type)) { if (strncmp (spread->channel, spread->sender, MAX_GROUP_NAME)) continue; // wrong group/channel if (Is_transition_mess (serv_type)) { spread->config = SPREAD_TRANSITIONAL; gu_info ("Received TRANSITIONAL message"); continue; } else if (Is_reg_memb_mess (serv_type)) { //assert (spread->groups->num_strings > 0); spread->my_id = mess_type; gu_info ("Received REGULAR MEMBERSHIP " "in group \'%s\' with %d(%d) members " "where I'm member %d\n", spread->sender, spread->groups->num_strings, spread->groups->max_strings, spread->my_id); spread->config_id++; gu_debug ("Configuration number: %d", spread->config_id); spread->comp_msg = spread_comp_create (spread->my_id, spread->config_id, spread->groups->num_strings, spread->groups->strings); if (!spread->comp_msg) return -ENOTRECOVERABLE; /* Update membership info */ if ((ret = spread_update_memb(spread))) return ret; if (Is_caused_join_mess (serv_type)) { gu_info ("due to JOIN"); } else if (Is_caused_leave_mess (serv_type)) { gu_info ("due to LEAVE"); } else if (Is_caused_disconnect_mess (serv_type)) { gu_info ("due to DISCONNECT"); } else if (Is_caused_network_mess (serv_type)) { gu_info ("due to NETWORK"); } else { gu_warn ("unknown REG_MEMB message"); } ret = spread_comp_deliver (spread, buf, len, msg_type); } else if (Is_caused_leave_mess (serv_type)) { gu_info ("received SELF LEAVE message"); // *msg_type = GCS_MSG_COMPONENT; // memset (buf, 0, len); // trivial component spread->comp_msg = gcs_comp_msg_leave (); ret = spread_comp_deliver (spread, buf, len, msg_type); } else { gu_warn ("received unknown MEMBERSHIP message"); continue; // must do something ??? 
} } else if (Is_reject_mess (serv_type)) { gu_info ("received REJECTED message form %s", spread->sender); continue; } else /* Unknown message type */ { gu_warn ("received message of unknown type"); continue; } /* If we reached this point we have successfully received a message */ break; } /* message is already in buf and its length in ret */ return ret; } static GCS_BACKEND_NAME_FN(spread_name) { static char str[128]; int maj, min, patch; SP_version (&maj, &min, &patch); snprintf (str, 128, "Spread %d.%d.%d", maj, min, patch); return str; } /* Spread packet structure seem to be: * 42 bytes - Ethernet + IP + UDP header, 32 bytes Spread packet header + * 80 byte Spread message header present only in the first packet */ static GCS_BACKEND_MSG_SIZE_FN(spread_msg_size) { long ps = pkt_size; long frames = 0; const long eth_frame_size = 1514; const long spread_header_size = 154; // total headers in Spread packet const long spread_max_pkt_size = 31794; // 21 Ethernet frames if (pkt_size <= spread_header_size) { ps = spread_header_size + 1; gu_warn ("Requested packet size %d is too small, " "minimum possible is %d", pkt_size, ps); return pkt_size - ps; } if (pkt_size > spread_max_pkt_size) { ps = spread_max_pkt_size; gu_warn ("Requested packet size %d is too big, " "using maximum possible: %d", pkt_size, ps); } frames = ps / eth_frame_size; frames += ((frames * eth_frame_size) < ps); // one incomplete frame return (ps - frames * (42 + 32) - 80); } static GCS_BACKEND_OPEN_FN(spread_open) { long err = 0; spread_t* spread = backend->conn; if (!spread) return -EBADFD; if (!channel) { gu_error ("No channel supplied."); return -EINVAL; } spread->channel = strdup (channel); if (!spread->channel) return -ENOMEM; err = SP_join (spread->mbox, spread->channel); if (err) { switch (err) /* translate error codes */ { case ILLEGAL_GROUP: err = -EADDRNOTAVAIL; break; case ILLEGAL_SESSION: err = -EADDRNOTAVAIL; break; case CONNECTION_CLOSED: err = -ENETRESET; break; default: err = -ENOTCONN; break; } gu_error ("%s", strerror (-err)); return err; } gu_info ("Joined channel: %s", spread->channel); return err; } #if defined(__linux__) extern char *program_invocation_short_name; #endif GCS_BACKEND_CREATE_FN(gcs_spread_create) { long err = 0; long n = 0; spread_t* spread = NULL; backend->conn = NULL; if (!socket) { gu_error ("No socket supplied."); err = -EINVAL; goto out0; } if ((err = spread_create (&spread, socket))) goto out0; do { /* Try to generate unique name */ if (spread_priv_name (spread->priv_name, #if defined(__sun__) getexecname (), #elif defined(__APPLE__) || defined(__FreeBSD__) getprogname (), #elif defined(__linux__) program_invocation_short_name, #else "unknown", #endif n++)) { /* Failed to generate a name in the form * program_name_number. Let spread do it for us */ gu_free (spread->priv_name); spread->priv_name = NULL; } err = SP_connect (spread->socket, spread->priv_name, 0, 1, &spread->mbox, spread->priv_group); } while (REJECT_NOT_UNIQUE == err); if (err < 0) { gu_debug ("Spread connect error"); switch (err) /* translate error codes */ { case ILLEGAL_SPREAD: err = -ESOCKTNOSUPPORT; break; case COULD_NOT_CONNECT: err = -ENETUNREACH; break; case CONNECTION_CLOSED: err = -ENETRESET; break; case REJECT_ILLEGAL_NAME: err = -EADDRNOTAVAIL; gu_error ("Spread returned REJECT_ILLEGAL_NAME"); break; case REJECT_NO_NAME: err = -EDESTADDRREQ; gu_error ("Spread returned REJECT_NO_NAME." 
"Spread protocol error"); break; case REJECT_VERSION: default: gu_error ("Generic Spread error code: %d", err); err = -EPROTONOSUPPORT; break; } goto out1; } else { assert (err == ACCEPT_SESSION); err = 0; } gu_debug ("Connected to Spread: priv_name = %s, priv_group = %s", spread->priv_name, spread->priv_group); backend->conn = spread; backend->open = spread_open; backend->close = spread_close; backend->send = spread_send; backend->recv = spread_recv; backend->name = spread_name; backend->msg_size = spread_msg_size; backend->destroy = spread_destroy; return err; out1: spread_destroy (backend); out0: gu_error ("Creating Spread backend failed: %s (%d)", strerror (-err), err); return err; } galera-3-25.3.20/gcs/src/gcs.hpp0000644000015300001660000004255213042054732015776 0ustar jenkinsjenkins/* * Copyright (C) 2008-2014 Codership Oy * * $Id$ */ /*! * @file gcs.c Public GCS API */ #ifndef _gcs_h_ #define _gcs_h_ #include "gcs_gcache.hpp" #include #include #include #include #include #include #include #include #include /*! @typedef @brief Sequence number type. */ typedef int64_t gcs_seqno_t; /*! @def @brief Illegal sequence number. Action not serialized. */ static const gcs_seqno_t GCS_SEQNO_ILL = -1; /*! @def @brief Empty state. No actions applied. */ static const gcs_seqno_t GCS_SEQNO_NIL = 0; /*! @def @brief Start of the sequence */ static const gcs_seqno_t GCS_SEQNO_FIRST = 1; /*! @def @brief history UUID length */ #define GCS_UUID_LEN 16 /*! @def @brief maximum supported size of an action (2GB - 1) */ #define GCS_MAX_ACT_SIZE 0x7FFFFFFF /*! Connection handle type */ typedef struct gcs_conn gcs_conn_t; /*! @brief Creates GCS connection handle. * * @param conf gu_config_t* configuration object, can be null. * @param cache pointer to the gcache object. * @param node_name human readable name of the node, can be null. * @param inc_addr address at which application accepts incoming requests. * Used for load balancing, can be null. * @param repl_proto_ver max replicator protocol version. * @param appl_proto_ver max application ptotocol version. * @return pointer to GCS connection handle, NULL in case of failure. */ extern gcs_conn_t* gcs_create (gu_config_t* conf, gcache_t* cache, const char* node_name, const char* inc_addr, int repl_proto_ver, int appl_proto_ver); /*! @brief Initialize group history values (optional). * Serves to provide group history persistence after process restart (in case * these data were saved somewhere on persistent storage or the like). If these * values are provided, it is only a hint for the group, as they might be * outdated. Actual seqno and UUID are returned in GCS_ACT_CONF action (see * below) and are determined by quorum. * * This function must be called before gcs_open() or after gcs_close(). * * @param seqno Sequence number of the application state (last action applied). * Should be negative for undefined state. * @param uuid UUID of the sequence (group ID). * Should be all zeroes for undefined state. * * @return 0 in case of success, -EBUSY if conneciton is already opened, * -EBADFD if connection object is being destroyed. */ extern long gcs_init (gcs_conn_t *conn, gcs_seqno_t seqno, const uint8_t uuid[GCS_UUID_LEN]); /*! @brief Opens connection to group (joins channel). * * @param conn connection object * @param channel a name of the channel to join. It must uniquely identify * the channel. If the channel with such name does not exist, * it is created. Processes that joined the same channel * receive the same actions. 
* @param url an URL-like string that specifies backend communication * driver in the form "TYPE://ADDRESS?options". For gcomm * backend it can be "gcomm://localhost:4567", for dummy backend * ADDRESS field is ignored. * Currently supported backend types: "dummy", "vsbes", "gcomm" * @param bootstrap bootstrap a new group * * @return negative error code, 0 in case of success. */ extern long gcs_open (gcs_conn_t *conn, const char *channel, const char *url, bool bootstrap); /*! @brief Closes connection to group. * * @param conn connection handle * @return negative error code or 0 in case of success. */ extern long gcs_close (gcs_conn_t *conn); /*! @brief Frees resources associuated with connection handle. * * @param conn connection handle * @return negative error code or 0 in case of success. */ extern long gcs_destroy (gcs_conn_t *conn); /*! @brief Deprecated. Waits until the group catches up. * This call checks if any member of the group (including this one) has a * long slave queue. Should be called before gcs_repl(), gcs_send(). * * @return negative error code, 1 if wait is required, 0 otherwise */ extern long gcs_wait (gcs_conn_t *conn); /*! @typedef @brief Action types. * There is a conceptual difference between "messages" * and "actions". Messages are ELEMENTARY pieces of information * atomically delivered by group communication. They are typically * limited in size to a single IP packet. Events generated by group * communication layer must be delivered as a single message. * * For the purpose of this work "action" is a higher level concept * introduced to overcome the message size limitation. Application * replicates information in actions of ARBITRARY size that are * fragmented into as many messages as needed. As such actions * can be delivered only in primary configuration, when total order * of underlying messages is established. * The best analogy for action/message concept would be word/letter. * * The purpose of GCS library is to hide message handling from application. * Therefore application deals only with "actions". * Application can only send actions of types GCS_ACT_TORDERED, * GCS_ACT_COMMIT_CUT and GCS_ACT_STATE_REQ. * Actions of type GCS_ACT_SYNC, GCS_ACT_CONF are generated by the library. */ typedef enum gcs_act_type { /* ordered actions */ GCS_ACT_TORDERED, //! action representing state change, will be assigned // global seqno GCS_ACT_COMMIT_CUT, //! group-wide action commit cut GCS_ACT_STATE_REQ, //! request for state transfer GCS_ACT_CONF, //! new configuration GCS_ACT_JOIN, //! joined group (received all state data) GCS_ACT_SYNC, //! synchronized with group GCS_ACT_FLOW, //! flow control GCS_ACT_SERVICE, //! service action, sent by GCS GCS_ACT_ERROR, //! error happened while receiving the action GCS_ACT_UNKNOWN //! undefined/unknown action type } gcs_act_type_t; /*! String representations of action types */ extern const char* gcs_act_type_to_str(gcs_act_type_t); /*! @brief Sends a vector of buffers as a single action to group and returns. * A copy of action will be returned through gcs_recv() call, or discarded * in case it is not delivered by group. * For a better means to replicate an action see gcs_repl(). 
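 * (In other words, gcs_sendv() is asynchronous: the action is handed back
 * later through gcs_recv() with its seqnos assigned, whereas gcs_repl()
 * blocks the caller until the action has been ordered.)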
@see gcs_repl() * * @param conn group connection handle * @param act_bufs action buffer vector * @param act_size total action size (the sum of buffer sizes) * @param act_type action type * @param scheduled whether the call was scheduled by gcs_schedule() * @return negative error code, action size in case of success * @retval -EINTR thread was interrupted while waiting to enter the monitor */ extern long gcs_sendv (gcs_conn_t* conn, const struct gu_buf* act_bufs, size_t act_size, gcs_act_type_t act_type, bool scheduled); /*! A wrapper for single buffer communication */ static inline long gcs_send (gcs_conn_t* const conn, const void* const act, size_t const act_size, gcs_act_type_t const act_type, bool const scheduled) { struct gu_buf const buf = { act, static_cast(act_size) }; return gcs_sendv (conn, &buf, act_size, act_type, scheduled); } /*!*/ struct gcs_action { const void* buf; /*! unlike input, output goes as a single buffer */ ssize_t size; gcs_seqno_t seqno_g; gcs_seqno_t seqno_l; gcs_act_type_t type; }; /*! @brief Replicates a vector of buffers as a single action. * Sends action to group and blocks until it is received. Upon return global * and local IDs are set. Arguments are the same as in gcs_recv(). * @see gcs_recv() * * @param conn group connection handle * @param act_in action buffer vector (total size is passed in action) * @param action action struct * @param scheduled whether the call was preceded by gcs_schedule() * @return negative error code, action size in case of success * @retval -EINTR: thread was interrupted while waiting to enter the monitor */ extern long gcs_replv (gcs_conn_t* conn, const struct gu_buf* act_in, struct gcs_action* action, bool scheduled); /*! A wrapper for single buffer communication */ static inline long gcs_repl (gcs_conn_t* const conn, struct gcs_action* const action, bool const scheduled) { struct gu_buf const buf = { action->buf, action->size }; return gcs_replv (conn, &buf, action, scheduled); } /*! @brief Receives an action from group. * Blocks if no actions are available. Action buffer is allocated by GCS * and must be freed by application when action is no longer needed. * Also sets global and local action IDs. Global action ID uniquely identifies * action in the history of the group and can be used to identify the state * of the application for state snapshot purposes. Local action ID is a * monotonic gapless number sequence starting with 1 which can be used * to serialize access to critical sections. * * @param conn group connection handle * @param action action object * @return negative error code, action size in case of success, * @retval 0 on connection close */ extern long gcs_recv (gcs_conn_t* conn, struct gcs_action* action); /*! * @brief Schedules entry to CGS send monitor. * Locks send monitor and should be quickly followed by gcs_repl()/gcs_send() * * @retval 0 - won't queue * @retval >0 - queue handle * @retval -EAGAIN - too many queued threads * @retval -EBADFD - connection is closed */ extern long gcs_schedule (gcs_conn_t* conn); /*! * @brief Interrupt a thread waiting to enter send monitor. * * @param conn GCS connection * @param handle queue handle returned by @func gcs_schedule(). Must be > 0 * * @retval 0 success * @retval -ESRCH no such thread/already interrupted */ extern long gcs_interrupt (gcs_conn_t* conn, long handle); /*! * Resume receivng from group. * * @param conn GCS connection * * @retval 0 success * @retval -EBADFD connection is in closed state */ extern long gcs_resume_recv (gcs_conn_t* conn); /*! 
* After action with this seqno is applied, this thread is guaranteed to see * all the changes made by the client, even on other nodes. * * @return global sequence number or negative error code */ extern gcs_seqno_t gcs_caused(gcs_conn_t* conn); /*! @brief Sends state transfer request * Broadcasts state transfer request which will be passed to one of the * suitable group members. * * @param conn connection to group * @param ver STR version. * @param req opaque byte array that contains data required for * the state transfer (application dependent) * @param size request size * @param donor desired state transfer donor name. Supply empty string to * choose automatically. * @param seqno response to request was ordered with this seqno. * Must be skipped in local queues. * @return negative error code, index of state transfer donor in case of success * (notably, -EAGAIN means try later, -EHOSTUNREACH means desired donor * is unavailable) */ extern long gcs_request_state_transfer (gcs_conn_t *conn, int ver, const void *req, size_t size, const char *donor, const gu_uuid_t* ist_uuid, gcs_seqno_t ist_seqno, gcs_seqno_t *seqno); /*! @brief Turns off flow control on the node. * Effectively desynchronizes the node from the cluster (while the node keeps on * receiving all the actions). Requires gcs_join() to return to normal. * * @param conn connection to group * @param seqno response to request was ordered with this seqno. * Must be skipped in local queues. * @return negative error code, 0 in case of success. */ extern long gcs_desync (gcs_conn_t* conn, gcs_seqno_t* seqno); /*! @brief Informs group on behalf of donor that state stransfer is over. * If status is non-negative, joiner will be considered fully joined to group. * * @param conn opened connection to group * @param status negative error code in case of state transfer failure, * 0 or (optional) seqno corresponding to transferred state. * @return negative error code, 0 in case of success */ extern long gcs_join (gcs_conn_t *conn, gcs_seqno_t status); /*! @brief Allocate local seqno for accessing local resources. * * * @param conn connection to group * @return local seqno, negative error code in case of error */ extern gcs_seqno_t gcs_local_sequence(gcs_conn_t* conn); /////////////////////////////////////////////////////////////////////////////// /* Service functions */ /*! Informs group about the last applied action on this node */ extern long gcs_set_last_applied (gcs_conn_t* conn, gcs_seqno_t seqno); /* GCS Configuration */ /*! Registers configurable parameters with conf object * @return false if success, true if error happened */ extern bool gcs_register_params (gu_config_t* conf); /*! sets the key to a given value * * @return 0 in case of success, 1 if key not found or negative error code */ extern long gcs_param_set (gcs_conn_t* conn, const char* key, const char *value); /*! returns the value of the key * * @return NULL if key not found */ extern const char* gcs_param_get (gcs_conn_t* conn, const char* key); /* Logging options */ extern long gcs_conf_set_log_file (FILE *file); extern long gcs_conf_set_log_callback (void (*logger) (int, const char*)); extern long gcs_conf_self_tstamp_on (); extern long gcs_conf_self_tstamp_off (); extern long gcs_conf_debug_on (); extern long gcs_conf_debug_off (); /* Sending options (deprecated, use gcs_param_set instead) */ /* Sets maximum DESIRED network packet size. 
* For best results should be multiple of MTU */ extern long gcs_conf_set_pkt_size (gcs_conn_t *conn, long pkt_size); #define GCS_DEFAULT_PKT_SIZE 64500 /* 43 Eth. frames to carry max IP packet */ /* * Configuration action */ /*! Possible node states */ typedef enum gcs_node_state { GCS_NODE_STATE_NON_PRIM, /// in non-primary configuration, outdated state GCS_NODE_STATE_PRIM, /// in primary conf, needs state transfer GCS_NODE_STATE_JOINER, /// in primary conf, receiving state transfer GCS_NODE_STATE_DONOR, /// joined, donating state transfer GCS_NODE_STATE_JOINED, /// contains full state GCS_NODE_STATE_SYNCED, /// syncronized with group GCS_NODE_STATE_MAX } gcs_node_state_t; /*! Convert state code to null-terminates string */ extern const char* gcs_node_state_to_str (gcs_node_state_t state); /*! New configuration action */ typedef struct gcs_act_conf { gcs_seqno_t seqno; //! last global seqno applied by this group gcs_seqno_t conf_id; //! configuration ID (-1 if non-primary) uint8_t uuid[GCS_UUID_LEN];/// group UUID long memb_num; //! number of members in configuration long my_idx; //! index of this node in the configuration gcs_node_state_t my_state; //! current node state int repl_proto_ver; //! replicator protocol version to use int appl_proto_ver; //! application protocol version to use char data[1]; /*! member array (null-terminated ID, name, * incoming address, 8-byte cached seqno) */ } gcs_act_conf_t; typedef struct gcs_backend_stats { struct stats_t { const char* key; const char* value; }* stats; void* ctx; } gcs_backend_stats_t; struct gcs_stats { double send_q_len_avg; //! average send queue length per send call double recv_q_len_avg; //! average recv queue length per queued action long long fc_paused_ns; //! total nanoseconds spent in paused state double fc_paused_avg; //! faction of time paused due to flow control long long fc_sent; //! flow control stops sent long long fc_received; //! flow control stops received size_t recv_q_size; //! current recv queue size int recv_q_len; //! current recv queue length int recv_q_len_max; //! maximum recv queue length int recv_q_len_min; //! minimum recv queue length int send_q_len; //! current send queue length int send_q_len_max; //! maximum send queue length int send_q_len_min; //! minimum send queue length gcs_backend_stats_t backend_stats; //! backend stats. }; /*! Fills stats struct */ extern void gcs_get_stats (gcs_conn_t *conn, struct gcs_stats* stats); /*! flushes stats counters */ extern void gcs_flush_stats(gcs_conn_t *conn); void gcs_get_status(gcs_conn_t* conn, gu::Status& status); /*! A node with this name will be treated as a stateless arbitrator */ #define GCS_ARBITRATOR_NAME "garb" #endif // _gcs_h_ galera-3-25.3.20/gcs/src/gcs_node.hpp0000644000015300001660000001055413042054732017000 0ustar jenkinsjenkins/* * Copyright (C) 2008-2016 Codership Oy * * $Id$ */ /*! * Node context */ #ifndef _gcs_node_h_ #define _gcs_node_h_ #include #include "gcs.hpp" #include "gcs_defrag.hpp" #include "gcs_comp_msg.hpp" #include "gcs_state_msg.hpp" #define NODE_NO_ID "undefined" #define NODE_NO_NAME "unspecified" #define NODE_NO_ADDR "unspecified" struct gcs_node { gcs_defrag_t app; // defragmenter for application actions gcs_defrag_t oob; // defragmenter for out-of-band service acts. 
// globally unique id from a component message char id[GCS_COMP_MEMB_ID_MAX_LEN + 1]; // to track snapshot status char joiner[GCS_COMP_MEMB_ID_MAX_LEN + 1]; char donor [GCS_COMP_MEMB_ID_MAX_LEN + 1]; const char* name; // human-given name const char* inc_addr; // incoming address - for load balancer const gcs_state_msg_t* state_msg;// state message gcs_seqno_t last_applied; // last applied action on that node int gcs_proto_ver;// supported protocol versions int repl_proto_ver; int appl_proto_ver; int desync_count; gcs_node_state_t status; // node status gcs_segment_t segment; bool count_last_applied; // should it be counted bool bootstrap; // is part of prim comp bootstrap process }; typedef struct gcs_node gcs_node_t; /*! Initialize node context */ extern void gcs_node_init (gcs_node_t* node, gcache_t* gcache, const char* id, const char* name, ///< can be null const char* inc_addr, ///< can be null int gcs_proto_ver, int repl_proto_ver, int appl_proto_ver, gcs_segment_t segment); /*! Move data from one node object to another */ extern void gcs_node_move (gcs_node_t* dest, gcs_node_t* src); /*! Deallocate resources associated with the node object */ extern void gcs_node_free (gcs_node_t* node); /*! Reset node's receive buffers */ extern void gcs_node_reset (gcs_node_t* node); /*! Mark node's buffers as reset, but don't do it actually (local node only) */ extern void gcs_node_reset_local (gcs_node_t* node); /*! * Handles action message. Is called often - therefore, inlined * * @return */ static inline ssize_t gcs_node_handle_act_frag (gcs_node_t* node, const gcs_act_frag_t* frg, struct gcs_act* act, bool local) { if (gu_likely(GCS_ACT_SERVICE != frg->act_type)) { return gcs_defrag_handle_frag (&node->app, frg, act, local); } else if (GCS_ACT_SERVICE == frg->act_type) { return gcs_defrag_handle_frag (&node->oob, frg, act, local); } else { gu_warn ("Unrecognised action type: %d", frg->act_type); assert(0); return -EPROTO; } } static inline void gcs_node_set_last_applied (gcs_node_t* node, gcs_seqno_t seqno) { if (gu_unlikely(seqno < node->last_applied)) { gu_warn ("Received bogus LAST message: %lld, from node %s, " "expected >= %lld. Ignoring.", seqno, node->id, node->last_applied); } else { node->last_applied = seqno; } } static inline gcs_seqno_t gcs_node_get_last_applied (gcs_node_t* node) { return node->last_applied; } /*! Record state message from the node */ extern void gcs_node_record_state (gcs_node_t* node, gcs_state_msg_t* state); /*! Update node status according to quorum decisions */ extern void gcs_node_update_status (gcs_node_t* node, const gcs_state_quorum_t* quorum); static inline gcs_node_state_t gcs_node_get_status (const gcs_node_t* node) { return node->status; } static inline gcs_seqno_t gcs_node_cached (const gcs_node_t* node) { /* node->state_msg check is needed in NON-PRIM situations, where no * state message exchange happens */ if (node->state_msg) return gcs_state_msg_cached(node->state_msg); else return GCS_SEQNO_ILL; } static inline uint8_t gcs_node_flags (const gcs_node_t* node) { return gcs_state_msg_flags(node->state_msg); } static inline bool gcs_node_is_joined (const gcs_node_state_t st) { return (st >= GCS_NODE_STATE_DONOR); } #endif /* _gcs_node_h_ */ galera-3-25.3.20/gcs/src/gcs_msg_type.hpp0000644000015300001660000000253613042054732017703 0ustar jenkinsjenkins/* * Copyright (C) 2008 Codership Oy * * $Id$ */ /* * Message types. */ #ifndef _gcs_msg_type_h_ #define _gcs_msg_type_h_ // NOTE! 
When changing this enumaration, make sure to change // gcs_msg_type_string[] in gcs_msg_type.c typedef enum gcs_msg_type { GCS_MSG_ERROR, // error happened when recv() GCS_MSG_ACTION, // action fragment GCS_MSG_LAST, // report about last applied action GCS_MSG_COMPONENT, // new component GCS_MSG_STATE_UUID,// state exchange UUID message GCS_MSG_STATE_MSG, // state exchange message GCS_MSG_JOIN, // massage saying that the node completed state transfer GCS_MSG_SYNC, // message saying that the node has synced with group GCS_MSG_FLOW, // flow control message GCS_MSG_CAUSAL, // causality token GCS_MSG_MAX } gcs_msg_type_t; extern const char* gcs_msg_type_string[GCS_MSG_MAX]; /* Types of private actions - should not care, * must be defined and used by the application */ /* Types of regular configuration mesages (both PRIM/NON_PRIM) */ typedef enum gcs_reg_type { GCS_REG_JOIN, // caused by member JOIN GCS_REG_LEAVE, // caused by member LEAVE GCS_REG_DISCONNECT, // caused by member DISCONNECT GCS_REG_NETWORK // caused by NETWORK failure? } gcs_reg_type_t; #endif // _gcs_message_h_ galera-3-25.3.20/gcs/src/gcs_fc.hpp0000644000015300001660000000367613042054732016452 0ustar jenkinsjenkins/* * Copyright (C) 2010 Codership Oy * * $Id$ */ /*! @file This unit contains Flow Control parts deemed worthy to be * taken out of gcs.c */ #ifndef _gcs_fc_h_ #define _gcs_fc_h_ #include #include #include typedef struct gcs_fc { ssize_t hard_limit; // hard limit for slave queue size ssize_t soft_limit; // soft limit for slave queue size, after it FC kicks in double max_throttle; // limit on how much we can throttle replication ssize_t init_size; // initial queue size ssize_t size; // current queue size ssize_t last_sleep; // queue size when last sleep happened ssize_t act_count; // action count double max_rate; // normal replication data rate (byte/s) double scale; // data rate scale factor double offset; // data rate offset (rate = scale*size + offset) long long start; // beginning of the time interval (nanosec, monotonic) long debug; // how often to print debug messages, 0 - never ssize_t sleep_count; double sleeps; } gcs_fc_t; extern double const gcs_fc_hard_limit_fix; //! allow for some overhead /*! Initializes operational constants before oprning connection to group */ extern int gcs_fc_init (gcs_fc_t* fc, ssize_t hard_limit, // hard limit double soft_limit, // soft limit as a fraction of hard limit double max_throttle); /*! Reinitializes object at the beginning of state transfer */ extern void gcs_fc_reset (gcs_fc_t* fc, ssize_t queue_size); /*! Processes a new action added to a slave queue. * @return nanoseconds to sleep or -ENOMEM in case of hitting * hard limit or GU_TIME_ETERNITY to pause forever */ extern long long gcs_fc_process (gcs_fc_t* fc, ssize_t act_size); /*! Print debug info every debug_level'th call to gcs_fc_process. */ extern void gcs_fc_debug (gcs_fc_t* fc, long debug_level); #endif /* _gcs_fc_h_ */ galera-3-25.3.20/gcs/src/gcs_seqno.hpp0000644000015300001660000000064413042054732017177 0ustar jenkinsjenkins/* * Copyright (C) 2008-2012 Codership Oy * * $Id$ */ /* * Operations on seqno. 
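 *
 * The macros below are thin wrappers around the galerautils 64-bit
 * byte-order helpers (gu_le64/gu_be64/htog64), used to convert a
 * gcs_seqno_t between host byte order and a fixed serialized byte order.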
*/ #ifndef _gcs_seqno_h_ #define _gcs_seqno_h_ #include "galerautils.h" #include "gcs.hpp" #define gcs_seqno_le(x) ((gcs_seqno_t)gu_le64(x)) #define gcs_seqno_be(x) ((gcs_seqno_t)gu_be64(x)) #define gcs_seqno_htog(x) ((gcs_seqno_t)htog64(x)) #define gcs_seqno_gtoh gcs_seqno_htog #endif /* _gcs_seqno_h_ */ galera-3-25.3.20/gcs/src/gcs_defrag.cpp0000644000015300001660000001547413042054732017304 0ustar jenkinsjenkins/* * Copyright (C) 2008 Codership Oy * * $Id$ */ #include "gcs_defrag.hpp" #include #include #include #define DF_ALLOC() \ do { \ df->head = static_cast(gcs_gcache_malloc (df->cache, df->size)); \ \ if(gu_likely(df->head != NULL)) \ df->tail = df->head; \ else { \ gu_error ("Could not allocate memory for new " \ "action of size: %zd", df->size); \ return -ENOMEM; \ } \ } while (0) /*! * Handle action fragment * * Unless a whole action is returned, contents of act is undefined * * In order to optimize branch prediction used gu_likely macros and odered and * nested if/else blocks according to branch probability. * * @return 0 - success, * size of action - success, full action received, * negative - error. * * TODO: this function is too long, figure out a way to factor it into several * smaller ones. Note that it is called for every GCS_MSG_ACTION message * so it should be optimal. */ ssize_t gcs_defrag_handle_frag (gcs_defrag_t* df, const gcs_act_frag_t* frg, struct gcs_act* act, bool local) { if (df->received) { /* another fragment of existing action */ df->frag_no++; /* detect possible error condition */ if (gu_unlikely((df->sent_id != frg->act_id) || (df->frag_no != frg->frag_no))) { if (local && df->reset && (df->sent_id == frg->act_id) && (0 == frg->frag_no)) { /* df->sent_id was aborted halfway and is being taken care of * by the sender thread. Forget about it. * Reinit counters and continue with the new action. */ gu_debug ("Local action %lld, size %ld reset.", frg->act_id, frg->act_size); df->frag_no = 0; df->received = 0; df->tail = df->head; df->reset = false; if (df->size != frg->act_size) { df->size = frg->act_size; #ifndef GCS_FOR_GARB if (df->cache !=NULL) { gcache_free (df->cache, df->head); } else { free ((void*)df->head); } DF_ALLOC(); #endif /* GCS_FOR_GARB */ } } else if (frg->act_id == df->sent_id && frg->frag_no < df->frag_no) { /* gh172: tolerate duplicate fragments in production. */ gu_warn ("Duplicate fragment %lld:%ld, expected %lld:%ld. " "Skipping.", frg->act_id, frg->frag_no, df->sent_id, df->frag_no); df->frag_no--; // revert counter in hope that we get good frag assert(0); return 0; } else { gu_error ("Unordered fragment received. Protocol error."); gu_error ("Expected: %llu:%ld, received: %llu:%ld", df->sent_id, df->frag_no, frg->act_id, frg->frag_no); gu_error ("Contents: '%.*s'", frg->frag_len, (char*)frg->frag); df->frag_no--; // revert counter in hope that we get good frag assert(0); return -EPROTO; } } } else { /* new action */ if (gu_likely(0 == frg->frag_no)) { df->size = frg->act_size; df->sent_id = frg->act_id; df->reset = false; #ifndef GCS_FOR_GARB DF_ALLOC(); #else /* we don't store actions locally at all */ df->head = NULL; df->tail = df->head; #endif } else { /* not a first fragment */ if (!local && df->reset) { /* can happen after configuration change, just ignore this message calmly */ gu_debug ("Ignoring fragment %lld:%ld (size %d) after reset", frg->act_id, frg->frag_no, frg->act_size); return 0; } else { ((char*)frg->frag)[frg->frag_len - 1] = '\0'; gu_error ("Unordered fragment received. 
Protocol error."); gu_error ("Expected: any:0(first), received: %lld:%ld", frg->act_id, frg->frag_no); gu_error ("Contents: '%s', local: %s, reset: %s", (char*)frg->frag, local ? "yes" : "no", df->reset ? "yes" : "no"); assert(0); return -EPROTO; } } } df->received += frg->frag_len; assert (df->received <= df->size); #ifndef GCS_FOR_GARB assert (df->tail); memcpy (df->tail, frg->frag, frg->frag_len); df->tail += frg->frag_len; #else /* we skip memcpy since have not allocated any buffer */ assert (NULL == df->tail); assert (NULL == df->head); #endif #if 1 if (df->received == df->size) { act->buf = df->head; act->buf_len = df->received; gcs_defrag_init (df, df->cache); return act->buf_len; } else { return 0; } #else /* Refs gh185. Above original logic is preserved which relies on resetting * group->frag_reset when local action needs to be resent. However a proper * solution seems to be to use reset flag of own defrag channel (at least * it is per channel, not global like group->frag_reset). This proper logic * is shown below. Note that for it to work gcs_group_handle_act_msg() * must be able to handle -ERESTART return code. */ int ret; if (df->received == df->size) { act->buf = df->head; act->buf_len = df->received; if (gu_likely(!df->reset)) { ret = act->buf_len; } else { /* foreign action should simply never get here, only local actions * are allowed to complete in reset state (to return -ERESTART) to * a sending thread. */ assert(local); ret = -ERESTART; } gcs_defrag_init (df, df->cache); // this also clears df->reset flag assert(!df->reset); } else { ret = 0; } return ret; #endif } galera-3-25.3.20/gcs/src/gcs_dummy.hpp0000644000015300001660000000225413042054732017204 0ustar jenkinsjenkins/* * Copyright (C) 2008-2014 Codership Oy * * $Id$ */ /* * Dummy backend specification */ #ifndef _gcs_dummy_h_ #define _gcs_dummy_h_ #include "gcs_backend.hpp" #include "gcs_comp_msg.hpp" extern GCS_BACKEND_REGISTER_FN (gcs_dummy_register); extern GCS_BACKEND_CREATE_FN (gcs_dummy_create); #ifdef GCS_DUMMY_TESTING /* * What follows is an API for unit testing */ /*! Injects a message in the message queue to produce a desired msg sequence. */ extern long gcs_dummy_inject_msg (gcs_backend_t* backend, const void* msg, size_t len, gcs_msg_type_t type, long sender_idx); /*! Sets the new component view. * The same component message should be injected in the queue separately * (see gcs_dummy_inject_msg()) in order to model different race conditions */ extern long gcs_dummy_set_component (gcs_backend_t* backend, const gcs_comp_msg_t* comp); /*! Is needed to set transitional state */ extern long gcs_dummy_set_transitional (gcs_backend_t* backend); #endif /* GCS_DUMMY_TESTING */ #endif /* _gcs_dummy_h_ */ galera-3-25.3.20/gcs/src/gcs_sm.cpp0000644000015300001660000001143613042054732016465 0ustar jenkinsjenkins/* * Copyright (C) 2010 Codership Oy * * $Id$ */ /*! * @file GCS Send Monitor. 
To ensure fair (FIFO) access to gcs_core_send() */ #include "gcs_sm.hpp" #include static void sm_init_stats (gcs_sm_stats_t* stats) { stats->sample_start = gu_time_monotonic(); stats->pause_start = 0; stats->paused_ns = 0; stats->paused_sample = 0; stats->send_q_samples = 0; stats->send_q_len = 0; stats->send_q_len_max = 0; stats->send_q_len_min = 0; } gcs_sm_t* gcs_sm_create (long len, long n) { if ((len < 2 /* 2 is minimum */) || (len & (len - 1))) { gu_error ("Monitor length parameter is not a power of 2: %ld", len); return NULL; } if (n < 1) { gu_error ("Invalid monitor concurrency parameter: %ld", n); return NULL; } size_t sm_size = sizeof(gcs_sm_t) + len * sizeof(((gcs_sm_t*)(0))->wait_q[0]); gcs_sm_t* sm = static_cast(gu_malloc(sm_size)); if (sm) { sm_init_stats (&sm->stats); gu_mutex_init (&sm->lock, NULL); #ifdef GCS_SM_GRAB_RELEASE gu_cond_init (&sm->cond, NULL); sm->cond_wait = 0; #endif /* GCS_SM_GRAB_RELEASE */ sm->wait_q_len = len; sm->wait_q_mask = sm->wait_q_len - 1; sm->wait_q_head = 1; sm->wait_q_tail = 0; sm->users = 0; sm->users_max = 0; sm->users_min = 0; sm->entered = 0; sm->ret = 0; #ifdef GCS_SM_CONCURRENCY sm->cc = n; // concurrency param. #endif /* GCS_SM_CONCURRENCY */ sm->pause = false; sm->wait_time = gu::datetime::Sec; memset (sm->wait_q, 0, sm->wait_q_len * sizeof(sm->wait_q[0])); } return sm; } long gcs_sm_close (gcs_sm_t* sm) { gu_info ("Closing send monitor..."); if (gu_unlikely(gu_mutex_lock (&sm->lock))) abort(); sm->ret = -EBADFD; if (sm->pause) _gcs_sm_continue_common (sm); gu_cond_t cond; gu_cond_init (&cond, NULL); // in case the queue is full while (sm->users >= (long)sm->wait_q_len) { gu_mutex_unlock (&sm->lock); usleep(1000); gu_mutex_lock (&sm->lock); } while (sm->users > 0) { // wait for cleared queue sm->users++; GCS_SM_INCREMENT(sm->wait_q_tail); _gcs_sm_enqueue_common (sm, &cond, true); sm->users--; GCS_SM_INCREMENT(sm->wait_q_head); } gu_cond_destroy (&cond); gu_mutex_unlock (&sm->lock); gu_info ("Closed send monitor."); return 0; } long gcs_sm_open (gcs_sm_t* sm) { long ret = -1; if (gu_unlikely(gu_mutex_lock (&sm->lock))) abort(); if (-EBADFD == sm->ret) /* closed */ { sm->ret = 0; } ret = sm->ret; gu_mutex_unlock (&sm->lock); if (ret) { gu_error ("Can't open send monitor: wrong state %d", ret); } return ret; } void gcs_sm_destroy (gcs_sm_t* sm) { gu_mutex_destroy(&sm->lock); gu_free (sm); } void gcs_sm_stats_get (gcs_sm_t* sm, int* q_len, int* q_len_max, int* q_len_min, double* q_len_avg, long long* paused_ns, double* paused_avg) { gcs_sm_stats_t tmp; long long now; bool paused; if (gu_unlikely(gu_mutex_lock (&sm->lock))) abort(); *q_len_max = sm->users_max; *q_len_min = sm->users_min; *q_len = sm->users; tmp = sm->stats; now = gu_time_monotonic(); paused = sm->pause; gu_mutex_unlock (&sm->lock); if (paused) { // taking sample in a middle of a pause tmp.paused_ns += now - tmp.pause_start; } *paused_ns = tmp.paused_ns; if (gu_likely(tmp.paused_ns >= 0)) { *paused_avg = ((double)(tmp.paused_ns - tmp.paused_sample)) / (now - tmp.sample_start); } else { *paused_avg = -1.0; } if (gu_likely(tmp.send_q_len >= 0 && tmp.send_q_samples >= 0)){ if (gu_likely(tmp.send_q_samples > 0)) { *q_len_avg = ((double)tmp.send_q_len) / tmp.send_q_samples; } else { *q_len_avg = 0.0; } } else { *q_len_avg = -1.0; } } void gcs_sm_stats_flush(gcs_sm_t* sm) { if (gu_unlikely(gu_mutex_lock (&sm->lock))) abort(); long long const now = gu_time_monotonic(); sm->stats.sample_start = now; sm->stats.paused_sample = sm->stats.paused_ns; if (sm->pause) // append elapsed 
pause time { sm->stats.paused_sample += now - sm->stats.pause_start; } sm->stats.send_q_len = 0; sm->stats.send_q_len_max = 0; sm->stats.send_q_len_min = 0; sm->stats.send_q_samples = 0; sm->users_max = sm->users; sm->users_min = sm->users; gu_mutex_unlock (&sm->lock); } galera-3-25.3.20/gcs/src/gcs_fifo_lite.hpp0000644000015300001660000001131013042054732020002 0ustar jenkinsjenkins/* * Copyright (C) 2008-2011 Codership Oy * * $Id$ * * FIFO "class" customized for particular purpose * (here I decided to sacrifice generality for efficiency). * Implements fixed size "mallocless" FIFO (read "ring buffer"). * Except gcs_fifo_create() there are two types of fifo * access methods - protected and unprotected. Unprotected * methods assume that calling routines implement their own * protection, and thus are simplified for speed. */ #ifndef _GCS_FIFO_LITE_H_ #define _GCS_FIFO_LITE_H_ #include #include #include #include #include #include #include "gcs.hpp" typedef struct gcs_fifo_lite { long length; ulong item_size; ulong mask; ulong head; ulong tail; long used; bool closed; bool destroyed; long put_wait; long get_wait; gu_cond_t put_cond; gu_cond_t get_cond; gu_mutex_t lock; void* queue; } gcs_fifo_lite_t; /* Creates FIFO object. Since it practically consists of array of (void*), * the length can be chosen arbitrarily high - to minimize the risk * of overflow situation. */ gcs_fifo_lite_t* gcs_fifo_lite_create (size_t length, size_t item_size); void gcs_fifo_lite_close (gcs_fifo_lite_t* fifo); void gcs_fifo_lite_open (gcs_fifo_lite_t* fifo); long gcs_fifo_lite_destroy (gcs_fifo_lite_t* fifo); static inline void* _gcs_fifo_lite_tail (gcs_fifo_lite_t* f) { return ((char*)f->queue + f->tail * f->item_size); } static inline void* _gcs_fifo_lite_head (gcs_fifo_lite_t* f) { return ((char*)f->queue + f->head * f->item_size); } #define GCS_FIFO_LITE_LOCK \ if (gu_unlikely (gu_mutex_lock (&fifo->lock))) { \ gu_fatal ("Mutex lock failed."); \ abort(); \ } /*! If FIFO is not full, returns pointer to the tail item and locks FIFO, * otherwise blocks. Or returns NULL if FIFO is closed. */ static inline void* gcs_fifo_lite_get_tail (gcs_fifo_lite_t* fifo) { void* ret = NULL; GCS_FIFO_LITE_LOCK; while (!fifo->closed && fifo->used >= fifo->length) { fifo->put_wait++; gu_cond_wait (&fifo->put_cond, &fifo->lock); } if (gu_likely(!fifo->closed)) { assert (fifo->used < fifo->length); ret = _gcs_fifo_lite_tail (fifo); } else { gu_mutex_unlock (&fifo->lock); } return ret; } /*! Advances FIFO tail and unlocks FIFO */ static inline void gcs_fifo_lite_push_tail (gcs_fifo_lite_t* fifo) { fifo->tail = (fifo->tail + 1) & fifo->mask; fifo->used++; assert (fifo->used <= fifo->length); if (fifo->get_wait > 0) { fifo->get_wait--; gu_cond_signal (&fifo->get_cond); } gu_mutex_unlock (&fifo->lock); } /*! If FIFO is not empty, returns pointer to the head item and locks FIFO, * or returns NULL if FIFO is empty. Blocking behaviour disabled since * it is not needed in GCS: recv_thread should never block. */ static inline void* gcs_fifo_lite_get_head (gcs_fifo_lite_t* fifo) { void* ret = NULL; GCS_FIFO_LITE_LOCK; /* Uncomment this for blocking behaviour while (!fifo->closed && 0 == fifo->used) { fifo->get_wait++; gu_cond_wait (&fifo->get_cond, &fifo->lock); } */ if (gu_likely(fifo->used > 0)) { ret = _gcs_fifo_lite_head (fifo); } else { gu_mutex_unlock (&fifo->lock); } return ret; } /*! 
Advances FIFO head and unlocks FIFO */ static inline void gcs_fifo_lite_pop_head (gcs_fifo_lite_t* fifo) { fifo->head = (fifo->head + 1) & fifo->mask; fifo->used--; assert (fifo->used != -1); if (fifo->put_wait > 0) { fifo->put_wait--; gu_cond_signal (&fifo->put_cond); } gu_mutex_unlock (&fifo->lock); } /*! Unlocks FIFO */ static inline long gcs_fifo_lite_release (gcs_fifo_lite_t* fifo) { return (gu_mutex_unlock (&fifo->lock)); } /*! Removes item from tail, returns true on success */ static inline bool gcs_fifo_lite_remove (gcs_fifo_lite_t* const fifo) { bool ret = false; assert (fifo); GCS_FIFO_LITE_LOCK; if (fifo->used) { fifo->tail = (fifo->tail - 1) & fifo->mask; fifo->used--; ret = true; if (fifo->put_wait > 0) { fifo->put_wait--; gu_cond_signal (&fifo->put_cond); } } gu_mutex_unlock (&fifo->lock); return ret; } static inline bool gcs_fifo_lite_not_full (const gcs_fifo_lite_t* const fifo) { return (fifo->used < fifo->length); } #endif /* _GCS_FIFO_LITE_H_ */ galera-3-25.3.20/gcs/doc/0000755000015300001660000000000013042054732014457 5ustar jenkinsjenkinsgalera-3-25.3.20/gcs/doc/GCS_connection_states.txt0000644000015300001660000001177413042054732021450 0ustar jenkinsjenkins
GCS CONNECTION STATES (from the application viewpoint)

Since GCS is a library to be utilized by an application, it has to export some sort of a Group handle to the application. So far the intent has been to give this handle connection-oriented socket semantics. The reasons for that are:

1) It is better to expand on a well understood and established concept rather than invent something.
2) The whole idea of GCS is to avoid exporting Group Communication concepts to the application. It is much easier to work with a socket.
3) The main point of the Group is a linearly serialized stream of messages, with the Group being a single source/sink of the messages. This effectively makes Group communication a point-to-point connection.

Initially this seemed rather plain to me: when we're part of the primary configuration, we can send and receive messages. When not - all calls just return -ENOTCONN. However, there are certain aspects of GC that make its interpretation as a socket not so straightforward. These are configuration changes, primary/non-primary configurations and state snapshots. For the demo these were deemed not essential and were not addressed. As we're moving on, this has to be addressed, since we have to settle the API sooner rather than later.

Basically it goes this way. Whenever a DBMS process joins the primary configuration in any other way than by a configuration change from the previous primary configuration, it has to take a state snapshot (be it incremental or complete), and only after that can it be considered a part of the quorum. It could be done the following way (a sketch of the joiner side follows the list):

1) The process receives a configuration change message and decides whether it needs to take a state snapshot.
2) If "yes", then it sends a snapshot request message. One of the quorum members dispatches the snapshot to the joiner.
3) When the snapshot is complete, the joiner sends the final join message. (Maybe "join" is not a good term here, but I'll use it just for now.)
4) When the join message is received, every configuration member puts the process in the quorum member list. Only now is the process a full-fledged member of the Group.
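To make the sequence above concrete, below is a minimal, self-contained C sketch of the joiner side of steps 1)-4). Every name in it (on_conf_change_to_primary(), send_msg(), the MSG_* constants) is a hypothetical illustration of the protocol described in this note, not actual GCS API.

/* Hypothetical sketch of the joiner-side join sequence (steps 1-4 above).
 * None of these names exist in GCS; they only illustrate the protocol. */
#include <stdbool.h>
#include <stdio.h>

typedef enum { MSG_SNAPSHOT_REQUEST, MSG_JOIN } msg_type_t;

/* step 1: decide whether a snapshot is needed (stubbed) */
static bool joiner_needs_snapshot (void) { return true; }

/* stub: pretend to broadcast a message to the group */
static void send_msg (msg_type_t type)
{
    printf ("sending %s\n",
            MSG_SNAPSHOT_REQUEST == type ? "snapshot request" : "join");
}

/* stub: pretend to receive the snapshot from the donor (step 2) */
static void receive_snapshot (void) { printf ("receiving snapshot\n"); }

static void on_conf_change_to_primary (void)
{
    if (joiner_needs_snapshot()) {        /* step 1                      */
        send_msg (MSG_SNAPSHOT_REQUEST);  /* step 2: a donor answers     */
        receive_snapshot ();
    }
    send_msg (MSG_JOIN);                  /* step 3                      */
    /* step 4 happens on every member when the join message is
     * delivered: the joiner is added to the quorum member list. */
}

int main (void) { on_conf_change_to_primary (); return 0; }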
Note that I've been speaking of two separate memberships here: "configuration" and "quorum". A process is a member of the configuration as soon as it receives a configuration change message (bear in mind, I'm assuming Spread as a communication backend now), so it can receive and, theoretically, send messages. However, it does not have the up-to-date state, and in the case of a DBMS it:

1) Cannot really apply messages (write sets in our case).
2) Cannot give a snapshot in case of another configuration change, so it cannot be used in quorum calculation.

All this makes the process a sort of "2nd grade" configuration member until it gets a snapshot. The problem is that every configuration member has to be notified when the snapshot is complete, hence we need this "join" message. As a result, the state machine for the GCS connection will get one more state:

                   own JOIN message received
                   +----------------------+           ______
                   |                      V          V      \
gcs_open()    +----------+        +---------------+         |
 conf.   ---->| GCS_OPEN |        | GCS_CONNECTED |         |
 change       +----------+        +---------------+         |
 to PRIM           ^                      |          \______/
                   +----------------------+
                own LEAVE message received,
                conf. change to NON-PRIM

Rough explanation:

GCS_OPEN (perhaps it should be split into OPEN_PRIM and OPEN_NON_PRIM). Only snapshot request and join messages are allowed. An attempt to send anything else results in -ENOTCONN. An attempt to send a join message while in a non-primary configuration should result in -ECONNREFUSED.

GCS_CONNECTED. An attempt to send a snapshot request or a join message results in -EISCONN. Application messages are sent alright. When a GCS_CONNECTED->GCS_OPEN change happens, all pending GCS calls return -ECONNRESET.

So the GCS API is about to get more complicated. And here we have two alternatives:

1) Implicitly expose the GCS connection state to the application through those error codes. The application will have to keep its own track of the GCS connection state and not forget to send the join message. In this case the API can stay the same, but its usage will get a bit more complicated (a sketch of this bookkeeping is given below).
2) The application can provide request_snapshot(), send_snapshot() and receive_snapshot() callbacks to the library. Then all this could be handled by the library, and the application would not have to know anything about snapshot request or join messages. This won't simplify the application much, though: the callbacks will have to be able to communicate and synchronize with other threads, since in this case the application will have no control over when the send or receive callback is called. This would also mean 4 additional parameters for gcs_open() (3 callbacks + a context) and would make the GCS connection much less of a "socket".
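For alternative 1) the application effectively mirrors the state machine above and derives state transitions from the returned error codes. The following self-contained C sketch shows one way that bookkeeping could look; every name in it (app_gcs_state_t, note_send_result(), ...) is hypothetical and only illustrates the idea, it is not part of the GCS API.

/* Hypothetical illustration of alternative 1): the application keeps its
 * own copy of the GCS connection state and derives transitions from the
 * error codes described above.  Nothing here is real GCS API. */
#include <errno.h>
#include <stdio.h>

typedef enum { APP_GCS_OPEN, APP_GCS_CONNECTED } app_gcs_state_t;

static app_gcs_state_t state = APP_GCS_OPEN;

/* Interpret the return code of a (hypothetical) send call. */
static void note_send_result (int ret)
{
    switch (ret < 0 ? -ret : 0) {
    case 0:
        break;                      /* message went through              */
    case ENOTCONN:                  /* still in GCS_OPEN: only snapshot  */
        state = APP_GCS_OPEN;       /* request / join are allowed        */
        break;
    case ECONNREFUSED:              /* join attempted in NON-PRIM conf.  */
        state = APP_GCS_OPEN;
        break;
    case EISCONN:                   /* join/snapshot request sent while  */
        state = APP_GCS_CONNECTED;  /* already connected                 */
        break;
    case ECONNRESET:                /* CONNECTED -> OPEN transition:     */
        state = APP_GCS_OPEN;       /* pending calls were aborted        */
        break;
    default:
        fprintf (stderr, "unexpected error %d\n", ret);
    }
}

int main (void)
{
    note_send_result (-ENOTCONN);   /* e.g. tried to send too early */
    printf ("state: %s\n",
            APP_GCS_CONNECTED == state ? "CONNECTED" : "OPEN");
    return 0;
}

With callbacks (alternative 2) this switch would disappear from the application, at the cost of the synchronization issues mentioned above.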
galera-3-25.3.20/gcs/doc/GCS_Architecture.png [binary PNG image data not reproduced]
galera-3-25.3.20/gcs/doc/GCS_Architecture.odt [binary OpenDocument data not reproduced]
*So -._IPFX-Hw>qӴ)P XX5y֩d^_OCqw`98˰P x)wVWu,ΫdL遷B$HrL| n^nb p``pKjF<w$kuz@(8 )~"g0zU|9#{MJRŘaF瞞rPKrTPK O7^2 ''mimetypePK O7P Mcontent.xmlPK O7Xs 1layout-cachePK O7) l/ }styles.xmlPK O7A#^!meta.xmlPK O7CRrC D#Thumbnails/thumbnail.pngPK O7':gConfigurations2/accelerator/current.xmlPK O7gConfigurations2/progressbar/PK O7gConfigurations2/floater/PK O7hConfigurations2/popupmenu/PK O79hConfigurations2/menubar/PK O7ohConfigurations2/toolbar/PK O7hConfigurations2/images/Bitmaps/PK O7hConfigurations2/statusbar/PK O7! isettings.xmlPK O7rToMETA-INF/manifest.xmlPK(oqgalera-3-25.3.20/gcs/doc/Doxyfile0000644000015300001660000014366113042054732016200 0ustar jenkinsjenkins# Doxyfile 1.4.6 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project # # All text after a hash (#) is considered a comment and will be ignored # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" ") #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # The PROJECT_NAME tag is a single word (or a sequence of words surrounded # by quotes) that should identify the project. PROJECT_NAME = GCS # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. PROJECT_NUMBER = 0.2.3 # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. OUTPUT_DIRECTORY = ./ # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. # Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. # The default language is English, other supported languages are: # Brazilian, Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, # Dutch, Finnish, French, German, Greek, Hungarian, Italian, Japanese, # Japanese-en (Japanese with English messages), Korean, Korean-en, Norwegian, # Polish, Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish, # Swedish, and Ukrainian. OUTPUT_LANGUAGE = English # This tag can be used to specify the encoding used in the generated output. # The encoding is not always determined by the language that is chosen, # but also whether or not the output is meant for Windows or non-Windows users. # In case there is a difference, setting the USE_WINDOWS_ENCODING tag to YES # forces the Windows encoding (this is the default for the Windows binary), # whereas setting the tag to NO uses a Unix-style encoding (the default for # all platforms other than Windows). 
USE_WINDOWS_ENCODING = NO # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. # If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = YES # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. This can be useful is your file systems # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like the Qt-style comments (thus requiring an # explicit @brief command for a brief description. JAVADOC_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. 
This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the DETAILS_AT_TOP tag is set to YES then Doxygen # will output the detailed description near the top, like JavaDoc. # If set to NO, the detailed description appears after the member # documentation. DETAILS_AT_TOP = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 8 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". # For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = YES # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. Doxygen will then generate output that is more tailored for Java. # For instance, namespaces will be presented as packages, qualified scopes # will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want to # include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); v.s. # func(std::string) {}). This also make the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. BUILTIN_STL_SUPPORT = NO # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. Alternatively, this can be done per class using # the \nosubgrouping command. SUBGROUPING = YES #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. 
# Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = NO # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = NO # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. # If set to NO (the default) only methods in the interface are included. EXTRACT_LOCAL_METHODS = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. # If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = YES # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. If set to NO the members will appear in # declaration order. 
SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. SORT_BRIEF_DOCS = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. SORT_BY_SCOPE_NAME = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if sectionname ... \endif. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or define consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. # The appearance of the initializer of individual variables and defines in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES # If the sources in your project are distributed over multiple directories # then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy # in the documentation. The default is NO. SHOW_DIRECTORIES = NO # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from the # version control system). Doxygen will invoke the program by executing (via # popen()) the command , where is the value of # the FILE_VERSION_FILTER tag, and is the name of an input file # provided by doxygen. Whatever the program writes to standard output # is used as the file version. See the manual for examples. FILE_VERSION_FILTER = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. 
QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES # This WARN_NO_PARAMDOC option can be abled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = ../src # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx # *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py FILE_PATTERNS = *.c *.h # The RECURSIVE tag can be used to turn specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. RECURSIVE = NO # The EXCLUDE tag can be used to specify files and/or directories that should # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used select whether or not files or # directories that are symbolic links (a Unix filesystem feature) are excluded # from the input. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. 
Note that the wildcards are matched # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* EXCLUDE_PATTERNS = # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain image that are included in the documentation (see # the \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command , where # is the value of the INPUT_FILTER tag, and is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. If FILTER_PATTERNS is specified, this tag will be # ignored. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. Doxygen will compare the file name with each pattern and apply the # filter if there is a match. The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER # is applied to all files. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C and C++ comments will always remain visible. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES (the default) # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = YES # If the REFERENCES_RELATION tag is set to YES (the default) # then for each documented function all documented entities # called/used by that function will be listed. 
REFERENCES_RELATION = YES # If the USE_HTAGS tag is set to YES then the references to source code # will point to the HTML generated by the htags(1) tool instead of doxygen # built-in source browser. The htags tool is part of GNU's global source # tagging system (see http://www.gnu.org/software/global/global.html). You # will need version 4.8.6 or higher. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. ALPHABETICAL_INDEX = NO # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. # The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. HTML_HEADER = # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If the tag is left blank doxygen # will generate a default style sheet. Note that doxygen will try to copy # the style sheet file to the HTML output directory, so don't put your own # stylesheet in the HTML output directory as well, or it will be erased! HTML_STYLESHEET = # If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, # files or namespaces will be aligned in HTML using tables. If set to # NO a bullet list will be used. 
HTML_ALIGN_MEMBERS = YES # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compressed HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory. CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = NO # The DISABLE_INDEX tag can be used to turn on/off the condensed index at # top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. DISABLE_INDEX = NO # This tag can be used to set the number of enum values (range [1..20]) # that doxygen will group on one line in the generated HTML documentation. ENUM_VALUES_PER_LINE = 4 # If the GENERATE_TREEVIEW tag is set to YES, a side panel will be # generated containing a tree-like index structure (just like the one that # is generated for HTML Help). For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, # Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are # probably better off using the HTML help feature. GENERATE_TREEVIEW = NO # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 250 #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = NO # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `latex' will be used as the default path. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = makeindex # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. This may be useful for small projects and may help to # save some trees in general. 
COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, a4wide, letter, legal and # executive. If left blank a4wide will be used. PAPER_TYPE = a4wide # The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX # packages that should be included in the LaTeX output. EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! LATEX_HEADER = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). The pdf file will # contain links (just like the HTML output) instead of page references # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = NO # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. USE_PDFLATEX = NO # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. # command to the generated LaTeX files. This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. # This option is also used when generating formulas in HTML. LATEX_BATCHMODE = NO # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. LATEX_HIDE_INDICES = NO #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load stylesheet definitions from file. Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. # Syntax is similar to doxygen's config file. 
RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES (the default) Doxygen will # generate man pages GENERATE_MAN = YES # The MAN_OUTPUT tag is used to specify where the man pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = NO # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path. XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify an XML schema, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_SCHEMA = # The XML_DTD tag can be used to specify an XML DTD, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. Note that this feature is still experimental # and incomplete at the moment. GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. 
PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. This is useful # if you want to understand what is going on. On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. # This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. If set to NO (the default) only conditional # compilation will be performed. Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = NO # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_DEFINED tags. EXPAND_ONLY_PREDEF = NO # If the SEARCH_INCLUDES tag is set to YES (the default) the includes files # in the INCLUDE_PATH (see below) will be search if a #include is found. SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. To prevent a macro definition from being # undefined via #undef or recursively expanded use the := operator # instead of the = operator. PREDEFINED = # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all function-like macros that are alone # on a line, have an all uppercase name, and do not end with a semicolon. Such # function macros are typically used for boiler-plate code, and will confuse # the parser if not removed. 
SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. # Optionally an initial location of the external documentation # can be added for each tagfile. The format of a tag file without # this location is as follows: # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths or # URLs. If a location is present for each tag, the installdox tool # does not have to be run to correct the links. # Note that each tag file must have a unique name # (where the name does NOT include the path) # If a tag file is not located in the directory in which doxygen # is run, you must also specify the path to the tagfile here. TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. EXTERNAL_GROUPS = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base # or super classes. Setting the tag to NO turns the diagrams off. Note that # this option is superseded by the HAVE_DOT option below. This is only a # fallback. It is recommended to install and use dot, since it yields more # powerful graphs. CLASS_DIAGRAMS = YES # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = NO # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force the # the CLASS_DIAGRAMS tag to NO. CLASS_GRAPH = YES # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. 
COLLABORATION_GRAPH = YES # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. UML_LOOK = NO # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. INCLUDE_GRAPH = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT tags are set to YES then doxygen will # generate a call dependency graph for every global function or class method. # Note that enabling this option will significantly increase the time of a run. # So in most cases it will be better to enable call graphs for selected # functions only using the \callgraph command. CALL_GRAPH = NO # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will graphical hierarchy of all classes instead of a textual one. GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are png, jpg, or gif # If left blank png will be used. DOT_IMAGE_FORMAT = png # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). DOTFILE_DIRS = # The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width # (in pixels) of the graphs generated by dot. If a graph becomes larger than # this value, doxygen will try to truncate the graph, so that it fits within # the specified constraint. Beware that most browsers cannot cope with very # large images. MAX_DOT_GRAPH_WIDTH = 1024 # The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allows height # (in pixels) of the graphs generated by dot. If a graph becomes larger than # this value, doxygen will try to truncate the graph, so that it fits within # the specified constraint. Beware that most browsers cannot cope with very # large images. MAX_DOT_GRAPH_HEIGHT = 1024 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lay further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. 
Also note that a graph may be further truncated if the graph's # image dimensions are not sufficient to fit the graph (see MAX_DOT_GRAPH_WIDTH # and MAX_DOT_GRAPH_HEIGHT). If 0 is used for the depth value (the default), # the graph is not depth-constrained. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, which results in a white background. # Warning: Depending on the platform used, enabling this option may lead to # badly anti-aliased labels on the edges of a graph (i.e. they become hard to # read). DOT_TRANSPARENT = NO # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. DOT_MULTI_TARGETS = NO # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES #--------------------------------------------------------------------------- # Configuration::additions related to the search engine #--------------------------------------------------------------------------- # The SEARCHENGINE tag specifies whether or not a search engine should be # used. If set to NO the values of all tags below this one will be ignored. SEARCHENGINE = NO galera-3-25.3.20/gcs/doc/Coding_conventions.txt0000644000015300001660000000533613042054732021057 0ustar jenkinsjenkinsThese coding conventions were not set from the very beginning. They emerged as a result of coding. Therefore they have some practical basis for it. Not all code adheres to them, but that's what it should be like. Attempt was made to justify at least some of these conventions, but it's not about right or wrong really. It's about consistency. 1. Indentation. Tab width is 4, tabs filled with spaces. No real tabs. There is a controversy about this issue among programmers. In defense of this decision I can say that indentation is some sort of ASCII art and ASCII art does not always work good with tabs, especially when you need to alter it by hand. In other words: spaces are less flexible, but more predictable. 2. Braces position. Opening brace on the same line as the statement, see example below. 3. Spaces between tokens. See example below. 4. Function declarations/definitions. See example below. 5. Naming conventions. All names and identifiers are all lower case with underscores except macros which are all UPPER case. 5.1 File names. All C file names are prefixed with 'gcs' to avoid collisions in the file namespace with headers from other software. Prefix is followed by the module name. If module consists of more than one unit, unit names follow module name. Like gcs_module_unit1.[h|c], gcs_module_unit2.[h|c] and so on. 5.2 Symbol names. All global symbols - exported through header files - are prefixed with 'gcs' (or 'GCS' where appropriate) followed by the module name. This is done again to avoid namespace collisions with the third party software and with global symbols from other modules that may be called similarly. Static symbols defined in the *.c files simply start with the module name. 
This is done to easily distinguish between global and static symbols and to prevent collisions of static symbols from different modules when doing tags search. Example: int gcs_module_external_var; static int module_static_function (int a, int b) { int c; if (a < b) { c = b - a; } else { c = a - b; } return c; } 6. Long lines. Lines should not exceed 80 characters in length. One more reason to use spaces instead of tabs. (Suppose you have tabs of width 4 and then some other person reads the code with tabs width of 8. Many lines will grow more than 80 characters and may not fit in the screen.) 7. Spaces at the end of line. Should be avoided to minimize the diff. 8. NO COMPILER WARNINGS WHATSOEVER. These conventions are not set in stone and can be altered eventually. However, for the sake of consistency, try to follow these conventions unless there is a REAL (and well articulated) need to do otherwise. IF IN DOUBT - SEE HOW SIMILAR THINGS ARE DONE IN RECENT FILES. galera-3-25.3.20/gcs/SConscript0000644000015300001660000000003513042054732015722 0ustar jenkinsjenkinsSConscript('src/SConscript') galera-3-25.3.20/gcs/ChangeLog0000644000015300001660000000552713042054732015475 0ustar jenkinsjenkins2010-07-18 Alex Substituted gcs_slave_queue_len() with gcs_get_stats() to return a wider range of gcs performance statistics. At this time it includes average slave queue length, average send queue length, fraction of time spent paused and number of flow control events sent and received. 2010-06-16 Alex Added gcs_interrupt() call to be able to interrupt scheduled threads. Version 0.13.1 2010-05-31 Alex Added flow control monitor and ability to synchronize with gcs_send() and gcs_repl() calls thus guranteeing FIFO order. Version 0.13.0 2010-05-20 Alex Added gcs_slave_queue_len() query to API. 2009-11-21 Alex Extended state message to contain previous primary configuraiton info. Many bugfixes and cleanups. Version 0.12.0 2009-08-09 Alex Added possibility to specify desired donor. Version 0.11.0 2009-08-06 Alex Refactored interface. Now connection URL is supplied to gcs_open() and not gcs_create(). It is done to satisfy wsrep API changes and is generally cleaner as it separates library initialisation from connection establishment. Version: 0.10.0 2009-07-21 Alex Added node name and incoming address arguments to gcs_create(). Thus it should be possible to give nodes sensible names and see them in logs. Version: 0.9.0 2009-06-21 Alex Moved TO module out of the library. Since it no longer offers this interface, bumped minor version: 0.8.0 2008-11-16 Alex Many bugfixes. Fixed handling of self-leave meassages. Switched to "mallocless" FIFO implementaiton in gu_fifo.c Resolved apparent race condition and optimized FC message sending. Package version 0.7.2 2008-11-09 Alex Changed state transfer protocol to require join message to be sent by both parties involved in state transfer. Package version 0.7.1, library interface 9.0.0. 2008-10-21 Alex First implementation of state transfer request protocol. Bumped package version to 0.7.0, library interface to 8.0.0. 2008-09-29 Alex (postfactum) State exchange (GCS state exchange, not application state exchange) implemented. Now we have some sort of quourum calculations and global-scope sequence numbers. New nodes can join without having to restart the whole group. Bumped package version to 0.6.0. 2008-08-01 Alex (postfactum) START/STOP-based flow control. A little bit ahead of the plan. 2008-07-30 Alex Added gcs_join() and gcs_wait() getting closer to final API. 
gcs_join() moves conneciton to JOINED state. gcs_wait() blocks waiting for the group memebers to catch up. 2008-05-14 Alex Added gcs_create() and gcs_destroy() for safe and clean initialization and deinitialization of GCS connection handle. 2008-03-23 Alex Added gcs_set_last_applied() and gcs_get_last_applied() - calls for voting for the last applied action. galera-3-25.3.20/gcs/README0000644000015300001660000000115113042054732014570 0ustar jenkinsjenkinsWelcome to libgcs - Group Communication system abstraction library. libgcs is intended to simplify the use of Group Communication semantics in applications which are not initially targeted for it. This software is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY, to the extent permitted by law; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. libgcs is free software. Please see the file COPYING for details. For documentation, please see the files in the doc subdirectory. For building and installation instructions please see the INSTALL file. galera-3-25.3.20/garb/0000755000015300001660000000000013042054732014051 5ustar jenkinsjenkinsgalera-3-25.3.20/garb/garb_main.cpp0000644000015300001660000000455013042054732016500 0ustar jenkinsjenkins/* Copyright (C) 2011 Codership Oy */ #include "garb_config.hpp" #include "garb_recv_loop.hpp" #include #include #include // exit() #include // setsid(), chdir() #include // open() namespace garb { void become_daemon () { if (pid_t pid = fork()) { if (pid > 0) // parent { exit(0); } else { // I guess we want this to go to stderr as well; std::cerr << "Failed to fork daemon process: " << errno << " (" << strerror(errno) << ")"; gu_throw_error(errno) << "Failed to fork daemon process"; } } // child if (setsid()<0) // become a new process leader, detach from terminal { gu_throw_error(errno) << "setsid() failed"; } if (chdir("/")) // detach from potentially removable block devices { gu_throw_error(errno) << "chdir(\"/\") failed"; } // umask(0); // A second fork ensures the process cannot acquire a controlling // terminal. if (pid_t pid = fork()) { if (pid > 0) { exit(0); } else { gu_throw_error(errno) << "Second fork failed"; } } // Close the standard streams. This decouples the daemon from the // terminal that started it. close(0); close(1); close(2); // Bind standard fds (0, 1, 2) to /dev/null for (int fd = 0; fd < 3; ++fd) { if (open("/dev/null", O_RDONLY) < 0) { gu_throw_error(errno) << "Unable to open /dev/null for fd " << fd; } } } int main (int argc, char* argv[]) { Config config(argc, argv); if (config.exit()) return 0; log_info << "Read config: " << config << std::endl; if (config.daemon()) become_daemon(); try { RecvLoop loop (config); return 0; } catch (std::exception& e) { log_fatal << "Exception in creating receive loop: " << e.what(); } catch (...) 
{ log_fatal << "Exception in creating receive loop."; } return EXIT_FAILURE; } } /* namespace garb */ int main (int argc, char* argv[]) { try { return garb::main (argc, argv); } catch (std::exception& e) { log_fatal << e.what(); return 1; } } galera-3-25.3.20/garb/garb_logger.hpp0000644000015300001660000000044413042054732017036 0ustar jenkinsjenkins/* Copyright (C) 2011 Codership Oy */ #ifndef _GARB_LOGGER_HPP_ #define _GARB_LOGGER_HPP_ #include namespace garb { extern void set_logfile (const std::string& fname); extern void set_syslog (); } /* namespace garb */ #endif /* _GARB_LOGGER_HPP_ */ galera-3-25.3.20/garb/garb_recv_loop.hpp0000644000015300001660000000177413042054732017556 0ustar jenkinsjenkins/* Copyright (C) 2011-2014 Codership Oy */ #ifndef _GARB_RECV_LOOP_HPP_ #define _GARB_RECV_LOOP_HPP_ #include "garb_gcs.hpp" #include "garb_config.hpp" #include #include #include namespace garb { class RecvLoop { public: RecvLoop (const Config&); ~RecvLoop () {} private: void loop(); const Config& config_; gu::Config gconf_; struct RegisterParams { RegisterParams(gu::Config& cnf) { gu::ssl_register_params(cnf); if (gcs_register_params(reinterpret_cast(&cnf))) { gu_throw_fatal << "Error initializing GCS parameters"; } } } params_; struct ParseOptions { ParseOptions(gu::Config& cnf, const std::string& opt) { cnf.parse(opt); } } parse_; Gcs gcs_; }; /* RecvLoop */ } /* namespace garb */ #endif /* _GARB_RECV_LOOP_HPP_ */ galera-3-25.3.20/garb/garb_config.cpp0000644000015300001660000001133113042054732017014 0ustar jenkinsjenkins/* Copyright (C) 2011-2013 Codership Oy */ #include "garb_config.hpp" #include "garb_logger.hpp" #include #include #include #include #include #include namespace po = boost::program_options; #include #include namespace garb { static void strip_quotes(std::string& s) { /* stripping no more than one pair of quotes */ if ('"' == *s.begin() && '"' == *s.rbegin()) { std::string stripped(s.substr(1, s.length() - 2)); s = stripped; } } std::string const Config::DEFAULT_SST(WSREP_STATE_TRANSFER_TRIVIAL); Config::Config (int argc, char* argv[]) : daemon_ (false), name_ (GCS_ARBITRATOR_NAME), address_ (), group_ ("my_test_cluster"), sst_ (DEFAULT_SST), donor_ (), options_ (), log_ (), cfg_ (), exit_ (false) { po::options_description other ("Other options"); other.add_options() ("version,v", "Print version & exit") ("help,h", "Show help message & exit") ; // only these are read from cfg file po::options_description config ("Configuration"); config.add_options() ("daemon,d", "Become daemon") ("name,n", po::value(&name_), "Node name") ("address,a",po::value(&address_), "Group address") ("group,g", po::value(&group_), "Group name") ("sst", po::value(&sst_), "SST request string") ("donor", po::value(&donor_), "SST donor name") ("options,o",po::value(&options_), "GCS/GCOMM option list") ("log,l", po::value(&log_), "Log file") ; po::options_description cfg_opt; cfg_opt.add_options() ("cfg,c", po::value(&cfg_), "Configuration file") ; // these are accepted on the command line po::options_description cmdline_opts; cmdline_opts.add(config).add(cfg_opt).add(other); // we can submit address without option po::positional_options_description p; p.add("address", -1); po::variables_map vm; store(po::command_line_parser(argc, argv). 
options(cmdline_opts).positional(p).run(), vm); notify(vm); if (vm.count("help")) { std::cerr << "\nUsage: " << argv[0] << " [options] [group address]\n" << cmdline_opts << std::endl; exit_= true; return; } if (vm.count("version")) { log_info << GALERA_VER << ".r" << GALERA_REV; exit_= true; return; } if (vm.count("cfg")) { std::ifstream ifs(cfg_.c_str()); if (!ifs.good()) { gu_throw_error(ENOENT) << "Failed to open configuration file '" << cfg_ << "' for reading."; } store(parse_config_file(ifs, config), vm); notify(vm); } if (!vm.count("address")) { gu_throw_error(EDESTADDRREQ) << "Group address not specified"; } if (!vm.count("group")) { gu_throw_error(EDESTADDRREQ) << "Group name not specified"; } if (vm.count("daemon")) { daemon_ = true; } /* Seeing how https://svn.boost.org/trac/boost/ticket/850 is fixed long and * hard, it becomes clear what an undercooked piece of... cake(?) boost is. * - need to strip quotes manually if used in config file. * (which is done in a very simplistic manner, but should work for most) */ strip_quotes(name_); strip_quotes(address_); strip_quotes(group_); strip_quotes(sst_); strip_quotes(donor_); strip_quotes(options_); strip_quotes(log_); strip_quotes(cfg_); if (options_.length() > 0) options_ += "; "; options_ += "gcs.fc_limit=9999999; gcs.fc_factor=1.0; gcs.fc_master_slave=yes"; // this block must be the very last. gu_conf_self_tstamp_on(); if (vm.count("log")) { set_logfile (log_); } else if (daemon_) /* if no log file given AND daemon operation requested - * log to syslog */ { gu_conf_self_tstamp_off(); set_syslog(); } gu_crc32c_configure(); } std::ostream& operator << (std::ostream& os, const Config& c) { os << "\n\tdaemon: " << c.daemon() << "\n\tname: " << c.name() << "\n\taddress: " << c.address() << "\n\tgroup: " << c.group() << "\n\tsst: " << c.sst() << "\n\tdonor: " << c.donor() << "\n\toptions: " << c.options() << "\n\tcfg: " << c.cfg() << "\n\tlog: " << c.log(); return os; } } galera-3-25.3.20/garb/garb_logger.cpp0000644000015300001660000000213513042054732017030 0ustar jenkinsjenkins/* Copyright (C) 2011 Codership Oy */ #include "garb_logger.hpp" #include #include #include #include namespace garb { void set_logfile (const std::string& fname) { FILE* log_file = fopen (fname.c_str(), "a"); if (!log_file) { gu_throw_error (ENOENT) << "Failed to open '" << fname << "' for appending"; } gu_conf_set_log_file (log_file); } static void log_to_syslog (int level, const char* msg) { int p = LOG_NOTICE; switch (level) { case GU_LOG_FATAL: p = LOG_CRIT; break; case GU_LOG_ERROR: p = LOG_ERR; break; case GU_LOG_WARN: p = LOG_WARNING; break; case GU_LOG_INFO: p = LOG_INFO; break; case GU_LOG_DEBUG: p = LOG_DEBUG; break; } syslog (p | LOG_DAEMON, "%s", msg); } void set_syslog () { openlog ("garbd", LOG_PID, LOG_DAEMON); gu_conf_set_log_callback (log_to_syslog); } } /* namespace garb */ galera-3-25.3.20/garb/files/0000755000015300001660000000000013042054732015153 5ustar jenkinsjenkinsgalera-3-25.3.20/garb/files/garb.sh0000755000015300001660000000741313042054732016432 0ustar jenkinsjenkins#!/bin/bash # # Copyright (C) 2012-2015 Codership Oy # # init.d script for garbd # # chkconfig: - 99 01 # config: /etc/sysconfig/garb | /etc/default/garb ### BEGIN INIT INFO # Provides: garb # Required-Start: $remote_fs $syslog # Required-Stop: $remote_fs $syslog # Should-Start: $network $named $time # Should-Stop: $network $named $time # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: Galera Arbitrator Daemon # Description: The Galera Arbitrator is used as 
part of clusters # that have only two real Galera servers and need an # extra node to arbitrate split brain situations. ### END INIT INFO # On Debian Jessie, avoid redirecting calls to this script to 'systemctl start' _SYSTEMCTL_SKIP_REDIRECT=true # Source function library. if [ -f /etc/redhat-release ]; then . /etc/init.d/functions . /etc/sysconfig/network config=/etc/sysconfig/garb else . /lib/lsb/init-functions config=/etc/default/garb fi log_failure() { if [ -f /etc/redhat-release ]; then echo -n $* failure "$*" echo else log_failure_msg "$*" fi } PIDFILE=/var/run/garbd prog=$(which garbd) program_start() { local rcode if [ -f /etc/redhat-release ]; then echo -n $"Starting $prog: " daemon --user nobody $prog "$@" >/dev/null rcode=$? if [ $rcode -eq 0 ]; then pidof $prog > $PIDFILE || rcode=$? fi [ $rcode -eq 0 ] && echo_success || echo_failure echo else log_daemon_msg "Starting $prog: " start-stop-daemon --start --quiet -c nobody --background \ --exec $prog -- "$@" rcode=$? # Hack: sleep a bit to give garbd some time to fork sleep 1 if [ $rcode -eq 0 ]; then pidof $prog > $PIDFILE || rcode=$? fi log_end_msg $rcode fi return $rcode } program_stop() { local rcode if [ -f /etc/redhat-release ]; then echo -n $"Shutting down $prog: " killproc -p $PIDFILE rcode=$? [ $rcode -eq 0 ] && echo_success || echo_failure else start-stop-daemon --stop --quiet --oknodo --retry TERM/30/KILL/5 \ --pidfile $PIDFILE rcode=$? log_end_msg $rcode fi [ $rcode -eq 0 ] && rm -f $PIDFILE return $rcode } program_status() { if [ -f /etc/redhat-release ]; then status $prog else status_of_proc -p $PIDFILE "$prog" garb fi } start() { [ "$EUID" != "0" ] && return 4 [ "$NETWORKING" = "no" ] && return 1 if grep -q -E '^# REMOVE' $config; then log_failure "Garbd config $config is not configured yet" return 0 fi if [ -r $PIDFILE ]; then local PID=$(cat ${PIDFILE}) if ps -p $PID >/dev/null 2>&1; then log_failure "$prog is already running with PID $PID" return 3 # ESRCH else rm -f $PIDFILE fi fi [ -x $prog ] || return 5 [ -f $config ] && . $config # Check that node addresses are configured if [ -z "$GALERA_NODES" ]; then log_failure "List of GALERA_NODES is not configured" return 6 fi if [ -z "$GALERA_GROUP" ]; then log_failure "GALERA_GROUP name is not configured" return 6 fi GALERA_PORT=${GALERA_PORT:-4567} OPTIONS="-d -a gcomm://${GALERA_NODES// /,}" # substitute space with comma for backward compatibility [ -n "$GALERA_GROUP" ] && OPTIONS="$OPTIONS -g '$GALERA_GROUP'" [ -n "$GALERA_OPTIONS" ] && OPTIONS="$OPTIONS -o '$GALERA_OPTIONS'" [ -n "$LOG_FILE" ] && OPTIONS="$OPTIONS -l '$LOG_FILE'" eval program_start $OPTIONS } stop() { [ "$EUID" != "0" ] && return 4 [ -r $PIDFILE ] || return 3 # ESRCH program_stop } restart() { stop start } # See how we were called. case "$1" in start) start ;; stop) stop ;; status) program_status ;; restart|reload|force-reload) restart ;; condrestart) if status $prog > /dev/null; then stop start fi ;; *) echo $"Usage: $0 {start|stop|status|restart|reload}" exit 2 esac galera-3-25.3.20/garb/files/freebsd/0000755000015300001660000000000013042054732016565 5ustar jenkinsjenkinsgalera-3-25.3.20/garb/files/freebsd/garb.sh0000644000015300001660000000577613042054732020053 0ustar jenkinsjenkins#!/bin/sh # # garb.sh for rc.d usage (c) 2013 Codership Oy # $Id$ # PROVIDE: garb # REQUIRE: LOGIN # KEYWORD: shutdown # # Add the following line to /etc/rc.conf to enable Galera Arbitrator Daemon (garbd): # garb_enable (bool): Set to "NO" by default. # Set it to "YES" to enable Galera Arbitrator Daemon. 
# garb_galera_nodes (str): A space-separated list of node addresses (address[:port]) in the cluster # (default empty). # garb_galera_group (str): Galera cluster name, should be the same as on the rest of the nodes. # (default empty). # Optional: # garb_galera_options (str): Optional Galera internal options string (e.g. SSL settings) # see http://www.codership.com/wiki/doku.php?id=galera_parameters # (default empty). # garb_log_file (str): Log file for garbd (default empty). Optional, by default logs to syslog # garb_pid_file (str): Custum PID file path and name. # Default to "/var/run/garb.pid". # . /etc/rc.subr name="garb" rcvar=garb_enable load_rc_config $name # set defaults : ${garb_enable="NO"} : ${garb_galera_nodes=""} : ${garb_galera_group=""} : ${garb_galera_options=""} : ${garb_log_file=""} : ${garb_pid_file="/var/run/garb.pid"} procname="/usr/local/bin/garbd" command="/usr/sbin/daemon" command_args="-c -f -u nobody -p $garb_pid_file $procname" start_precmd="${name}_prestart" #start_cmd="${name}_start" start_postcmd="${name}_poststart" stop_precmd="${name}_prestop" #stop_cmd="${name}_stop" #stop_postcmd="${name}_poststop" #extra_commands="reload" #reload_cmd="${name}_reload" export LD_LIBRARY_PATH=/usr/local/lib/gcc44 garb_prestart() { [ "$(id -ur)" != "0" ] && err 4 "root rights are required to start $name" [ -r "$garb_pid_file" ] && err 0 "$procname is already running with PID $(cat $garb_pid_file)" [ -x "$procname" ] || err 5 "$procname is not found" # check that node addresses are configured [ -z "$garb_galera_nodes" ] && err 6 "List of garb_galera_nodes is not configured" [ -z "$garb_galera_group" ] && err 6 "garb_galera_group name is not configured" GALERA_PORT=${GALERA_PORT:-4567} # Concatenate all nodes in the list (for backward compatibility) ADDRESS= for NODE in ${garb_galera_nodes}; do [ -z "$ADDRESS" ] && ADDRESS="$NODE" || ADDRESS="$ADDRESS,$NODE" done command_args="$command_args -a gcomm://$ADDRESS" [ -n "$garb_galera_group" ] && command_args="$command_args -g $garb_galera_group" [ -n "$garb_galera_options" ] && command_args="$command_args -o $garb_galera_options" [ -n "$garb_log_file" ] && command_args="$command_args -l $garb_log_file" return 0 } garb_poststart() { local timeout=15 while [ ! -f "$garb_pid_file" -a $timeout -gt 0 ]; do timeout=$(( timeout - 1 )) sleep 1 done return 0 } garb_prestop() { [ "$(id -ur)" != "0" ] && err 4 "root rights are required to stop $name" [ -r $garb_pid_file ] || err 0 "" return 0 } run_rc_command "$1" galera-3-25.3.20/garb/files/garb.cnf0000644000015300001660000000077713042054732016571 0ustar jenkinsjenkins# Copyright (C) 2012 Codership Oy # This config file is to be sourced by garb service script. # A comma-separated list of node addresses (address[:port]) in the cluster # GALERA_NODES="" # Galera cluster name, should be the same as on the rest of the nodes. # GALERA_GROUP="" # Optional Galera internal options string (e.g. SSL settings) # see http://galeracluster.com/documentation-webpages/galeraparameters.html # GALERA_OPTIONS="" # Log file for garbd. Optional, by default logs to syslog # LOG_FILE="" galera-3-25.3.20/garb/files/garb-systemd0000755000015300001660000000210213042054732017475 0ustar jenkinsjenkins#!/bin/bash -ue # config=/etc/sysconfig/garb log_failure() { echo " ERROR! $@" } program_start() { echo "Starting garbd" /usr/bin/garbd "$@" } start() { if grep -q -E '^# REMOVE' $config;then log_failure "Garbd config $config is not configured yet" return 0 fi [ -f $config ] && . 
$config # Check that node addresses are configured if [[ -z "${GALERA_NODES:-}" ]]; then log_failure "List of GALERA_NODES is not configured" return 6 fi if [[ -z "${GALERA_GROUP:-}" ]]; then log_failure "GALERA_GROUP name is not configured" return 6 fi GALERA_PORT=${GALERA_PORT:-4567} OPTIONS="-a gcomm://${GALERA_NODES// /,}" # substitute space with comma for backward compatibility [ -n "${GALERA_GROUP:-}" ] && OPTIONS="$OPTIONS -g '$GALERA_GROUP'" [ -n "${GALERA_OPTIONS:-}" ] && OPTIONS="$OPTIONS -o '$GALERA_OPTIONS'" [ -n "${LOG_FILE:-}" ] && OPTIONS="$OPTIONS -l '$LOG_FILE'" eval program_start $OPTIONS } # See how we were called. case "$1" in start) start ;; *) echo $"Usage: $0 {start}" exit 2 esac exit $? galera-3-25.3.20/garb/files/garb.service0000644000015300001660000000065113042054732017452 0ustar jenkinsjenkins# Systemd service file for garbd [Unit] Description=Galera Arbitrator Daemon After=network.target syslog.target [Install] WantedBy=multi-user.target Alias=garbd.service [Service] User=nobody EnvironmentFile=/etc/sysconfig/garb ExecStart=/usr/bin/garb-systemd start # Use SIGINT because with the default SIGTERM # garbd fails to reliably transition to 'destroyed' state KillSignal=SIGINT TimeoutSec=2m PrivateTmp=false galera-3-25.3.20/garb/SConscript0000644000015300001660000000272413042054732016070 0ustar jenkinsjenkins# Copyright (C) 2011 Codership Oy Import('env', 'libboost_program_options') garb_env = env.Clone() # Include paths garb_env.Append(CPPPATH = Split(''' # #/common #/galerautils/src #/gcs/src ''')) garb_env.Append(CPPFLAGS = ' -DGCS_FOR_GARB') garb_env.Prepend(LIBS=File('#/galerautils/src/libgalerautils.a')) garb_env.Prepend(LIBS=File('#/galerautils/src/libgalerautils++.a')) garb_env.Prepend(LIBS=File('#/gcomm/src/libgcomm.a')) garb_env.Prepend(LIBS=File('#/gcs/src/libgcs4garb.a')) if libboost_program_options: garb_env.Append(LIBS=libboost_program_options) # special environment for garb_config.cpp conf_env = garb_env.Clone() Import('GALERA_VER', 'GALERA_REV') conf_env.Append(CPPFLAGS = ' -DGALERA_VER=\\"' + GALERA_VER + '\\"') conf_env.Append(CPPFLAGS = ' -DGALERA_REV=\\"' + GALERA_REV + '\\"') garb = garb_env.Program(target = 'garbd', source = Split(''' garb_logger.cpp garb_gcs.cpp garb_recv_loop.cpp garb_main.cpp ''') + conf_env.SharedObject(['garb_config.cpp']) ) galera-3-25.3.20/garb/garb_gcs.cpp0000644000015300001660000000734113042054732016331 0ustar jenkinsjenkins/* * Copyright (C) 2011-2014 Codership Oy */ #include "garb_gcs.hpp" namespace garb { static int const REPL_PROTO_VER(127); static int const APPL_PROTO_VER(127); Gcs::Gcs (gu::Config& gconf, const std::string& name, const std::string& address, const std::string& group) : closed_ (true), gcs_ (gcs_create (reinterpret_cast(&gconf), NULL, name.c_str(), "", REPL_PROTO_VER, APPL_PROTO_VER)) { if (!gcs_) { gu_throw_fatal << "Failed to create GCS object"; } ssize_t ret = gcs_open (gcs_, group.c_str(), address.c_str(), false); if (ret < 0) { gcs_destroy (gcs_); gu_throw_error(-ret) << "Failed to open connection to group"; } closed_ = false; } Gcs::~Gcs () { if (!closed_) { log_warn << "Destroying non-closed object, bad idea"; close (); } gcs_destroy (gcs_); } void Gcs::recv (gcs_action& act) { again: ssize_t ret = gcs_recv(gcs_, &act); if (gu_unlikely(ret < 0)) { if (-ECANCELED == ret) { ret = gcs_resume_recv (gcs_); if (0 == ret) goto again; } log_fatal << "Receiving from group failed: " << ret << " (" << strerror(-ret) << ")"; gu_throw_error(-ret) << "Receiving from group failed"; } } void 
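/*
 * Requests a state transfer from the group. The request string is handed
 * to gcs_request_state_transfer() with its first ':' replaced by '\0'
 * (an empty second part is appended when no ':' is present), and the call
 * is retried once per second for as long as the group returns -EAGAIN.
 * On failure the error is logged as fatal and a gu exception is thrown.
 */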
Gcs::request_state_transfer (const std::string& request, const std::string& donor) { gcs_seqno_t order; log_info << "Sending state transfer request: '" << request << "', size: " << request.length(); /* Need to substitute the first ':' for \0 */ ssize_t req_len = request.length() + 1 /* \0 */; char* const req_str(reinterpret_cast(::malloc( req_len + 1 /* potentially need one more \0 */))); // cppcheck-suppress nullPointer if (!req_str) { gu_throw_error (ENOMEM) << "Cannot allocate " << req_len << " bytes for state transfer request"; } ::strcpy(req_str, request.c_str()); char* column_ptr = ::strchr(req_str, ':'); if (column_ptr) { *column_ptr = '\0'; } else /* append an empty string */ { req_str[req_len] = '\0'; req_len++; } ssize_t ret; do { gu_uuid_t ist_uuid = {{0, }}; gcs_seqno_t ist_seqno = GCS_SEQNO_ILL; // for garb we use the lowest str_version. ret = gcs_request_state_transfer (gcs_, 0, req_str, req_len, donor.c_str(), &ist_uuid, ist_seqno, &order); } while (-EAGAIN == ret && (usleep(1000000), true)); free (req_str); if (ret < 0) { log_fatal << "State transfer request failed: " << ret << " (" << strerror(-ret) << ")"; gu_throw_error(-ret) << "State transfer request failed"; } } void Gcs::join (gcs_seqno_t seqno) { ssize_t ret = gcs_join (gcs_, seqno); if (ret < 0) { log_fatal << "Joining group failed: " << ret << " (" << strerror(-ret) << ")"; gu_throw_error(-ret) << "Joining group failed"; } } void Gcs::set_last_applied (gcs_seqno_t seqno) { (void) gcs_set_last_applied(gcs_, seqno); } void Gcs::close () { if (!closed_) { ssize_t ret = gcs_close (gcs_); if (ret < 0) { log_error << "Failed to close connection to group"; } else { closed_ = true; } } else { log_warn << "Attempt to close a closed connection"; } } } /* namespace garb */ galera-3-25.3.20/garb/garb_gcs.hpp0000644000015300001660000000142113042054732016327 0ustar jenkinsjenkins/* Copyright (C) 2011-2013 Codership Oy */ #ifndef _GARB_GCS_HPP_ #define _GARB_GCS_HPP_ #include #include namespace garb { class Gcs { public: Gcs (gu::Config& conf, const std::string& name, const std::string& address, const std::string& group); ~Gcs (); void recv (gcs_action& act); void request_state_transfer (const std::string& request, const std::string& donor); void join (gcs_seqno_t); void set_last_applied(gcs_seqno_t); void close (); private: bool closed_; gcs_conn_t* gcs_; Gcs (const Gcs&); Gcs& operator= (const Gcs&); }; /* class Gcs */ } /* namespace garb */ #endif /* _GARB_GCS_HPP_ */ galera-3-25.3.20/garb/garb_config.hpp0000644000015300001660000000247613042054732017033 0ustar jenkinsjenkins/* Copyright (C) 2011-2013 Codership Oy */ #ifndef _GARB_CONFIG_HPP_ #define _GARB_CONFIG_HPP_ #include #include namespace garb { class Config { public: static std::string const DEFAULT_SST; // default (empty) SST request Config (int argc, char* argv[]); ~Config () {} bool daemon() const { return daemon_ ; } const std::string& name() const { return name_ ; } const std::string& address() const { return address_; } const std::string& group() const { return group_ ; } const std::string& sst() const { return sst_ ; } const std::string& donor() const { return donor_ ; } const std::string& options() const { return options_; } const std::string& cfg() const { return cfg_ ; } const std::string& log() const { return log_ ; } bool exit() const { return exit_ ; } private: bool daemon_; std::string name_; std::string address_; std::string group_; std::string sst_; std::string donor_; std::string options_; std::string log_; std::string cfg_; bool exit_; /* Exit on 
--help or --version */ }; /* class Config */ std::ostream& operator << (std::ostream&, const Config&); } /* namespace garb */ #endif /* _GARB_CONFIG_HPP_ */ galera-3-25.3.20/garb/garb_recv_loop.cpp0000644000015300001660000000516513042054732017547 0ustar jenkinsjenkins/* Copyright (C) 2011-2014 Codership Oy */ #include "garb_recv_loop.hpp" #include namespace garb { static Gcs* global_gcs(0); void signal_handler (int signum) { log_info << "Received signal " << signum; global_gcs->close(); } RecvLoop::RecvLoop (const Config& config) : config_(config), gconf_ (), params_(gconf_), parse_ (gconf_, config_.options()), gcs_ (gconf_, config_.name(), config_.address(), config_.group()) { /* set up signal handlers */ global_gcs = &gcs_; struct sigaction sa; memset (&sa, 0, sizeof(sa)); sa.sa_handler = signal_handler; if (sigaction (SIGTERM, &sa, NULL)) { gu_throw_error(errno) << "Falied to install signal hadler for signal " << "SIGTERM"; } if (sigaction (SIGINT, &sa, NULL)) { gu_throw_error(errno) << "Falied to install signal hadler for signal " << "SIGINT"; } loop(); } void RecvLoop::loop() { while (1) { gcs_action act; gcs_.recv (act); switch (act.type) { case GCS_ACT_TORDERED: if (gu_unlikely(!(act.seqno_g & 127))) /* == report_interval_ of 128 */ { gcs_.set_last_applied (act.seqno_g); } break; case GCS_ACT_COMMIT_CUT: break; case GCS_ACT_STATE_REQ: gcs_.join (-ENOSYS); /* we can't donate state */ break; case GCS_ACT_CONF: { const gcs_act_conf_t* const cc (reinterpret_cast(act.buf)); if (cc->conf_id > 0) /* PC */ { if (GCS_NODE_STATE_PRIM == cc->my_state) { gcs_.request_state_transfer (config_.sst(),config_.donor()); gcs_.join(cc->seqno); } } else if (cc->memb_num == 0) // SELF-LEAVE after closing connection { log_info << "Exiting main loop"; return; } if (config_.sst() != Config::DEFAULT_SST) { // we requested custom SST, so we're done here gcs_.close(); } break; } case GCS_ACT_JOIN: case GCS_ACT_SYNC: case GCS_ACT_FLOW: case GCS_ACT_SERVICE: case GCS_ACT_ERROR: case GCS_ACT_UNKNOWN: break; } if (act.buf) { free (const_cast(act.buf)); } } } } /* namespace garb */ galera-3-25.3.20/.bzrignore0000644000015300001660000000135213042054732015141 0ustar jenkinsjenkins#*# *$ *,v *.BAK *.a *.bak *.elc *.exe *.la *.lo *.o *.obj *.orig *.py[oc] *.so *.os *.tmp *.log *.passed *~ .#* .*.sw[nop] .*.tmp ./.python-eggs .DS_Store .arch-ids .arch-inventory .bzr.log .del-* .git .hg .jamdeps.libs .make.state .sconsign* .svn .sw[nop] .tmp* BitKeeper CVS CVS.adm RCS SCCS TAGS _darcs aclocal.m4 autom4te* config.h config.h.in config.log config.status config.sub docs/build/* stamp-h stamp-h.in stamp-h1 scons* .sconf* {arch} galera_check gu_tests gu_tests++ check_gcomm gcs_test gcs_tests cluster.conf nodes.conf tests/conf/*.cnf tests/run tests/out tests/bin/* gcache/src/test gcache/tests/gcache_tests Makefile Makefile.in .deps .libs garb/garbd gcs/src/.garb docs/build/ gcomm/test/ssl_test galerautils/src/gu_fnv_bench galera-3-25.3.20/www.evanjones.ca/0000755000015300001660000000000013042054732016333 5ustar jenkinsjenkinsgalera-3-25.3.20/www.evanjones.ca/crc32c.h0000644000015300001660000000336013042054732017565 0ustar jenkinsjenkins// Copyright 2008,2009,2010 Massachusetts Institute of Technology. // All rights reserved. Use of this source code is governed by a // BSD-style license that can be found in the LICENSE file. 
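/*
 * Illustrative usage sketch (not part of the original header): the typical
 * init/update/finish sequence with the declarations below. `buf' and `len'
 * are placeholders; whether the global crc32c pointer must first be assigned
 * from detectBestCRC32C() depends on how the library initializes it.
 *
 *     uint32_t crc = crc32cInit();            // seed value (0xFFFFFFFF)
 *     crc = crc32c(crc, buf, len);            // may be chained over several buffers
 *     uint32_t checksum = crc32cFinish(crc);  // final, bit-inverted value
 */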
/* Codership: stripped off C++ garbage to make this a normal C header */ #ifndef __CRC32C_H__ #define __CRC32C_H__ #include // size_t #include // uint stuff /** Returns the initial value for a CRC32-C computation. */ static inline uint32_t crc32cInit() { return 0xFFFFFFFF; } /** Pointer to a function that computes a CRC32C checksum. @arg crc Previous CRC32C value, or crc32c_init(). @arg data Pointer to the data to be checksummed. @arg length length of the data in bytes. */ typedef uint32_t (*CRC32CFunctionPtr)(uint32_t crc, const void* data, size_t length); /** This will map automatically to the "best" CRC implementation. */ extern CRC32CFunctionPtr crc32c; CRC32CFunctionPtr detectBestCRC32C(); /** Converts a partial CRC32-C computation to the final value. */ static inline uint32_t crc32cFinish(uint32_t crc) { return ~crc; } uint32_t crc32cSarwate (uint32_t crc, const void* data, size_t length); uint32_t crc32cSlicingBy4(uint32_t crc, const void* data, size_t length); uint32_t crc32cSlicingBy8(uint32_t crc, const void* data, size_t length); #if defined(__x86_64) || defined(_M_AMD64) || defined(_M_X64) #define CRC32C_x86_64 #endif #if defined(CRC32C_x86_64) || defined(__i386) || defined(_M_X86) #define CRC32C_x86 #endif #if !defined(CRC32C_x86) #define CRC32C_NO_HARDWARE #endif #if !defined(CRC32C_NO_HARDWARE) uint32_t crc32cHardware32(uint32_t crc, const void* data, size_t length); uint32_t crc32cHardware64(uint32_t crc, const void* data, size_t length); #endif /* !CRC32C_NO_HARDWARE */ #endif /* __CRC32C_H__ */ galera-3-25.3.20/www.evanjones.ca/AUTHORS0000644000015300001660000000012013042054732017374 0ustar jenkinsjenkinsThis code was taken almost verbatim from http://www.evanjones.ca/crc32c.tar.bz2 galera-3-25.3.20/www.evanjones.ca/LICENSE0000644000015300001660000000355613042054732017351 0ustar jenkinsjenkinsCopyright (c) 2008,2009,2010 Massachusetts Institute of Technology. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Massachusetts Institute of Technology nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
Other portions are under the same license from Intel: http://sourceforge.net/projects/slicing-by-8/ /*++ * * Copyright (c) 2004-2006 Intel Corporation - All Rights Reserved * * This software program is licensed subject to the BSD License, * available at http://www.opensource.org/licenses/bsd-license.html * * Abstract: The main routine * --*/ galera-3-25.3.20/www.evanjones.ca/crc32c.c0000644000015300001660000011733413042054732017567 0ustar jenkinsjenkins// Copyright 2008,2009,2010 Massachusetts Institute of Technology. // All rights reserved. Use of this source code is governed by a // BSD-style license that can be found in the LICENSE file. // Implementations adapted from Intel's Slicing By 8 Sourceforge Project // http://sourceforge.net/projects/slicing-by-8/ /* * Copyright (c) 2004-2006 Intel Corporation - All Rights Reserved * * * This software program is licensed subject to the BSD License, * available at http://www.opensource.org/licenses/bsd-license.html. * * Abstract: * * Tables for software CRC generation */ /* * Copyright (c) 2013-2014 Codership Oy * Concatenated crc32ctables.cc and crc32c.cc, stripped off C++ garbage, * fixed PIC, added support for big-endian CPUs. */ #if defined(__cplusplus) extern "C" { #endif #include "crc32c.h" #include /* Tables generated with code like the following: #define CRCPOLY 0x82f63b78 // reversed 0x1EDC6F41 #define CRCINIT 0xFFFFFFFF void init() { for (uint32_t i = 0; i <= 0xFF; i++) { uint32_t x = i; for (uint32_t j = 0; j < 8; j++) x = (x>>1) ^ (CRCPOLY & (-(int32_t)(x & 1))); g_crc_slicing[0][i] = x; } for (uint32_t i = 0; i <= 0xFF; i++) { uint32_t c = g_crc_slicing[0][i]; for (uint32_t j = 1; j < 8; j++) { c = g_crc_slicing[0][c & 0xFF] ^ (c >> 8); g_crc_slicing[j][i] = c; } } } */ /* * The following CRC lookup table was generated automagically * using the following model parameters: * * Generator Polynomial = ................. 0x1EDC6F41 * Generator Polynomial Length = .......... 32 bits * Reflected Bits = ....................... TRUE * Table Generation Offset = .............. 32 bits * Number of Slices = ..................... 8 slices * Slice Lengths = ........................ 8 8 8 8 8 8 8 8 * Directory Name = ....................... .\ * File Name = ............................ 
8x256_tables.c */ const uint32_t crc_tableil8_o32[256] = { 0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4, 0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB, 0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B, 0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24, 0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B, 0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384, 0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54, 0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B, 0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A, 0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35, 0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5, 0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA, 0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45, 0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A, 0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A, 0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595, 0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48, 0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957, 0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687, 0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198, 0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927, 0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38, 0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8, 0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7, 0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096, 0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789, 0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859, 0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46, 0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9, 0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6, 0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36, 0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829, 0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C, 0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93, 0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043, 0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C, 0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3, 0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC, 0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C, 0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033, 0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652, 0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D, 0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D, 0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982, 0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D, 0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622, 0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2, 0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED, 0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530, 0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F, 0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF, 0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0, 0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F, 0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540, 0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90, 0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F, 0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE, 0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1, 0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321, 0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E, 0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81, 0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E, 0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E, 0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351 }; /* * end of the CRC lookup table crc_tableil8_o32 */ /* * The following CRC lookup table was generated automagically * using the following model parameters: * * Generator Polynomial = ................. 0x1EDC6F41 * Generator Polynomial Length = .......... 32 bits * Reflected Bits = ....................... TRUE * Table Generation Offset = .............. 32 bits * Number of Slices = ..................... 
8 slices * Slice Lengths = ........................ 8 8 8 8 8 8 8 8 * Directory Name = ....................... .\ * File Name = ............................ 8x256_tables.c */ const uint32_t crc_tableil8_o40[256] = { 0x00000000, 0x13A29877, 0x274530EE, 0x34E7A899, 0x4E8A61DC, 0x5D28F9AB, 0x69CF5132, 0x7A6DC945, 0x9D14C3B8, 0x8EB65BCF, 0xBA51F356, 0xA9F36B21, 0xD39EA264, 0xC03C3A13, 0xF4DB928A, 0xE7790AFD, 0x3FC5F181, 0x2C6769F6, 0x1880C16F, 0x0B225918, 0x714F905D, 0x62ED082A, 0x560AA0B3, 0x45A838C4, 0xA2D13239, 0xB173AA4E, 0x859402D7, 0x96369AA0, 0xEC5B53E5, 0xFFF9CB92, 0xCB1E630B, 0xD8BCFB7C, 0x7F8BE302, 0x6C297B75, 0x58CED3EC, 0x4B6C4B9B, 0x310182DE, 0x22A31AA9, 0x1644B230, 0x05E62A47, 0xE29F20BA, 0xF13DB8CD, 0xC5DA1054, 0xD6788823, 0xAC154166, 0xBFB7D911, 0x8B507188, 0x98F2E9FF, 0x404E1283, 0x53EC8AF4, 0x670B226D, 0x74A9BA1A, 0x0EC4735F, 0x1D66EB28, 0x298143B1, 0x3A23DBC6, 0xDD5AD13B, 0xCEF8494C, 0xFA1FE1D5, 0xE9BD79A2, 0x93D0B0E7, 0x80722890, 0xB4958009, 0xA737187E, 0xFF17C604, 0xECB55E73, 0xD852F6EA, 0xCBF06E9D, 0xB19DA7D8, 0xA23F3FAF, 0x96D89736, 0x857A0F41, 0x620305BC, 0x71A19DCB, 0x45463552, 0x56E4AD25, 0x2C896460, 0x3F2BFC17, 0x0BCC548E, 0x186ECCF9, 0xC0D23785, 0xD370AFF2, 0xE797076B, 0xF4359F1C, 0x8E585659, 0x9DFACE2E, 0xA91D66B7, 0xBABFFEC0, 0x5DC6F43D, 0x4E646C4A, 0x7A83C4D3, 0x69215CA4, 0x134C95E1, 0x00EE0D96, 0x3409A50F, 0x27AB3D78, 0x809C2506, 0x933EBD71, 0xA7D915E8, 0xB47B8D9F, 0xCE1644DA, 0xDDB4DCAD, 0xE9537434, 0xFAF1EC43, 0x1D88E6BE, 0x0E2A7EC9, 0x3ACDD650, 0x296F4E27, 0x53028762, 0x40A01F15, 0x7447B78C, 0x67E52FFB, 0xBF59D487, 0xACFB4CF0, 0x981CE469, 0x8BBE7C1E, 0xF1D3B55B, 0xE2712D2C, 0xD69685B5, 0xC5341DC2, 0x224D173F, 0x31EF8F48, 0x050827D1, 0x16AABFA6, 0x6CC776E3, 0x7F65EE94, 0x4B82460D, 0x5820DE7A, 0xFBC3FAF9, 0xE861628E, 0xDC86CA17, 0xCF245260, 0xB5499B25, 0xA6EB0352, 0x920CABCB, 0x81AE33BC, 0x66D73941, 0x7575A136, 0x419209AF, 0x523091D8, 0x285D589D, 0x3BFFC0EA, 0x0F186873, 0x1CBAF004, 0xC4060B78, 0xD7A4930F, 0xE3433B96, 0xF0E1A3E1, 0x8A8C6AA4, 0x992EF2D3, 0xADC95A4A, 0xBE6BC23D, 0x5912C8C0, 0x4AB050B7, 0x7E57F82E, 0x6DF56059, 0x1798A91C, 0x043A316B, 0x30DD99F2, 0x237F0185, 0x844819FB, 0x97EA818C, 0xA30D2915, 0xB0AFB162, 0xCAC27827, 0xD960E050, 0xED8748C9, 0xFE25D0BE, 0x195CDA43, 0x0AFE4234, 0x3E19EAAD, 0x2DBB72DA, 0x57D6BB9F, 0x447423E8, 0x70938B71, 0x63311306, 0xBB8DE87A, 0xA82F700D, 0x9CC8D894, 0x8F6A40E3, 0xF50789A6, 0xE6A511D1, 0xD242B948, 0xC1E0213F, 0x26992BC2, 0x353BB3B5, 0x01DC1B2C, 0x127E835B, 0x68134A1E, 0x7BB1D269, 0x4F567AF0, 0x5CF4E287, 0x04D43CFD, 0x1776A48A, 0x23910C13, 0x30339464, 0x4A5E5D21, 0x59FCC556, 0x6D1B6DCF, 0x7EB9F5B8, 0x99C0FF45, 0x8A626732, 0xBE85CFAB, 0xAD2757DC, 0xD74A9E99, 0xC4E806EE, 0xF00FAE77, 0xE3AD3600, 0x3B11CD7C, 0x28B3550B, 0x1C54FD92, 0x0FF665E5, 0x759BACA0, 0x663934D7, 0x52DE9C4E, 0x417C0439, 0xA6050EC4, 0xB5A796B3, 0x81403E2A, 0x92E2A65D, 0xE88F6F18, 0xFB2DF76F, 0xCFCA5FF6, 0xDC68C781, 0x7B5FDFFF, 0x68FD4788, 0x5C1AEF11, 0x4FB87766, 0x35D5BE23, 0x26772654, 0x12908ECD, 0x013216BA, 0xE64B1C47, 0xF5E98430, 0xC10E2CA9, 0xD2ACB4DE, 0xA8C17D9B, 0xBB63E5EC, 0x8F844D75, 0x9C26D502, 0x449A2E7E, 0x5738B609, 0x63DF1E90, 0x707D86E7, 0x0A104FA2, 0x19B2D7D5, 0x2D557F4C, 0x3EF7E73B, 0xD98EEDC6, 0xCA2C75B1, 0xFECBDD28, 0xED69455F, 0x97048C1A, 0x84A6146D, 0xB041BCF4, 0xA3E32483 }; /* * end of the CRC lookup table crc_tableil8_o40 */ /* * The following CRC lookup table was generated automagically * using the following model parameters: * * Generator Polynomial = ................. 0x1EDC6F41 * Generator Polynomial Length = .......... 
32 bits * Reflected Bits = ....................... TRUE * Table Generation Offset = .............. 32 bits * Number of Slices = ..................... 8 slices * Slice Lengths = ........................ 8 8 8 8 8 8 8 8 * Directory Name = ....................... .\ * File Name = ............................ 8x256_tables.c */ const uint32_t crc_tableil8_o48[256] = { 0x00000000, 0xA541927E, 0x4F6F520D, 0xEA2EC073, 0x9EDEA41A, 0x3B9F3664, 0xD1B1F617, 0x74F06469, 0x38513EC5, 0x9D10ACBB, 0x773E6CC8, 0xD27FFEB6, 0xA68F9ADF, 0x03CE08A1, 0xE9E0C8D2, 0x4CA15AAC, 0x70A27D8A, 0xD5E3EFF4, 0x3FCD2F87, 0x9A8CBDF9, 0xEE7CD990, 0x4B3D4BEE, 0xA1138B9D, 0x045219E3, 0x48F3434F, 0xEDB2D131, 0x079C1142, 0xA2DD833C, 0xD62DE755, 0x736C752B, 0x9942B558, 0x3C032726, 0xE144FB14, 0x4405696A, 0xAE2BA919, 0x0B6A3B67, 0x7F9A5F0E, 0xDADBCD70, 0x30F50D03, 0x95B49F7D, 0xD915C5D1, 0x7C5457AF, 0x967A97DC, 0x333B05A2, 0x47CB61CB, 0xE28AF3B5, 0x08A433C6, 0xADE5A1B8, 0x91E6869E, 0x34A714E0, 0xDE89D493, 0x7BC846ED, 0x0F382284, 0xAA79B0FA, 0x40577089, 0xE516E2F7, 0xA9B7B85B, 0x0CF62A25, 0xE6D8EA56, 0x43997828, 0x37691C41, 0x92288E3F, 0x78064E4C, 0xDD47DC32, 0xC76580D9, 0x622412A7, 0x880AD2D4, 0x2D4B40AA, 0x59BB24C3, 0xFCFAB6BD, 0x16D476CE, 0xB395E4B0, 0xFF34BE1C, 0x5A752C62, 0xB05BEC11, 0x151A7E6F, 0x61EA1A06, 0xC4AB8878, 0x2E85480B, 0x8BC4DA75, 0xB7C7FD53, 0x12866F2D, 0xF8A8AF5E, 0x5DE93D20, 0x29195949, 0x8C58CB37, 0x66760B44, 0xC337993A, 0x8F96C396, 0x2AD751E8, 0xC0F9919B, 0x65B803E5, 0x1148678C, 0xB409F5F2, 0x5E273581, 0xFB66A7FF, 0x26217BCD, 0x8360E9B3, 0x694E29C0, 0xCC0FBBBE, 0xB8FFDFD7, 0x1DBE4DA9, 0xF7908DDA, 0x52D11FA4, 0x1E704508, 0xBB31D776, 0x511F1705, 0xF45E857B, 0x80AEE112, 0x25EF736C, 0xCFC1B31F, 0x6A802161, 0x56830647, 0xF3C29439, 0x19EC544A, 0xBCADC634, 0xC85DA25D, 0x6D1C3023, 0x8732F050, 0x2273622E, 0x6ED23882, 0xCB93AAFC, 0x21BD6A8F, 0x84FCF8F1, 0xF00C9C98, 0x554D0EE6, 0xBF63CE95, 0x1A225CEB, 0x8B277743, 0x2E66E53D, 0xC448254E, 0x6109B730, 0x15F9D359, 0xB0B84127, 0x5A968154, 0xFFD7132A, 0xB3764986, 0x1637DBF8, 0xFC191B8B, 0x595889F5, 0x2DA8ED9C, 0x88E97FE2, 0x62C7BF91, 0xC7862DEF, 0xFB850AC9, 0x5EC498B7, 0xB4EA58C4, 0x11ABCABA, 0x655BAED3, 0xC01A3CAD, 0x2A34FCDE, 0x8F756EA0, 0xC3D4340C, 0x6695A672, 0x8CBB6601, 0x29FAF47F, 0x5D0A9016, 0xF84B0268, 0x1265C21B, 0xB7245065, 0x6A638C57, 0xCF221E29, 0x250CDE5A, 0x804D4C24, 0xF4BD284D, 0x51FCBA33, 0xBBD27A40, 0x1E93E83E, 0x5232B292, 0xF77320EC, 0x1D5DE09F, 0xB81C72E1, 0xCCEC1688, 0x69AD84F6, 0x83834485, 0x26C2D6FB, 0x1AC1F1DD, 0xBF8063A3, 0x55AEA3D0, 0xF0EF31AE, 0x841F55C7, 0x215EC7B9, 0xCB7007CA, 0x6E3195B4, 0x2290CF18, 0x87D15D66, 0x6DFF9D15, 0xC8BE0F6B, 0xBC4E6B02, 0x190FF97C, 0xF321390F, 0x5660AB71, 0x4C42F79A, 0xE90365E4, 0x032DA597, 0xA66C37E9, 0xD29C5380, 0x77DDC1FE, 0x9DF3018D, 0x38B293F3, 0x7413C95F, 0xD1525B21, 0x3B7C9B52, 0x9E3D092C, 0xEACD6D45, 0x4F8CFF3B, 0xA5A23F48, 0x00E3AD36, 0x3CE08A10, 0x99A1186E, 0x738FD81D, 0xD6CE4A63, 0xA23E2E0A, 0x077FBC74, 0xED517C07, 0x4810EE79, 0x04B1B4D5, 0xA1F026AB, 0x4BDEE6D8, 0xEE9F74A6, 0x9A6F10CF, 0x3F2E82B1, 0xD50042C2, 0x7041D0BC, 0xAD060C8E, 0x08479EF0, 0xE2695E83, 0x4728CCFD, 0x33D8A894, 0x96993AEA, 0x7CB7FA99, 0xD9F668E7, 0x9557324B, 0x3016A035, 0xDA386046, 0x7F79F238, 0x0B899651, 0xAEC8042F, 0x44E6C45C, 0xE1A75622, 0xDDA47104, 0x78E5E37A, 0x92CB2309, 0x378AB177, 0x437AD51E, 0xE63B4760, 0x0C158713, 0xA954156D, 0xE5F54FC1, 0x40B4DDBF, 0xAA9A1DCC, 0x0FDB8FB2, 0x7B2BEBDB, 0xDE6A79A5, 0x3444B9D6, 0x91052BA8 }; /* * end of the CRC lookup table crc_tableil8_o48 */ /* * The following CRC lookup table was generated 
automagically * using the following model parameters: * * Generator Polynomial = ................. 0x1EDC6F41 * Generator Polynomial Length = .......... 32 bits * Reflected Bits = ....................... TRUE * Table Generation Offset = .............. 32 bits * Number of Slices = ..................... 8 slices * Slice Lengths = ........................ 8 8 8 8 8 8 8 8 * Directory Name = ....................... .\ * File Name = ............................ 8x256_tables.c */ const uint32_t crc_tableil8_o56[256] = { 0x00000000, 0xDD45AAB8, 0xBF672381, 0x62228939, 0x7B2231F3, 0xA6679B4B, 0xC4451272, 0x1900B8CA, 0xF64463E6, 0x2B01C95E, 0x49234067, 0x9466EADF, 0x8D665215, 0x5023F8AD, 0x32017194, 0xEF44DB2C, 0xE964B13D, 0x34211B85, 0x560392BC, 0x8B463804, 0x924680CE, 0x4F032A76, 0x2D21A34F, 0xF06409F7, 0x1F20D2DB, 0xC2657863, 0xA047F15A, 0x7D025BE2, 0x6402E328, 0xB9474990, 0xDB65C0A9, 0x06206A11, 0xD725148B, 0x0A60BE33, 0x6842370A, 0xB5079DB2, 0xAC072578, 0x71428FC0, 0x136006F9, 0xCE25AC41, 0x2161776D, 0xFC24DDD5, 0x9E0654EC, 0x4343FE54, 0x5A43469E, 0x8706EC26, 0xE524651F, 0x3861CFA7, 0x3E41A5B6, 0xE3040F0E, 0x81268637, 0x5C632C8F, 0x45639445, 0x98263EFD, 0xFA04B7C4, 0x27411D7C, 0xC805C650, 0x15406CE8, 0x7762E5D1, 0xAA274F69, 0xB327F7A3, 0x6E625D1B, 0x0C40D422, 0xD1057E9A, 0xABA65FE7, 0x76E3F55F, 0x14C17C66, 0xC984D6DE, 0xD0846E14, 0x0DC1C4AC, 0x6FE34D95, 0xB2A6E72D, 0x5DE23C01, 0x80A796B9, 0xE2851F80, 0x3FC0B538, 0x26C00DF2, 0xFB85A74A, 0x99A72E73, 0x44E284CB, 0x42C2EEDA, 0x9F874462, 0xFDA5CD5B, 0x20E067E3, 0x39E0DF29, 0xE4A57591, 0x8687FCA8, 0x5BC25610, 0xB4868D3C, 0x69C32784, 0x0BE1AEBD, 0xD6A40405, 0xCFA4BCCF, 0x12E11677, 0x70C39F4E, 0xAD8635F6, 0x7C834B6C, 0xA1C6E1D4, 0xC3E468ED, 0x1EA1C255, 0x07A17A9F, 0xDAE4D027, 0xB8C6591E, 0x6583F3A6, 0x8AC7288A, 0x57828232, 0x35A00B0B, 0xE8E5A1B3, 0xF1E51979, 0x2CA0B3C1, 0x4E823AF8, 0x93C79040, 0x95E7FA51, 0x48A250E9, 0x2A80D9D0, 0xF7C57368, 0xEEC5CBA2, 0x3380611A, 0x51A2E823, 0x8CE7429B, 0x63A399B7, 0xBEE6330F, 0xDCC4BA36, 0x0181108E, 0x1881A844, 0xC5C402FC, 0xA7E68BC5, 0x7AA3217D, 0x52A0C93F, 0x8FE56387, 0xEDC7EABE, 0x30824006, 0x2982F8CC, 0xF4C75274, 0x96E5DB4D, 0x4BA071F5, 0xA4E4AAD9, 0x79A10061, 0x1B838958, 0xC6C623E0, 0xDFC69B2A, 0x02833192, 0x60A1B8AB, 0xBDE41213, 0xBBC47802, 0x6681D2BA, 0x04A35B83, 0xD9E6F13B, 0xC0E649F1, 0x1DA3E349, 0x7F816A70, 0xA2C4C0C8, 0x4D801BE4, 0x90C5B15C, 0xF2E73865, 0x2FA292DD, 0x36A22A17, 0xEBE780AF, 0x89C50996, 0x5480A32E, 0x8585DDB4, 0x58C0770C, 0x3AE2FE35, 0xE7A7548D, 0xFEA7EC47, 0x23E246FF, 0x41C0CFC6, 0x9C85657E, 0x73C1BE52, 0xAE8414EA, 0xCCA69DD3, 0x11E3376B, 0x08E38FA1, 0xD5A62519, 0xB784AC20, 0x6AC10698, 0x6CE16C89, 0xB1A4C631, 0xD3864F08, 0x0EC3E5B0, 0x17C35D7A, 0xCA86F7C2, 0xA8A47EFB, 0x75E1D443, 0x9AA50F6F, 0x47E0A5D7, 0x25C22CEE, 0xF8878656, 0xE1873E9C, 0x3CC29424, 0x5EE01D1D, 0x83A5B7A5, 0xF90696D8, 0x24433C60, 0x4661B559, 0x9B241FE1, 0x8224A72B, 0x5F610D93, 0x3D4384AA, 0xE0062E12, 0x0F42F53E, 0xD2075F86, 0xB025D6BF, 0x6D607C07, 0x7460C4CD, 0xA9256E75, 0xCB07E74C, 0x16424DF4, 0x106227E5, 0xCD278D5D, 0xAF050464, 0x7240AEDC, 0x6B401616, 0xB605BCAE, 0xD4273597, 0x09629F2F, 0xE6264403, 0x3B63EEBB, 0x59416782, 0x8404CD3A, 0x9D0475F0, 0x4041DF48, 0x22635671, 0xFF26FCC9, 0x2E238253, 0xF36628EB, 0x9144A1D2, 0x4C010B6A, 0x5501B3A0, 0x88441918, 0xEA669021, 0x37233A99, 0xD867E1B5, 0x05224B0D, 0x6700C234, 0xBA45688C, 0xA345D046, 0x7E007AFE, 0x1C22F3C7, 0xC167597F, 0xC747336E, 0x1A0299D6, 0x782010EF, 0xA565BA57, 0xBC65029D, 0x6120A825, 0x0302211C, 0xDE478BA4, 0x31035088, 0xEC46FA30, 0x8E647309, 0x5321D9B1, 0x4A21617B, 
0x9764CBC3, 0xF54642FA, 0x2803E842 }; /* * end of the CRC lookup table crc_tableil8_o56 */ /* * The following CRC lookup table was generated automagically * using the following model parameters: * * Generator Polynomial = ................. 0x1EDC6F41 * Generator Polynomial Length = .......... 32 bits * Reflected Bits = ....................... TRUE * Table Generation Offset = .............. 32 bits * Number of Slices = ..................... 8 slices * Slice Lengths = ........................ 8 8 8 8 8 8 8 8 * Directory Name = ....................... .\ * File Name = ............................ 8x256_tables.c */ const uint32_t crc_tableil8_o64[256] = { 0x00000000, 0x38116FAC, 0x7022DF58, 0x4833B0F4, 0xE045BEB0, 0xD854D11C, 0x906761E8, 0xA8760E44, 0xC5670B91, 0xFD76643D, 0xB545D4C9, 0x8D54BB65, 0x2522B521, 0x1D33DA8D, 0x55006A79, 0x6D1105D5, 0x8F2261D3, 0xB7330E7F, 0xFF00BE8B, 0xC711D127, 0x6F67DF63, 0x5776B0CF, 0x1F45003B, 0x27546F97, 0x4A456A42, 0x725405EE, 0x3A67B51A, 0x0276DAB6, 0xAA00D4F2, 0x9211BB5E, 0xDA220BAA, 0xE2336406, 0x1BA8B557, 0x23B9DAFB, 0x6B8A6A0F, 0x539B05A3, 0xFBED0BE7, 0xC3FC644B, 0x8BCFD4BF, 0xB3DEBB13, 0xDECFBEC6, 0xE6DED16A, 0xAEED619E, 0x96FC0E32, 0x3E8A0076, 0x069B6FDA, 0x4EA8DF2E, 0x76B9B082, 0x948AD484, 0xAC9BBB28, 0xE4A80BDC, 0xDCB96470, 0x74CF6A34, 0x4CDE0598, 0x04EDB56C, 0x3CFCDAC0, 0x51EDDF15, 0x69FCB0B9, 0x21CF004D, 0x19DE6FE1, 0xB1A861A5, 0x89B90E09, 0xC18ABEFD, 0xF99BD151, 0x37516AAE, 0x0F400502, 0x4773B5F6, 0x7F62DA5A, 0xD714D41E, 0xEF05BBB2, 0xA7360B46, 0x9F2764EA, 0xF236613F, 0xCA270E93, 0x8214BE67, 0xBA05D1CB, 0x1273DF8F, 0x2A62B023, 0x625100D7, 0x5A406F7B, 0xB8730B7D, 0x806264D1, 0xC851D425, 0xF040BB89, 0x5836B5CD, 0x6027DA61, 0x28146A95, 0x10050539, 0x7D1400EC, 0x45056F40, 0x0D36DFB4, 0x3527B018, 0x9D51BE5C, 0xA540D1F0, 0xED736104, 0xD5620EA8, 0x2CF9DFF9, 0x14E8B055, 0x5CDB00A1, 0x64CA6F0D, 0xCCBC6149, 0xF4AD0EE5, 0xBC9EBE11, 0x848FD1BD, 0xE99ED468, 0xD18FBBC4, 0x99BC0B30, 0xA1AD649C, 0x09DB6AD8, 0x31CA0574, 0x79F9B580, 0x41E8DA2C, 0xA3DBBE2A, 0x9BCAD186, 0xD3F96172, 0xEBE80EDE, 0x439E009A, 0x7B8F6F36, 0x33BCDFC2, 0x0BADB06E, 0x66BCB5BB, 0x5EADDA17, 0x169E6AE3, 0x2E8F054F, 0x86F90B0B, 0xBEE864A7, 0xF6DBD453, 0xCECABBFF, 0x6EA2D55C, 0x56B3BAF0, 0x1E800A04, 0x269165A8, 0x8EE76BEC, 0xB6F60440, 0xFEC5B4B4, 0xC6D4DB18, 0xABC5DECD, 0x93D4B161, 0xDBE70195, 0xE3F66E39, 0x4B80607D, 0x73910FD1, 0x3BA2BF25, 0x03B3D089, 0xE180B48F, 0xD991DB23, 0x91A26BD7, 0xA9B3047B, 0x01C50A3F, 0x39D46593, 0x71E7D567, 0x49F6BACB, 0x24E7BF1E, 0x1CF6D0B2, 0x54C56046, 0x6CD40FEA, 0xC4A201AE, 0xFCB36E02, 0xB480DEF6, 0x8C91B15A, 0x750A600B, 0x4D1B0FA7, 0x0528BF53, 0x3D39D0FF, 0x954FDEBB, 0xAD5EB117, 0xE56D01E3, 0xDD7C6E4F, 0xB06D6B9A, 0x887C0436, 0xC04FB4C2, 0xF85EDB6E, 0x5028D52A, 0x6839BA86, 0x200A0A72, 0x181B65DE, 0xFA2801D8, 0xC2396E74, 0x8A0ADE80, 0xB21BB12C, 0x1A6DBF68, 0x227CD0C4, 0x6A4F6030, 0x525E0F9C, 0x3F4F0A49, 0x075E65E5, 0x4F6DD511, 0x777CBABD, 0xDF0AB4F9, 0xE71BDB55, 0xAF286BA1, 0x9739040D, 0x59F3BFF2, 0x61E2D05E, 0x29D160AA, 0x11C00F06, 0xB9B60142, 0x81A76EEE, 0xC994DE1A, 0xF185B1B6, 0x9C94B463, 0xA485DBCF, 0xECB66B3B, 0xD4A70497, 0x7CD10AD3, 0x44C0657F, 0x0CF3D58B, 0x34E2BA27, 0xD6D1DE21, 0xEEC0B18D, 0xA6F30179, 0x9EE26ED5, 0x36946091, 0x0E850F3D, 0x46B6BFC9, 0x7EA7D065, 0x13B6D5B0, 0x2BA7BA1C, 0x63940AE8, 0x5B856544, 0xF3F36B00, 0xCBE204AC, 0x83D1B458, 0xBBC0DBF4, 0x425B0AA5, 0x7A4A6509, 0x3279D5FD, 0x0A68BA51, 0xA21EB415, 0x9A0FDBB9, 0xD23C6B4D, 0xEA2D04E1, 0x873C0134, 0xBF2D6E98, 0xF71EDE6C, 0xCF0FB1C0, 0x6779BF84, 0x5F68D028, 0x175B60DC, 0x2F4A0F70, 0xCD796B76, 
0xF56804DA, 0xBD5BB42E, 0x854ADB82, 0x2D3CD5C6, 0x152DBA6A, 0x5D1E0A9E, 0x650F6532, 0x081E60E7, 0x300F0F4B, 0x783CBFBF, 0x402DD013, 0xE85BDE57, 0xD04AB1FB, 0x9879010F, 0xA0686EA3 }; /* * end of the CRC lookup table crc_tableil8_o64 */ /* * The following CRC lookup table was generated automagically * using the following model parameters: * * Generator Polynomial = ................. 0x1EDC6F41 * Generator Polynomial Length = .......... 32 bits * Reflected Bits = ....................... TRUE * Table Generation Offset = .............. 32 bits * Number of Slices = ..................... 8 slices * Slice Lengths = ........................ 8 8 8 8 8 8 8 8 * Directory Name = ....................... .\ * File Name = ............................ 8x256_tables.c */ const uint32_t crc_tableil8_o72[256] = { 0x00000000, 0xEF306B19, 0xDB8CA0C3, 0x34BCCBDA, 0xB2F53777, 0x5DC55C6E, 0x697997B4, 0x8649FCAD, 0x6006181F, 0x8F367306, 0xBB8AB8DC, 0x54BAD3C5, 0xD2F32F68, 0x3DC34471, 0x097F8FAB, 0xE64FE4B2, 0xC00C303E, 0x2F3C5B27, 0x1B8090FD, 0xF4B0FBE4, 0x72F90749, 0x9DC96C50, 0xA975A78A, 0x4645CC93, 0xA00A2821, 0x4F3A4338, 0x7B8688E2, 0x94B6E3FB, 0x12FF1F56, 0xFDCF744F, 0xC973BF95, 0x2643D48C, 0x85F4168D, 0x6AC47D94, 0x5E78B64E, 0xB148DD57, 0x370121FA, 0xD8314AE3, 0xEC8D8139, 0x03BDEA20, 0xE5F20E92, 0x0AC2658B, 0x3E7EAE51, 0xD14EC548, 0x570739E5, 0xB83752FC, 0x8C8B9926, 0x63BBF23F, 0x45F826B3, 0xAAC84DAA, 0x9E748670, 0x7144ED69, 0xF70D11C4, 0x183D7ADD, 0x2C81B107, 0xC3B1DA1E, 0x25FE3EAC, 0xCACE55B5, 0xFE729E6F, 0x1142F576, 0x970B09DB, 0x783B62C2, 0x4C87A918, 0xA3B7C201, 0x0E045BEB, 0xE13430F2, 0xD588FB28, 0x3AB89031, 0xBCF16C9C, 0x53C10785, 0x677DCC5F, 0x884DA746, 0x6E0243F4, 0x813228ED, 0xB58EE337, 0x5ABE882E, 0xDCF77483, 0x33C71F9A, 0x077BD440, 0xE84BBF59, 0xCE086BD5, 0x213800CC, 0x1584CB16, 0xFAB4A00F, 0x7CFD5CA2, 0x93CD37BB, 0xA771FC61, 0x48419778, 0xAE0E73CA, 0x413E18D3, 0x7582D309, 0x9AB2B810, 0x1CFB44BD, 0xF3CB2FA4, 0xC777E47E, 0x28478F67, 0x8BF04D66, 0x64C0267F, 0x507CEDA5, 0xBF4C86BC, 0x39057A11, 0xD6351108, 0xE289DAD2, 0x0DB9B1CB, 0xEBF65579, 0x04C63E60, 0x307AF5BA, 0xDF4A9EA3, 0x5903620E, 0xB6330917, 0x828FC2CD, 0x6DBFA9D4, 0x4BFC7D58, 0xA4CC1641, 0x9070DD9B, 0x7F40B682, 0xF9094A2F, 0x16392136, 0x2285EAEC, 0xCDB581F5, 0x2BFA6547, 0xC4CA0E5E, 0xF076C584, 0x1F46AE9D, 0x990F5230, 0x763F3929, 0x4283F2F3, 0xADB399EA, 0x1C08B7D6, 0xF338DCCF, 0xC7841715, 0x28B47C0C, 0xAEFD80A1, 0x41CDEBB8, 0x75712062, 0x9A414B7B, 0x7C0EAFC9, 0x933EC4D0, 0xA7820F0A, 0x48B26413, 0xCEFB98BE, 0x21CBF3A7, 0x1577387D, 0xFA475364, 0xDC0487E8, 0x3334ECF1, 0x0788272B, 0xE8B84C32, 0x6EF1B09F, 0x81C1DB86, 0xB57D105C, 0x5A4D7B45, 0xBC029FF7, 0x5332F4EE, 0x678E3F34, 0x88BE542D, 0x0EF7A880, 0xE1C7C399, 0xD57B0843, 0x3A4B635A, 0x99FCA15B, 0x76CCCA42, 0x42700198, 0xAD406A81, 0x2B09962C, 0xC439FD35, 0xF08536EF, 0x1FB55DF6, 0xF9FAB944, 0x16CAD25D, 0x22761987, 0xCD46729E, 0x4B0F8E33, 0xA43FE52A, 0x90832EF0, 0x7FB345E9, 0x59F09165, 0xB6C0FA7C, 0x827C31A6, 0x6D4C5ABF, 0xEB05A612, 0x0435CD0B, 0x308906D1, 0xDFB96DC8, 0x39F6897A, 0xD6C6E263, 0xE27A29B9, 0x0D4A42A0, 0x8B03BE0D, 0x6433D514, 0x508F1ECE, 0xBFBF75D7, 0x120CEC3D, 0xFD3C8724, 0xC9804CFE, 0x26B027E7, 0xA0F9DB4A, 0x4FC9B053, 0x7B757B89, 0x94451090, 0x720AF422, 0x9D3A9F3B, 0xA98654E1, 0x46B63FF8, 0xC0FFC355, 0x2FCFA84C, 0x1B736396, 0xF443088F, 0xD200DC03, 0x3D30B71A, 0x098C7CC0, 0xE6BC17D9, 0x60F5EB74, 0x8FC5806D, 0xBB794BB7, 0x544920AE, 0xB206C41C, 0x5D36AF05, 0x698A64DF, 0x86BA0FC6, 0x00F3F36B, 0xEFC39872, 0xDB7F53A8, 0x344F38B1, 0x97F8FAB0, 0x78C891A9, 0x4C745A73, 0xA344316A, 0x250DCDC7, 
0xCA3DA6DE, 0xFE816D04, 0x11B1061D, 0xF7FEE2AF, 0x18CE89B6, 0x2C72426C, 0xC3422975, 0x450BD5D8, 0xAA3BBEC1, 0x9E87751B, 0x71B71E02, 0x57F4CA8E, 0xB8C4A197, 0x8C786A4D, 0x63480154, 0xE501FDF9, 0x0A3196E0, 0x3E8D5D3A, 0xD1BD3623, 0x37F2D291, 0xD8C2B988, 0xEC7E7252, 0x034E194B, 0x8507E5E6, 0x6A378EFF, 0x5E8B4525, 0xB1BB2E3C }; /* * end of the CRC lookup table crc_tableil8_o72 */ /* * The following CRC lookup table was generated automagically * using the following model parameters: * * Generator Polynomial = ................. 0x1EDC6F41 * Generator Polynomial Length = .......... 32 bits * Reflected Bits = ....................... TRUE * Table Generation Offset = .............. 32 bits * Number of Slices = ..................... 8 slices * Slice Lengths = ........................ 8 8 8 8 8 8 8 8 * Directory Name = ....................... .\ * File Name = ............................ 8x256_tables.c */ const uint32_t crc_tableil8_o80[256] = { 0x00000000, 0x68032CC8, 0xD0065990, 0xB8057558, 0xA5E0C5D1, 0xCDE3E919, 0x75E69C41, 0x1DE5B089, 0x4E2DFD53, 0x262ED19B, 0x9E2BA4C3, 0xF628880B, 0xEBCD3882, 0x83CE144A, 0x3BCB6112, 0x53C84DDA, 0x9C5BFAA6, 0xF458D66E, 0x4C5DA336, 0x245E8FFE, 0x39BB3F77, 0x51B813BF, 0xE9BD66E7, 0x81BE4A2F, 0xD27607F5, 0xBA752B3D, 0x02705E65, 0x6A7372AD, 0x7796C224, 0x1F95EEEC, 0xA7909BB4, 0xCF93B77C, 0x3D5B83BD, 0x5558AF75, 0xED5DDA2D, 0x855EF6E5, 0x98BB466C, 0xF0B86AA4, 0x48BD1FFC, 0x20BE3334, 0x73767EEE, 0x1B755226, 0xA370277E, 0xCB730BB6, 0xD696BB3F, 0xBE9597F7, 0x0690E2AF, 0x6E93CE67, 0xA100791B, 0xC90355D3, 0x7106208B, 0x19050C43, 0x04E0BCCA, 0x6CE39002, 0xD4E6E55A, 0xBCE5C992, 0xEF2D8448, 0x872EA880, 0x3F2BDDD8, 0x5728F110, 0x4ACD4199, 0x22CE6D51, 0x9ACB1809, 0xF2C834C1, 0x7AB7077A, 0x12B42BB2, 0xAAB15EEA, 0xC2B27222, 0xDF57C2AB, 0xB754EE63, 0x0F519B3B, 0x6752B7F3, 0x349AFA29, 0x5C99D6E1, 0xE49CA3B9, 0x8C9F8F71, 0x917A3FF8, 0xF9791330, 0x417C6668, 0x297F4AA0, 0xE6ECFDDC, 0x8EEFD114, 0x36EAA44C, 0x5EE98884, 0x430C380D, 0x2B0F14C5, 0x930A619D, 0xFB094D55, 0xA8C1008F, 0xC0C22C47, 0x78C7591F, 0x10C475D7, 0x0D21C55E, 0x6522E996, 0xDD279CCE, 0xB524B006, 0x47EC84C7, 0x2FEFA80F, 0x97EADD57, 0xFFE9F19F, 0xE20C4116, 0x8A0F6DDE, 0x320A1886, 0x5A09344E, 0x09C17994, 0x61C2555C, 0xD9C72004, 0xB1C40CCC, 0xAC21BC45, 0xC422908D, 0x7C27E5D5, 0x1424C91D, 0xDBB77E61, 0xB3B452A9, 0x0BB127F1, 0x63B20B39, 0x7E57BBB0, 0x16549778, 0xAE51E220, 0xC652CEE8, 0x959A8332, 0xFD99AFFA, 0x459CDAA2, 0x2D9FF66A, 0x307A46E3, 0x58796A2B, 0xE07C1F73, 0x887F33BB, 0xF56E0EF4, 0x9D6D223C, 0x25685764, 0x4D6B7BAC, 0x508ECB25, 0x388DE7ED, 0x808892B5, 0xE88BBE7D, 0xBB43F3A7, 0xD340DF6F, 0x6B45AA37, 0x034686FF, 0x1EA33676, 0x76A01ABE, 0xCEA56FE6, 0xA6A6432E, 0x6935F452, 0x0136D89A, 0xB933ADC2, 0xD130810A, 0xCCD53183, 0xA4D61D4B, 0x1CD36813, 0x74D044DB, 0x27180901, 0x4F1B25C9, 0xF71E5091, 0x9F1D7C59, 0x82F8CCD0, 0xEAFBE018, 0x52FE9540, 0x3AFDB988, 0xC8358D49, 0xA036A181, 0x1833D4D9, 0x7030F811, 0x6DD54898, 0x05D66450, 0xBDD31108, 0xD5D03DC0, 0x8618701A, 0xEE1B5CD2, 0x561E298A, 0x3E1D0542, 0x23F8B5CB, 0x4BFB9903, 0xF3FEEC5B, 0x9BFDC093, 0x546E77EF, 0x3C6D5B27, 0x84682E7F, 0xEC6B02B7, 0xF18EB23E, 0x998D9EF6, 0x2188EBAE, 0x498BC766, 0x1A438ABC, 0x7240A674, 0xCA45D32C, 0xA246FFE4, 0xBFA34F6D, 0xD7A063A5, 0x6FA516FD, 0x07A63A35, 0x8FD9098E, 0xE7DA2546, 0x5FDF501E, 0x37DC7CD6, 0x2A39CC5F, 0x423AE097, 0xFA3F95CF, 0x923CB907, 0xC1F4F4DD, 0xA9F7D815, 0x11F2AD4D, 0x79F18185, 0x6414310C, 0x0C171DC4, 0xB412689C, 0xDC114454, 0x1382F328, 0x7B81DFE0, 0xC384AAB8, 0xAB878670, 0xB66236F9, 0xDE611A31, 0x66646F69, 0x0E6743A1, 0x5DAF0E7B, 
0x35AC22B3, 0x8DA957EB, 0xE5AA7B23, 0xF84FCBAA, 0x904CE762, 0x2849923A, 0x404ABEF2, 0xB2828A33, 0xDA81A6FB, 0x6284D3A3, 0x0A87FF6B, 0x17624FE2, 0x7F61632A, 0xC7641672, 0xAF673ABA, 0xFCAF7760, 0x94AC5BA8, 0x2CA92EF0, 0x44AA0238, 0x594FB2B1, 0x314C9E79, 0x8949EB21, 0xE14AC7E9, 0x2ED97095, 0x46DA5C5D, 0xFEDF2905, 0x96DC05CD, 0x8B39B544, 0xE33A998C, 0x5B3FECD4, 0x333CC01C, 0x60F48DC6, 0x08F7A10E, 0xB0F2D456, 0xD8F1F89E, 0xC5144817, 0xAD1764DF, 0x15121187, 0x7D113D4F }; /* * end of the CRC lookup table crc_tableil8_o80 */ /* * The following CRC lookup table was generated automagically * using the following model parameters: * * Generator Polynomial = ................. 0x1EDC6F41 * Generator Polynomial Length = .......... 32 bits * Reflected Bits = ....................... TRUE * Table Generation Offset = .............. 32 bits * Number of Slices = ..................... 8 slices * Slice Lengths = ........................ 8 8 8 8 8 8 8 8 * Directory Name = ....................... .\ * File Name = ............................ 8x256_tables.c */ const uint32_t crc_tableil8_o88[256] = { 0x00000000, 0x493C7D27, 0x9278FA4E, 0xDB448769, 0x211D826D, 0x6821FF4A, 0xB3657823, 0xFA590504, 0x423B04DA, 0x0B0779FD, 0xD043FE94, 0x997F83B3, 0x632686B7, 0x2A1AFB90, 0xF15E7CF9, 0xB86201DE, 0x847609B4, 0xCD4A7493, 0x160EF3FA, 0x5F328EDD, 0xA56B8BD9, 0xEC57F6FE, 0x37137197, 0x7E2F0CB0, 0xC64D0D6E, 0x8F717049, 0x5435F720, 0x1D098A07, 0xE7508F03, 0xAE6CF224, 0x7528754D, 0x3C14086A, 0x0D006599, 0x443C18BE, 0x9F789FD7, 0xD644E2F0, 0x2C1DE7F4, 0x65219AD3, 0xBE651DBA, 0xF759609D, 0x4F3B6143, 0x06071C64, 0xDD439B0D, 0x947FE62A, 0x6E26E32E, 0x271A9E09, 0xFC5E1960, 0xB5626447, 0x89766C2D, 0xC04A110A, 0x1B0E9663, 0x5232EB44, 0xA86BEE40, 0xE1579367, 0x3A13140E, 0x732F6929, 0xCB4D68F7, 0x827115D0, 0x593592B9, 0x1009EF9E, 0xEA50EA9A, 0xA36C97BD, 0x782810D4, 0x31146DF3, 0x1A00CB32, 0x533CB615, 0x8878317C, 0xC1444C5B, 0x3B1D495F, 0x72213478, 0xA965B311, 0xE059CE36, 0x583BCFE8, 0x1107B2CF, 0xCA4335A6, 0x837F4881, 0x79264D85, 0x301A30A2, 0xEB5EB7CB, 0xA262CAEC, 0x9E76C286, 0xD74ABFA1, 0x0C0E38C8, 0x453245EF, 0xBF6B40EB, 0xF6573DCC, 0x2D13BAA5, 0x642FC782, 0xDC4DC65C, 0x9571BB7B, 0x4E353C12, 0x07094135, 0xFD504431, 0xB46C3916, 0x6F28BE7F, 0x2614C358, 0x1700AEAB, 0x5E3CD38C, 0x857854E5, 0xCC4429C2, 0x361D2CC6, 0x7F2151E1, 0xA465D688, 0xED59ABAF, 0x553BAA71, 0x1C07D756, 0xC743503F, 0x8E7F2D18, 0x7426281C, 0x3D1A553B, 0xE65ED252, 0xAF62AF75, 0x9376A71F, 0xDA4ADA38, 0x010E5D51, 0x48322076, 0xB26B2572, 0xFB575855, 0x2013DF3C, 0x692FA21B, 0xD14DA3C5, 0x9871DEE2, 0x4335598B, 0x0A0924AC, 0xF05021A8, 0xB96C5C8F, 0x6228DBE6, 0x2B14A6C1, 0x34019664, 0x7D3DEB43, 0xA6796C2A, 0xEF45110D, 0x151C1409, 0x5C20692E, 0x8764EE47, 0xCE589360, 0x763A92BE, 0x3F06EF99, 0xE44268F0, 0xAD7E15D7, 0x572710D3, 0x1E1B6DF4, 0xC55FEA9D, 0x8C6397BA, 0xB0779FD0, 0xF94BE2F7, 0x220F659E, 0x6B3318B9, 0x916A1DBD, 0xD856609A, 0x0312E7F3, 0x4A2E9AD4, 0xF24C9B0A, 0xBB70E62D, 0x60346144, 0x29081C63, 0xD3511967, 0x9A6D6440, 0x4129E329, 0x08159E0E, 0x3901F3FD, 0x703D8EDA, 0xAB7909B3, 0xE2457494, 0x181C7190, 0x51200CB7, 0x8A648BDE, 0xC358F6F9, 0x7B3AF727, 0x32068A00, 0xE9420D69, 0xA07E704E, 0x5A27754A, 0x131B086D, 0xC85F8F04, 0x8163F223, 0xBD77FA49, 0xF44B876E, 0x2F0F0007, 0x66337D20, 0x9C6A7824, 0xD5560503, 0x0E12826A, 0x472EFF4D, 0xFF4CFE93, 0xB67083B4, 0x6D3404DD, 0x240879FA, 0xDE517CFE, 0x976D01D9, 0x4C2986B0, 0x0515FB97, 0x2E015D56, 0x673D2071, 0xBC79A718, 0xF545DA3F, 0x0F1CDF3B, 0x4620A21C, 0x9D642575, 0xD4585852, 0x6C3A598C, 0x250624AB, 0xFE42A3C2, 0xB77EDEE5, 0x4D27DBE1, 
0x041BA6C6, 0xDF5F21AF, 0x96635C88, 0xAA7754E2, 0xE34B29C5, 0x380FAEAC, 0x7133D38B, 0x8B6AD68F, 0xC256ABA8, 0x19122CC1, 0x502E51E6, 0xE84C5038, 0xA1702D1F, 0x7A34AA76, 0x3308D751, 0xC951D255, 0x806DAF72, 0x5B29281B, 0x1215553C, 0x230138CF, 0x6A3D45E8, 0xB179C281, 0xF845BFA6, 0x021CBAA2, 0x4B20C785, 0x906440EC, 0xD9583DCB, 0x613A3C15, 0x28064132, 0xF342C65B, 0xBA7EBB7C, 0x4027BE78, 0x091BC35F, 0xD25F4436, 0x9B633911, 0xA777317B, 0xEE4B4C5C, 0x350FCB35, 0x7C33B612, 0x866AB316, 0xCF56CE31, 0x14124958, 0x5D2E347F, 0xE54C35A1, 0xAC704886, 0x7734CFEF, 0x3E08B2C8, 0xC451B7CC, 0x8D6DCAEB, 0x56294D82, 0x1F1530A5 }; /* * end of the CRC lookup table crc_tableil8_o88 */ // Implementations adapted from Intel's Slicing By 8 Sourceforge Project // http://sourceforge.net/projects/slicing-by-8/ /*++ * * Copyright (c) 2004-2006 Intel Corporation - All Rights Reserved * * This software program is licensed subject to the BSD License, * available at http://www.opensource.org/licenses/bsd-license.html * * Abstract: The main routine * --*/ /* * Traditional software CRC32 implementation: one byte at a time */ uint32_t crc32cSarwate(uint32_t crc, const void* data, size_t length) { const char* p_buf = (const char*) data; const char* p_end = p_buf + length; while (p_buf < p_end) { crc = crc_tableil8_o32[(crc ^ *p_buf++) & 0x000000FF] ^ (crc >> 8); } return crc; } /* * Optimized CRC32 implementations that follow process input in slices * of 4 byte integers and require byteswapping on big-endian platforms, * so here we define byteswapping macro/function. * GCC normally defines __BYTE_ORDER__ so we use that if available. */ #if defined(WITH_GALERA) # include "galerautils/src/gu_byteswap.h" # define CRC32C_TO_LE32 gu_le32 #elif defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) # define CRC32C_TO_LE32(val) val #else # if defined(HAVE_BYTESWAP_H) # include # endif static inline uint32_t CRC32C_TO_LE32(uint32_t const val) { #if !defined(__BYTE_ORDER__) /* determine endianness in runtime and return if LE */ static union { uint32_t n; uint8_t little_endian; } const u = { 1 }; if (u.little_endian) return val; #elif (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) # error "Broken macro logic!" 
#endif /* __BYTE_ORDER__ */ #if defined(bswap32) return bswap32(val); #elif defined(bswap_32) return bswap_32(val); #elif defined(BSWAP_32) return BSWAP_32(val); #else return ((val << 24) | ((val & 0x0000FF00) << 8) | (val >> 24) | ((val & 0x00FF0000) >> 8)); #endif /* bswap32 */ } #endif /* WITH_GALERA */ uint32_t crc32cSlicingBy4(uint32_t crc, const void* data, size_t length) { const char* p_buf = (const char*) data; // Handle leading misaligned bytes size_t initial_bytes = (sizeof(int32_t) - (intptr_t)p_buf) & (sizeof(int32_t) - 1); if (length < initial_bytes) initial_bytes = length; for (size_t li = 0; li < initial_bytes; li++) { crc = crc_tableil8_o32[(crc ^ *p_buf++) & 0x000000FF] ^ (crc >> 8); } length -= initial_bytes; size_t running_length = length & ~(sizeof(int32_t) - 1); size_t end_bytes = length - running_length; for (size_t li = 0; li < running_length/4; li++) { crc ^= CRC32C_TO_LE32(*(const uint32_t*)p_buf); p_buf += 4; uint32_t term1 = crc_tableil8_o56[crc & 0x000000FF] ^ crc_tableil8_o48[(crc >> 8) & 0x000000FF]; uint32_t term2 = crc >> 16; crc = term1 ^ crc_tableil8_o40[term2 & 0x000000FF] ^ crc_tableil8_o32[(term2 >> 8) & 0x000000FF]; } for (size_t li=0; li < end_bytes; li++) { crc = crc_tableil8_o32[(crc ^ *p_buf++) & 0x000000FF] ^ (crc >> 8); } return crc; } uint32_t crc32cSlicingBy8(uint32_t crc, const void* data, size_t length) { const char* p_buf = (const char*) data; // Handle leading misaligned bytes size_t initial_bytes = (sizeof(int32_t) - (intptr_t)p_buf) & (sizeof(int32_t) - 1); if (length < initial_bytes) initial_bytes = length; for (size_t li = 0; li < initial_bytes; li++) { crc = crc_tableil8_o32[(crc ^ *p_buf++) & 0x000000FF] ^ (crc >> 8); } length -= initial_bytes; size_t running_length = length & ~(sizeof(uint64_t) - 1); size_t end_bytes = length - running_length; for (size_t li = 0; li < running_length/8; li++) { const uint32_t* const slices = (const uint32_t*)p_buf; crc ^= CRC32C_TO_LE32(slices[0]); uint32_t term1 = crc_tableil8_o88[crc & 0x000000FF] ^ crc_tableil8_o80[(crc >> 8) & 0x000000FF]; uint32_t term2 = crc >> 16; crc = term1 ^ crc_tableil8_o72[term2 & 0x000000FF] ^ crc_tableil8_o64[(term2 >> 8) & 0x000000FF]; uint32_t const slice2 = CRC32C_TO_LE32(slices[1]); term1 = crc_tableil8_o56[slice2 & 0x000000FF] ^ crc_tableil8_o48[(slice2 >> 8) & 0x000000FF]; term2 = slice2 >> 16; crc = crc ^ term1 ^ crc_tableil8_o40[term2 & 0x000000FF] ^ crc_tableil8_o32[(term2 >> 8) & 0x000000FF]; p_buf += 8; } for (size_t li=0; li < end_bytes; li++) { crc = crc_tableil8_o32[(crc ^ *p_buf++) & 0x000000FF] ^ (crc >> 8); } return crc; } #if !defined(CRC32C_NO_HARDWARE) static uint32_t cpuid(uint32_t functionInput) { uint32_t eax; uint32_t ebx; uint32_t ecx; uint32_t edx; /* The code below adapted from http://en.wikipedia.org/wiki/CPUID * and seems to work for both PIC and non-PIC cases */ __asm__ __volatile__( #if defined(CRC32C_x86_64) "pushq %%rbx \n\t" /* save %rbx */ #else /* 32-bit */ "pushl %%ebx \n\t" /* save %ebx */ #endif "cpuid \n\t" "movl %%ebx, %[ebx] \n\t" /* copy %ebx contents into output var */ #if defined(CRC32C_x86_64) "popq %%rbx \n\t" /* restore %rbx */ #else /* 32-bit */ "popl %%ebx \n\t" /* restore %ebx */ #endif : "=a"(eax), [ebx] "=r"(ebx), "=c"(ecx), "=d"(edx) : "a"(functionInput) ); return ecx; } CRC32CFunctionPtr detectBestCRC32C() { static const int SSE42_BIT = 20; uint32_t ecx = cpuid(1); bool hasSSE42 = ecx & (1 << SSE42_BIT); if (hasSSE42) { #if defined(CRC32C_x86_64) return crc32cHardware64; #else return crc32cHardware32; #endif } else { 
return crc32cSlicingBy8; } } #include // Hardware-accelerated CRC-32C (using CRC32 instruction) uint32_t crc32cHardware32(uint32_t crc, const void* data, size_t length) { const char* p_buf = (const char*) data; // alignment doesn't seem to help? for (size_t i = 0; i < length / sizeof(uint32_t); i++) { crc = __builtin_ia32_crc32si(crc, *(uint32_t*) p_buf); p_buf += sizeof(uint32_t); } // This ugly switch is slightly faster for short strings than the straightforward loop length &= sizeof(uint32_t) - 1; /* while (length > 0) { crc32bit = __builtin_ia32_crc32qi(crc32bit, *p_buf++); length--; } */ switch (length) { case 3: crc = __builtin_ia32_crc32qi(crc, *p_buf++); case 2: crc = __builtin_ia32_crc32hi(crc, *(uint16_t*) p_buf); break; case 1: crc = __builtin_ia32_crc32qi(crc, *p_buf); break; case 0: break; default: // This should never happen; enable in debug code assert(false); } return crc; } // Hardware-accelerated CRC-32C (using CRC64 instruction) uint32_t crc32cHardware64(uint32_t crc, const void* data, size_t length) { #ifndef __LP64__ return crc32cHardware32(crc, data, length); #else const char* p_buf = (const char*) data; // alignment doesn't seem to help? uint64_t crc64bit = crc; for (size_t i = 0; i < length / sizeof(uint64_t); i++) { crc64bit = __builtin_ia32_crc32di(crc64bit, *(uint64_t*) p_buf); p_buf += sizeof(uint64_t); } // This ugly switch is slightly faster for short strings than the straightforward loop uint32_t crc32bit = (uint32_t) crc64bit; length &= sizeof(uint64_t) - 1; /* while (length > 0) { crc32bit = __builtin_ia32_crc32qi(crc32bit, *p_buf++); length--; } */ switch (length) { case 7: crc32bit = __builtin_ia32_crc32qi(crc32bit, *p_buf++); case 6: crc32bit = __builtin_ia32_crc32hi(crc32bit, *(uint16_t*) p_buf); p_buf += 2; // case 5 is below: 4 + 1 case 4: crc32bit = __builtin_ia32_crc32si(crc32bit, *(uint32_t*) p_buf); break; case 3: crc32bit = __builtin_ia32_crc32qi(crc32bit, *p_buf++); case 2: crc32bit = __builtin_ia32_crc32hi(crc32bit, *(uint16_t*) p_buf); break; case 5: crc32bit = __builtin_ia32_crc32si(crc32bit, *(uint32_t*) p_buf); p_buf += 4; case 1: crc32bit = __builtin_ia32_crc32qi(crc32bit, *p_buf); break; case 0: break; default: // This should never happen; enable in debug code assert(false); } return crc32bit; #endif /* __LP64__ */ } #else /* no CRC32C HW acceleration */ CRC32CFunctionPtr detectBestCRC32C() { /* this actually requires some benchmarking... */ return crc32cSlicingBy8; } #endif /* CRC32C_NO_HARDWARE */ static uint32_t crc32c_CPUDetection(uint32_t crc, const void* data, size_t length) { // Avoid issues that could potentially be caused by multiple threads: use a local variable CRC32CFunctionPtr best = detectBestCRC32C(); crc32c = best; return best(crc, data, length); } CRC32CFunctionPtr crc32c = crc32c_CPUDetection; #if defined(__cplusplus) } #endif galera-3-25.3.20/CONTRIBUTORS.txt0000644000015300001660000000313413042054732015635 0ustar jenkinsjenkinsAll contributors are required to add their name and [Github username/email] to this file in connection with their first contribution. If you are making a contribution on behalf of a company, you should add the said company name. By adding your name and [Github username/email] to this file you agree that your contribution is a contribution under a contributor agreement between you and Codership Oy. 
To the extent that you are an employee of a company and contribute in that role, you confirm that your contribution is a contribution under the contribution license agreement between your employer and Codership Oy; and that you have the authorization to give such confirmation. You confirm that you have read, understood and signed the contributor license agreement applicable to you. For the individual contributor agreement see file CONTRIBUTOR_AGREEMENT.txt in the same directory as this file. Authors from Codership Oy: * Alexey Yurchenko , Codership Oy * Seppo Jaakola , Codership Oy * Teemu Ollakka , Codership Oy * Daniele Sciascia , Codership Oy * Philip Stoev , Codership Oy [Codership employees, add name and email/username above this line, but leave this line intact] Other contributors: * Stefan Langenmaier * Christian Hesse * Andrzej Godziuk * Otto Kekäläinen , Seravo Oy [add name and email/username above this line, but leave this line intact] galera-3-25.3.20/common/0000755000015300001660000000000013042054732014426 5ustar jenkinsjenkinsgalera-3-25.3.20/common/common.h0000644000015300001660000000100513042054732016063 0ustar jenkinsjenkins/* *Copyright (C) 2012-2014 Codership Oy */ /*! @file Stores some common definitions to be known throughout the modules */ #ifndef COMMON_DEFS_H #define COMMON_DEFS_H #define COMMON_BASE_HOST_KEY "base_host" #define COMMON_BASE_PORT_KEY "base_port" #define COMMON_BASE_PORT_DEFAULT "4567" #define COMMON_BASE_DIR_KEY "base_dir" #define COMMON_BASE_DIR_DEFAULT "." #define COMMON_STATE_FILE "grastate.dat" #define COMMON_VIEW_STAT_FILE "gvwstate.dat" #endif // COMMON_DEFS_H galera-3-25.3.20/common/wsrep_api.h0000644000015300001660000011525613042054732016602 0ustar jenkinsjenkins/* Copyright (C) 2009-2013 Codership Oy This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ /*! @file wsrep API declaration. HOW TO READ THIS FILE. Due to C language rules this header layout doesn't lend itself to intuitive reading. So here's the scoop: in the end this header declares two main types: * struct wsrep_init_args and * struct wsrep wsrep_init_args contains initialization parameters for wsrep provider like names, addresses, etc. and pointers to callbacks. The callbacks will be called by provider when it needs to do something application-specific, like log a message or apply a writeset. It should be passed to init() call from wsrep API. It is an application part of wsrep API contract. struct wsrep is the interface to wsrep provider. It contains all wsrep API calls. It is a provider part of wsrep API contract. Finally, wsrep_load() method loads (dlopens) wsrep provider library. It is defined in wsrep_loader.c unit and is part of libwsrep.a (which is not a wsrep provider, but a convenience library). wsrep_unload() does the reverse. 
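
  Editor's note -- an illustrative sketch of the intended call sequence, not
  part of the original header. Error handling is trimmed, and the names
  example_connect(), "example_node", "example_cluster" are hypothetical;
  only the struct members and function pointers declared below are real.
  A wsrep_t* is assumed to have been obtained via wsrep_load() beforehand.

      static wsrep_status_t example_connect (wsrep_t* wsrep,
                                             void*    app_ctx,
                                             void*    recv_ctx)
      {
          struct wsrep_init_args args = {
              .app_ctx   = app_ctx,
              .node_name = "example_node",
              .options   = ""
              // a real application must also fill in the callbacks:
              // logger_cb, view_handler_cb, apply_cb, commit_cb, ...
          };

          wsrep_status_t rc = wsrep->init(wsrep, &args);
          if (rc != WSREP_OK) return rc;

          rc = wsrep->connect(wsrep, "example_cluster", "gcomm://", "",
                              0 /-bootstrap: join an existing cluster-/);
          if (rc != WSREP_OK) return rc;

          return wsrep->recv(wsrep, recv_ctx);  // blocks in the applier loop
      }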
*/ #ifndef WSREP_H #define WSREP_H #include #include #include #include #include #ifdef __cplusplus extern "C" { #endif /************************************************************************** * * * wsrep replication API * * * **************************************************************************/ #define WSREP_INTERFACE_VERSION "25" /*! Empty backend spec */ #define WSREP_NONE "none" /*! * @brief log severity levels, passed as first argument to log handler */ typedef enum wsrep_log_level { WSREP_LOG_FATAL, //!< Unrecoverable error, application must quit. WSREP_LOG_ERROR, //!< Operation failed, must be repeated. WSREP_LOG_WARN, //!< Unexpected condition, but no operational failure. WSREP_LOG_INFO, //!< Informational message. WSREP_LOG_DEBUG //!< Debug message. Shows only of compiled with debug. } wsrep_log_level_t; /*! * @brief error log handler * * All messages from wsrep provider are directed to this * handler, if present. * * @param level log level * @param message log message */ typedef void (*wsrep_log_cb_t)(wsrep_log_level_t, const char *); /*! * Certain provider capabilities application may want to know about */ #define WSREP_CAP_MULTI_MASTER ( 1ULL << 0 ) #define WSREP_CAP_CERTIFICATION ( 1ULL << 1 ) #define WSREP_CAP_PARALLEL_APPLYING ( 1ULL << 2 ) #define WSREP_CAP_TRX_REPLAY ( 1ULL << 3 ) #define WSREP_CAP_ISOLATION ( 1ULL << 4 ) #define WSREP_CAP_PAUSE ( 1ULL << 5 ) #define WSREP_CAP_CAUSAL_READS ( 1ULL << 6 ) #define WSREP_CAP_CAUSAL_TRX ( 1ULL << 7 ) #define WSREP_CAP_INCREMENTAL_WRITESET ( 1ULL << 8 ) #define WSREP_CAP_SESSION_LOCKS ( 1ULL << 9 ) #define WSREP_CAP_DISTRIBUTED_LOCKS ( 1ULL << 10 ) #define WSREP_CAP_CONSISTENCY_CHECK ( 1ULL << 11 ) #define WSREP_CAP_UNORDERED ( 1ULL << 12 ) #define WSREP_CAP_ANNOTATION ( 1ULL << 13 ) #define WSREP_CAP_PREORDERED ( 1ULL << 14 ) /*! * Writeset flags * * COMMIT the writeset and all preceding writesets must be committed * ROLLBACK all preceding writesets in a transaction must be rolled back * ISOLATION the writeset must be applied AND committed in isolation * PA_UNSAFE the writeset cannot be applied in parallel * COMMUTATIVE the order in which the writeset is applied does not matter * NATIVE the writeset contains another writeset in this provider format * * Note that some of the flags are mutually exclusive (e.g. COMMIT and * ROLLBACK). */ #define WSREP_FLAG_COMMIT ( 1ULL << 0 ) #define WSREP_FLAG_ROLLBACK ( 1ULL << 1 ) #define WSREP_FLAG_ISOLATION ( 1ULL << 2 ) #define WSREP_FLAG_PA_UNSAFE ( 1ULL << 3 ) #define WSREP_FLAG_COMMUTATIVE ( 1ULL << 4 ) #define WSREP_FLAG_NATIVE ( 1ULL << 5 ) typedef uint64_t wsrep_trx_id_t; //!< application transaction ID typedef uint64_t wsrep_conn_id_t; //!< application connection ID typedef int64_t wsrep_seqno_t; //!< sequence number of a writeset, etc. #ifdef __cplusplus typedef bool wsrep_bool_t; #else typedef _Bool wsrep_bool_t; //!< should be the same as standard (C99) bool #endif /* __cplusplus */ /*! undefined seqno */ #define WSREP_SEQNO_UNDEFINED (-1) /*! 
wsrep provider status codes */ typedef enum wsrep_status { WSREP_OK = 0, //!< success WSREP_WARNING, //!< minor warning, error logged WSREP_TRX_MISSING, //!< transaction is not known by wsrep WSREP_TRX_FAIL, //!< transaction aborted, server can continue WSREP_BF_ABORT, //!< trx was victim of brute force abort WSREP_SIZE_EXCEEDED, //!< data exceeded maximum supported size WSREP_CONN_FAIL, //!< error in client connection, must abort WSREP_NODE_FAIL, //!< error in node state, wsrep must reinit WSREP_FATAL, //!< fatal error, server must abort WSREP_NOT_IMPLEMENTED //!< feature not implemented } wsrep_status_t; /*! wsrep callbacks status codes */ typedef enum wsrep_cb_status { WSREP_CB_SUCCESS = 0, //!< success (as in "not critical failure") WSREP_CB_FAILURE //!< critical failure (consistency violation) /* Technically, wsrep provider has no use for specific failure codes since * there is nothing it can do about it but abort execution. Therefore any * positive number shall indicate a critical failure. Optionally that value * may be used by provider to come to a consensus about state consistency * in a group of nodes. */ } wsrep_cb_status_t; /*! * UUID type - for all unique IDs */ typedef struct wsrep_uuid { uint8_t data[16]; } wsrep_uuid_t; /*! Undefined UUID */ static const wsrep_uuid_t WSREP_UUID_UNDEFINED = {{0,}}; /*! UUID string representation length, terminating '\0' not included */ #define WSREP_UUID_STR_LEN 36 /*! * Scan UUID from string * @return length of UUID string representation or negative error code */ extern int wsrep_uuid_scan (const char* str, size_t str_len, wsrep_uuid_t* uuid); /*! * Print UUID to string * @return length of UUID string representation or negative error code */ extern int wsrep_uuid_print (const wsrep_uuid_t* uuid, char* str, size_t str_len); #define WSREP_MEMBER_NAME_LEN 32 //!< maximum logical member name length #define WSREP_INCOMING_LEN 256 //!< max Domain Name length + 0x00 /*! * Global transaction identifier */ typedef struct wsrep_gtid { wsrep_uuid_t uuid; /*!< History UUID */ wsrep_seqno_t seqno; /*!< Sequence number */ } wsrep_gtid_t; /*! Undefined GTID */ static const wsrep_gtid_t WSREP_GTID_UNDEFINED = {{{0, }}, -1}; /*! Minimum number of bytes guaranteed to store GTID string representation, * terminating '\0' not included (36 + 1 + 20) */ #define WSREP_GTID_STR_LEN 57 /*! * Scan GTID from string * @return length of GTID string representation or negative error code */ extern int wsrep_gtid_scan(const char* str, size_t str_len, wsrep_gtid_t* gtid); /*! * Print GTID to string * @return length of GTID string representation or negative error code */ extern int wsrep_gtid_print(const wsrep_gtid_t* gtid, char* str, size_t str_len); /*! * Transaction meta data */ typedef struct wsrep_trx_meta { wsrep_gtid_t gtid; /*!< Global transaction identifier */ wsrep_seqno_t depends_on; /*!< Sequence number part of the last transaction this transaction depends on */ } wsrep_trx_meta_t; /*! * member status */ typedef enum wsrep_member_status { WSREP_MEMBER_UNDEFINED, //!< undefined state WSREP_MEMBER_JOINER, //!< incomplete state, requested state transfer WSREP_MEMBER_DONOR, //!< complete state, donates state transfer WSREP_MEMBER_JOINED, //!< complete state WSREP_MEMBER_SYNCED, //!< complete state, synchronized with group WSREP_MEMBER_ERROR, //!< this and above is provider-specific error code WSREP_MEMBER_MAX } wsrep_member_status_t; /*! 
* static information about a group member (some fields are tentative yet) */ typedef struct wsrep_member_info { wsrep_uuid_t id; //!< group-wide unique member ID char name[WSREP_MEMBER_NAME_LEN]; //!< human-readable name char incoming[WSREP_INCOMING_LEN]; //!< address for client requests } wsrep_member_info_t; /*! * group status */ typedef enum wsrep_view_status { WSREP_VIEW_PRIMARY, //!< primary group configuration (quorum present) WSREP_VIEW_NON_PRIMARY, //!< non-primary group configuration (quorum lost) WSREP_VIEW_DISCONNECTED, //!< not connected to group, retrying. WSREP_VIEW_MAX } wsrep_view_status_t; /*! * view of the group */ typedef struct wsrep_view_info { wsrep_gtid_t state_id; //!< global state ID wsrep_seqno_t view; //!< global view number wsrep_view_status_t status; //!< view status wsrep_bool_t state_gap; //!< gap between global and local states int my_idx; //!< index of this member in the view int memb_num; //!< number of members in the view int proto_ver; //!< application protocol agreed on the view wsrep_member_info_t members[1];//!< array of member information } wsrep_view_info_t; /*! * Magic string to tell provider to engage into trivial (empty) state transfer. * No data will be passed, but the node shall be considered JOINED. * Should be passed in sst_req parameter of wsrep_view_cb_t. */ #define WSREP_STATE_TRANSFER_TRIVIAL "trivial" /*! * Magic string to tell provider not to engage in state transfer at all. * The member will stay in WSREP_MEMBER_UNDEFINED state but will keep on * receiving all writesets. * Should be passed in sst_req parameter of wsrep_view_cb_t. */ #define WSREP_STATE_TRANSFER_NONE "none" /*! * @brief group view handler * * This handler is called in total order corresponding to the group * configuration change. It is to provide vital information about * the new group view. If view info indicates existence of discontinuity * between group and member states, state transfer request message * should be filled in by the callback implementation. * * @note Currently it is assumed that sst_req is allocated using * malloc()/calloc()/realloc() and it will be freed by * wsrep implementation. * * @param app_ctx application context * @param recv_ctx receiver context * @param view new view on the group * @param state current state * @param state_len length of current state * @param sst_req location to store SST request * @param sst_req_len location to store SST request length or error code, * value of 0 means no SST. */ typedef enum wsrep_cb_status (*wsrep_view_cb_t) ( void* app_ctx, void* recv_ctx, const wsrep_view_info_t* view, const char* state, size_t state_len, void** sst_req, size_t* sst_req_len ); /*! * @brief apply callback * * This handler is called from wsrep library to apply replicated writeset * Must support brute force applying for multi-master operation * * @param recv_ctx receiver context pointer provided by the application * @param data data buffer containing the writeset * @param size data buffer size * @param flags WSREP_FLAG_... flags * @param meta transaction meta data of the writeset to be applied * * @return success code: * @retval WSREP_OK * @retval WSREP_NOT_IMPLEMENTED appl. does not support the writeset format * @retval WSREP_ERROR failed to apply the writeset */ typedef enum wsrep_cb_status (*wsrep_apply_cb_t) ( void* recv_ctx, const void* data, size_t size, uint32_t flags, const wsrep_trx_meta_t* meta ); /*! * @brief commit callback * * This handler is called to commit the changes made by apply callback.
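 *
 * Editor's illustrative sketch (not part of the original documentation):
 * a minimal commit callback could look roughly as follows, where
 * my_applier_t, my_engine_commit() and my_engine_rollback() stand for
 * hypothetical application code.
 *
 *   static enum wsrep_cb_status example_commit_cb (void*                   recv_ctx,
 *                                                  uint32_t                flags,
 *                                                  const wsrep_trx_meta_t* meta,
 *                                                  wsrep_bool_t*           exit,
 *                                                  wsrep_bool_t            commit)
 *   {
 *       my_applier_t* applier = (my_applier_t*)recv_ctx;
 *       (void)flags; (void)meta;
 *       *exit = 0;  // keep the receiver loop running
 *       int err = commit ? my_engine_commit(applier)
 *                        : my_engine_rollback(applier);
 *       return 0 == err ? WSREP_CB_SUCCESS : WSREP_CB_FAILURE;
 *   }
 *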
* * @param recv_ctx receiver context pointer provided by the application * @param flags WSREP_FLAG_... flags * @param meta transaction meta data of the writeset to be committed * @param exit set to true to exit recv loop * @param commit true - commit writeset, false - rollback writeset * * @return success code: * @retval WSREP_OK * @retval WSREP_ERROR call failed */ typedef enum wsrep_cb_status (*wsrep_commit_cb_t) ( void* recv_ctx, uint32_t flags, const wsrep_trx_meta_t* meta, wsrep_bool_t* exit, wsrep_bool_t commit ); /*! * @brief unordered callback * * This handler is called to execute unordered actions (actions that need not * to be executed in any particular order) attached to writeset. * * @param recv_ctx receiver context pointer provided by the application * @param data data buffer containing the writeset * @param size data buffer size */ typedef enum wsrep_cb_status (*wsrep_unordered_cb_t) ( void* recv_ctx, const void* data, size_t size ); /*! * @brief a callback to donate state snapshot * * This handler is called from wsrep library when it needs this node * to deliver state to a new cluster member. * No state changes will be committed for the duration of this call. * Wsrep implementation may provide internal state to be transmitted * to new cluster member for initial state. * * @param app_ctx application context * @param recv_ctx receiver context * @param msg state transfer request message * @param msg_len state transfer request message length * @param gtid current state ID on this node * @param state current wsrep internal state buffer * @param state_len current wsrep internal state buffer len * @param bypass bypass snapshot transfer, only transfer uuid:seqno pair */ typedef enum wsrep_cb_status (*wsrep_sst_donate_cb_t) ( void* app_ctx, void* recv_ctx, const void* msg, size_t msg_len, const wsrep_gtid_t* state_id, const char* state, size_t state_len, wsrep_bool_t bypass ); /*! * @brief a callback to signal application that wsrep state is synced * with cluster * * This callback is called after wsrep library has got in sync with * rest of the cluster. * * @param app_ctx application context */ typedef void (*wsrep_synced_cb_t) (void* app_ctx); /*! * Initialization parameters for wsrep provider. */ struct wsrep_init_args { void* app_ctx; //!< Application context for callbacks /* Configuration parameters */ const char* node_name; //!< Symbolic name of this node (e.g. hostname) const char* node_address; //!< Address to be used by wsrep provider const char* node_incoming; //!< Address for incoming client connections const char* data_dir; //!< Directory where wsrep files are kept if any const char* options; //!< Provider-specific configuration string int proto_ver; //!< Max supported application protocol version /* Application initial state information. */ const wsrep_gtid_t* state_id; //!< Application state GTID const char* state; //!< Initial state for wsrep provider size_t state_len; //!< Length of state buffer /* Application callbacks */ wsrep_log_cb_t logger_cb; //!< logging handler wsrep_view_cb_t view_handler_cb; //!< group view change handler /* Applier callbacks */ wsrep_apply_cb_t apply_cb; //!< apply callback wsrep_commit_cb_t commit_cb; //!< commit callback wsrep_unordered_cb_t unordered_cb; //!< callback for unordered actions /* State Snapshot Transfer callbacks */ wsrep_sst_donate_cb_t sst_donate_cb; //!< starting to donate wsrep_synced_cb_t synced_cb; //!< synced with group }; /*! 
Type of the stats variable value in struct wsrep_status_var */ typedef enum wsrep_var_type { WSREP_VAR_STRING, //!< pointer to null-terminated string WSREP_VAR_INT64, //!< int64_t WSREP_VAR_DOUBLE //!< double } wsrep_var_type_t; /*! Generalized stats variable representation */ struct wsrep_stats_var { const char* name; //!< variable name wsrep_var_type_t type; //!< variable value type union { int64_t _int64; double _double; const char* _string; } value; //!< variable value }; /*! Abstract data buffer structure */ typedef struct wsrep_buf { const void* ptr; /*!< Pointer to data buffer */ size_t len; /*!< Length of buffer */ } wsrep_buf_t; /*! Key struct used to pass certification keys for transaction handling calls. * A key consists of zero or more key parts. */ typedef struct wsrep_key { const wsrep_buf_t* key_parts; /*!< Array of key parts */ size_t key_parts_num; /*!< Number of key parts */ } wsrep_key_t; /*! Key type: * EXCLUSIVE conflicts with any key type * SEMI reserved. If not supported, should be interpreted as EXCLUSIVE * SHARED conflicts only with EXCLUSIVE keys */ typedef enum wsrep_key_type { WSREP_KEY_SHARED = 0, WSREP_KEY_SEMI, WSREP_KEY_EXCLUSIVE } wsrep_key_type_t; /*! Data type: * ORDERED state modification event that should be applied and committed * in order. * UNORDERED some action that does not modify state and execution of which is * optional and does not need to happen in order. * ANNOTATION (human readable) writeset annotation. */ typedef enum wsrep_data_type { WSREP_DATA_ORDERED = 0, WSREP_DATA_UNORDERED, WSREP_DATA_ANNOTATION } wsrep_data_type_t; /*! Transaction handle struct passed for wsrep transaction handling calls */ typedef struct wsrep_ws_handle { wsrep_trx_id_t trx_id; //!< transaction ID void* opaque; //!< opaque provider transaction context data } wsrep_ws_handle_t; /*! * @brief Helper method to reset trx writeset handle state when trx id changes * * Instead of passing wsrep_ws_handle_t directly to wsrep calls, * wrapping handle with this call offloads bookkeeping from * application. */ static inline wsrep_ws_handle_t* wsrep_ws_handle_for_trx( wsrep_ws_handle_t* ws_handle, wsrep_trx_id_t trx_id) { if (ws_handle->trx_id != trx_id) { ws_handle->trx_id = trx_id; ws_handle->opaque = NULL; } return ws_handle; } /*! * A handle for processing preordered actions. * Must be initialized to WSREP_PO_INITIALIZER before use. */ typedef struct wsrep_po_handle { void* opaque; } wsrep_po_handle_t; static const wsrep_po_handle_t WSREP_PO_INITIALIZER = { NULL }; typedef struct wsrep wsrep_t; /*! * wsrep interface for dynamically loadable libraries */ struct wsrep { const char *version; //!< interface version string /*! * @brief Initializes wsrep provider * * @param wsrep provider handle * @param args wsrep initialization parameters */ wsrep_status_t (*init) (wsrep_t* wsrep, const struct wsrep_init_args* args); /*! * @brief Returns provider capabilities flag bitmap * * @param wsrep provider handle */ uint64_t (*capabilities) (wsrep_t* wsrep); /*! * @brief Passes provider-specific configuration string to provider. * * @param wsrep provider handle * @param conf configuration string * * @retval WSREP_OK configuration string was parsed successfully * @retval WSREP_WARNING could not parse conf string, no action taken */ wsrep_status_t (*options_set) (wsrep_t* wsrep, const char* conf); /*! * @brief Returns provider-specific string with current configuration values.
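 *
 * Editor's illustrative sketch (not part of the original documentation),
 * assuming the returned string is malloc()-allocated and therefore released
 * with free(); handle_options() is a hypothetical application helper:
 *
 *   char* opts = wsrep->options_get(wsrep);
 *   if (opts)
 *   {
 *       handle_options(opts);   // e.g. log or display the option string
 *       free(opts);
 *   }
 *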
* * @param wsrep provider handle * * @return a dynamically allocated string with current configuration * parameter values */ char* (*options_get) (wsrep_t* wsrep); /*! * @brief Opens connection to cluster * * Returns when either node is ready to operate as a part of the clsuter * or fails to reach operating status. * * @param wsrep provider handle * @param cluster_name unique symbolic cluster name * @param cluster_url URL-like cluster address (backend://address) * @param state_donor name of the node to be asked for state transfer. * @param bootstrap a flag to request initialization of a new wsrep * service rather then a connection to the existing one. * clister_url may still carry important initialization * parameters, like backend spec and/or listen address. */ wsrep_status_t (*connect) (wsrep_t* wsrep, const char* cluster_name, const char* cluster_url, const char* state_donor, wsrep_bool_t bootstrap); /*! * @brief Closes connection to cluster. * * If state_uuid and/or state_seqno is not NULL, will store final state * in there. * * @param wsrep this wsrep handler */ wsrep_status_t (*disconnect)(wsrep_t* wsrep); /*! * @brief start receiving replication events * * This function never returns * * @param wsrep provider handle * @param recv_ctx receiver context */ wsrep_status_t (*recv)(wsrep_t* wsrep, void* recv_ctx); /*! * @brief Replicates/logs result of transaction to other nodes and allocates * required resources. * * Must be called before transaction commit. Returns success code, which * caller must check. * In case of WSREP_OK, starts commit critical section, transaction can * commit. Otherwise transaction must rollback. * * @param wsrep provider handle * @param ws_handle writeset of committing transaction * @param conn_id connection ID * @param flags fine tuning the replication WSREP_FLAG_* * @param meta transaction meta data * * @retval WSREP_OK cluster-wide commit succeeded * @retval WSREP_TRX_FAIL must rollback transaction * @retval WSREP_CONN_FAIL must close client connection * @retval WSREP_NODE_FAIL must close all connections and reinit */ wsrep_status_t (*pre_commit)(wsrep_t* wsrep, wsrep_conn_id_t conn_id, wsrep_ws_handle_t* ws_handle, uint32_t flags, wsrep_trx_meta_t* meta); /*! * @brief Releases resources after transaction commit. * * Ends commit critical section. * * @param wsrep provider handle * @param ws_handle writeset of committing transaction * @retval WSREP_OK post_commit succeeded */ wsrep_status_t (*post_commit) (wsrep_t* wsrep, wsrep_ws_handle_t* ws_handle); /*! * @brief Releases resources after transaction rollback. * * @param wsrep provider handle * @param ws_handle writeset of committing transaction * @retval WSREP_OK post_rollback succeeded */ wsrep_status_t (*post_rollback)(wsrep_t* wsrep, wsrep_ws_handle_t* ws_handle); /*! * @brief Replay trx as a slave writeset * * If local trx has been aborted by brute force, and it has already * replicated before this abort, we must try if we can apply it as * slave trx. Note that slave nodes see only trx writesets and certification * test based on write set content can be different to DBMS lock conflicts. 
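/* A minimal sketch of the commit path implied by the calls above: certify
 * with pre_commit(), commit locally under WSREP_OK, otherwise roll back.
 * Replaying on WSREP_BF_ABORT (via replay_trx(), declared just below) is how
 * typical integrations react and is shown here as an assumption; WSREP_FLAG_*
 * constants are defined earlier in this header.  local_commit() and
 * local_rollback() are hypothetical application hooks. */
static wsrep_status_t example_commit(wsrep_t* w, wsrep_conn_id_t conn,
                                     wsrep_ws_handle_t* ws, void* trx_ctx)
{
    wsrep_trx_meta_t meta;
    wsrep_status_t rc = w->pre_commit(w, conn, ws, WSREP_FLAG_COMMIT, &meta);

    if (rc == WSREP_OK)                /* inside the commit critical section */
    {
        /* local_commit(trx_ctx); */
        return w->post_commit(w, ws);  /* leaves the critical section */
    }

    /* local_rollback(trx_ctx); */

    if (rc == WSREP_BF_ABORT)          /* replicated, then brute-force aborted */
        return w->replay_trx(w, ws, trx_ctx);

    return w->post_rollback(w, ws);
}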
* * @param wsrep provider handle * @param ws_handle writeset of committing transaction * @param trx_ctx transaction context * * @retval WSREP_OK cluster commit succeeded * @retval WSREP_TRX_FAIL must rollback transaction * @retval WSREP_BF_ABORT brute force abort happened after trx replicated * must rollback transaction and try to replay * @retval WSREP_CONN_FAIL must close client connection * @retval WSREP_NODE_FAIL must close all connections and reinit */ wsrep_status_t (*replay_trx)(wsrep_t* wsrep, wsrep_ws_handle_t* ws_handle, void* trx_ctx); /*! * @brief Abort pre_commit() call of another thread. * * It is possible, that some high-priority transaction needs to abort * another transaction which is in pre_commit() call waiting for resources. * * The kill routine checks that abort is not attmpted against a transaction * which is front of the caller (in total order). * * @param wsrep provider handle * @param bf_seqno seqno of brute force trx, running this cancel * @param victim_trx transaction to be aborted, and which is committing * * @retval WSREP_OK abort secceded * @retval WSREP_WARNING abort failed */ wsrep_status_t (*abort_pre_commit)(wsrep_t* wsrep, wsrep_seqno_t bf_seqno, wsrep_trx_id_t victim_trx); /*! * @brief Appends a row reference to transaction writeset * * Both copy flag and key_type can be ignored by provider (key type * interpreted as WSREP_KEY_EXCLUSIVE). * * @param wsrep provider handle * @param ws_handle writeset handle * @param keys array of keys * @param count length of the array of keys * @param type type ot the key * @param copy can be set to FALSE if keys persist through commit. */ wsrep_status_t (*append_key)(wsrep_t* wsrep, wsrep_ws_handle_t* ws_handle, const wsrep_key_t* keys, size_t count, enum wsrep_key_type type, wsrep_bool_t copy); /*! * @brief Appends data to transaction writeset * * This method can be called any time before commit and it * appends a number of data buffers to transaction writeset. * * Both copy and unordered flags can be ignored by provider. * * @param wsrep provider handle * @param ws_handle writeset handle * @param data array of data buffers * @param count buffer count * @param type type of data * @param copy can be set to FALSE if data persists through commit. */ wsrep_status_t (*append_data)(wsrep_t* wsrep, wsrep_ws_handle_t* ws_handle, const struct wsrep_buf* data, size_t count, enum wsrep_data_type type, wsrep_bool_t copy); /*! * @brief Get causal ordering for read operation * * This call will block until causal ordering with all possible * preceding writes in the cluster is guaranteed. If pointer to * gtid is non-null, the call stores the global transaction ID * of the last transaction which is guaranteed to be ordered * causally before this call. * * @param wsrep provider handle * @param gtid location to store GTID */ wsrep_status_t (*causal_read)(wsrep_t* wsrep, wsrep_gtid_t* gtid); /*! * @brief Clears allocated connection context. * * Whenever a new connection ID is passed to wsrep provider through * any of the API calls, a connection context is allocated for this * connection. This call is to explicitly notify provider to close the * connection. * * @param wsrep provider handle * @param conn_id connection ID * @param query the 'set database' query * @param query_len length of query (does not end with 0) */ wsrep_status_t (*free_connection)(wsrep_t* wsrep, wsrep_conn_id_t conn_id); /*! * @brief Replicates a query and starts "total order isolation" section. 
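/* A sketch of populating a writeset before certification: one shared key and
 * one ordered data buffer, using the append_key()/append_data() calls above.
 * The payload is a stand-in for a real replication event. */
static wsrep_status_t example_append(wsrep_t* w, wsrep_ws_handle_t* ws,
                                     const wsrep_key_t* key)
{
    static const char payload[] = "row-image";
    const wsrep_buf_t data = { payload, sizeof(payload) - 1 };

    wsrep_status_t rc = w->append_key(w, ws, key, 1, WSREP_KEY_SHARED, 1);
    if (rc != WSREP_OK) return rc;

    /* copy == 1 because 'data' does not persist beyond this call */
    return w->append_data(w, ws, &data, 1, WSREP_DATA_ORDERED, 1);
}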
* * Replicates the action spec and returns success code, which caller must * check. Total order isolation continues until to_execute_end() is called. * * @param wsrep provider handle * @param conn_id connection ID * @param keys array of keys * @param keys_num lenght of the array of keys * @param action action buffer array to be executed * @param count action buffer count * @param meta transaction meta data * * @retval WSREP_OK cluster commit succeeded * @retval WSREP_CONN_FAIL must close client connection * @retval WSREP_NODE_FAIL must close all connections and reinit */ wsrep_status_t (*to_execute_start)(wsrep_t* wsrep, wsrep_conn_id_t conn_id, const wsrep_key_t* keys, size_t keys_num, const struct wsrep_buf* action, size_t count, wsrep_trx_meta_t* meta); /*! * @brief Ends the total order isolation section. * * Marks the end of total order isolation. TO locks are freed * and other transactions are free to commit from this point on. * * @param wsrep provider handle * @param conn_id connection ID * * @retval WSREP_OK cluster commit succeeded * @retval WSREP_CONN_FAIL must close client connection * @retval WSREP_NODE_FAIL must close all connections and reinit */ wsrep_status_t (*to_execute_end)(wsrep_t* wsrep, wsrep_conn_id_t conn_id); /*! * @brief Collects preordered replication events into a writeset. * * @param wsrep wsrep provider handle * @param handle a handle associated with a given writeset * @param data an array of data buffers. * @param count length of data buffer array. * @param copy whether provider needs to make a copy of events. * * @retval WSREP_OK cluster-wide commit succeeded * @retval WSREP_TRX_FAIL operation failed (e.g. trx size exceeded limit) * @retval WSREP_NODE_FAIL must close all connections and reinit */ wsrep_status_t (*preordered_collect) (wsrep_t* wsrep, wsrep_po_handle_t* handle, const struct wsrep_buf* data, size_t count, wsrep_bool_t copy); /*! * @brief "Commits" preordered writeset to cluster. * * The contract is that the writeset will be committed in the same (partial) * order this method was called. Frees resources associated with the writeset * handle and reinitializes the handle. * * @param wsrep wsrep provider handle * @param po_handle a handle associated with a given writeset * @param source_id ID of the event producer, also serves as the partial order * or stream ID - events with different source_ids won't be * ordered with respect to each other. * @param flags WSREP_FLAG_... flags * @param pa_range the number of preceding events this event can be processed * in parallel with. A value of 0 means strict serial * processing. Note: commits always happen in wsrep order. * @param commit 'true' to commit writeset to cluster (replicate) or * 'false' to rollback (cancel) the writeset. * * @retval WSREP_OK cluster-wide commit succeeded * @retval WSREP_TRX_FAIL operation failed (e.g. NON-PRIMARY component) * @retval WSREP_NODE_FAIL must close all connections and reinit */ wsrep_status_t (*preordered_commit) (wsrep_t* wsrep, wsrep_po_handle_t* handle, const wsrep_uuid_t* source_id, uint32_t flags, int pa_range, wsrep_bool_t commit); /*! * @brief Signals to wsrep provider that state snapshot has been sent to * joiner. * * @param wsrep provider handle * @param state_id state ID * @param rcode 0 or negative error code of the operation. */ wsrep_status_t (*sst_sent)(wsrep_t* wsrep, const wsrep_gtid_t* state_id, int rcode); /*! * @brief Signals to wsrep provider that new state snapshot has been received. * May deadlock if called from sst_prepare_cb. 
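/* A sketch of running a DDL-like action in total order isolation using the
 * to_execute_start()/to_execute_end() pair above.  run_ddl_locally() is a
 * hypothetical application hook; the statement text is an example. */
static wsrep_status_t example_toi(wsrep_t* w, wsrep_conn_id_t conn,
                                  const wsrep_key_t* keys, size_t keys_num)
{
    static const char ddl[] = "ALTER TABLE t1 ADD COLUMN c INT";
    const wsrep_buf_t action = { ddl, sizeof(ddl) - 1 };
    wsrep_trx_meta_t meta;

    wsrep_status_t rc = w->to_execute_start(w, conn, keys, keys_num,
                                            &action, 1, &meta);
    if (rc != WSREP_OK) return rc;     /* not replicated; nothing to end */

    /* run_ddl_locally(ddl); */        /* every node executes in the same order */

    return w->to_execute_end(w, conn); /* releases the TO locks */
}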
* * @param wsrep provider handle * @param state_id state ID * @param state initial state provided by SST donor * @param state_len length of state buffer * @param rcode 0 or negative error code of the operation. */ wsrep_status_t (*sst_received)(wsrep_t* wsrep, const wsrep_gtid_t* state_id, const void* state, size_t state_len, int rcode); /*! * @brief Generate request for consistent snapshot. * * If successfull, this call will generate internally SST request * which in turn triggers calling SST donate callback on the nodes * specified in donor_spec. If donor_spec is null, callback is * called only locally. This call will block until sst_sent is called * from callback. * * @param wsrep provider handle * @param msg context message for SST donate callback * @param msg_len length of context message * @param donor_spec list of snapshot donors */ wsrep_status_t (*snapshot)(wsrep_t* wsrep, const void* msg, size_t msg_len, const char* donor_spec); /*! * @brief Returns an array of status variables. * Array is terminated by Null variable name. * * @param wsrep provider handle * @return array of struct wsrep_status_var. */ struct wsrep_stats_var* (*stats_get) (wsrep_t* wsrep); /*! * @brief Release resources that might be associated with the array. * * @param wsrep provider handle. * @param var_array array returned by stats_get(). */ void (*stats_free) (wsrep_t* wsrep, struct wsrep_stats_var* var_array); /*! * @brief Reset some stats variables to inital value, provider-dependent. * * @param wsrep provider handle. */ void (*stats_reset) (wsrep_t* wsrep); /*! * @brief Pauses writeset applying/committing. * * @return global sequence number of the paused state or negative error code. */ wsrep_seqno_t (*pause) (wsrep_t* wsrep); /*! * @brief Resumes writeset applying/committing. */ wsrep_status_t (*resume) (wsrep_t* wsrep); /*! * @brief Desynchronize from cluster * * Effectively turns off flow control for this node, allowing it * to fall behind the cluster. */ wsrep_status_t (*desync) (wsrep_t* wsrep); /*! * @brief Request to resynchronize with cluster. * * Effectively turns on flow control. Asynchronous - actual synchronization * event to be deliverred via sync_cb. */ wsrep_status_t (*resync) (wsrep_t* wsrep); /*! * @brief Acquire global named lock * * @param wsrep wsrep provider handle * @param name lock name * @param shared shared or exclusive lock * @param owner 64-bit owner ID * @param tout timeout in nanoseconds. * 0 - return immediately, -1 wait forever. * @return wsrep status or negative error code * @retval -EDEADLK lock was already acquired by this thread * @retval -EBUSY lock was busy */ wsrep_status_t (*lock) (wsrep_t* wsrep, const char* name, wsrep_bool_t shared, uint64_t owner, int64_t tout); /*! * @brief Release global named lock * * @param wsrep wsrep provider handle * @param name lock name * @param owner 64-bit owner ID * @return wsrep status or negative error code * @retval -EPERM lock does not belong to this owner */ wsrep_status_t (*unlock) (wsrep_t* wsrep, const char* name, uint64_t owner); /*! * @brief Check if global named lock is locked * * @param wsrep wsrep provider handle * @param name lock name * @param owner if not NULL will contain 64-bit owner ID * @param node if not NULL will contain owner's node UUID * @return true if lock is locked */ wsrep_bool_t (*is_locked) (wsrep_t* wsrep, const char* name, uint64_t* conn, wsrep_uuid_t* node); /*! * wsrep provider name */ const char* provider_name; /*! * wsrep provider version */ const char* provider_version; /*! 
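/* A sketch of walking the status array returned by stats_get(); per the
 * contract above the array is terminated by an entry with a NULL name and
 * must be handed back to the provider with stats_free(). */
#include <stdio.h>

static void example_dump_stats(wsrep_t* w)
{
    struct wsrep_stats_var* stats = w->stats_get(w);
    if (stats == NULL) return;

    for (struct wsrep_stats_var* v = stats; v->name != NULL; ++v)
    {
        switch (v->type)
        {
        case WSREP_VAR_STRING:
            printf("%s = %s\n", v->name, v->value._string);             break;
        case WSREP_VAR_INT64:
            printf("%s = %lld\n", v->name, (long long)v->value._int64); break;
        case WSREP_VAR_DOUBLE:
            printf("%s = %f\n", v->name, v->value._double);             break;
        }
    }

    w->stats_free(w, stats);    /* release provider-owned resources */
}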
* wsrep provider vendor name */ const char* provider_vendor; /*! * @brief Frees allocated resources before unloading the library. * @param wsrep provider handle */ void (*free)(wsrep_t* wsrep); void *dlh; //!< reserved for future use void *ctx; //!< reserved for implemetation private context }; /*! * * @brief Loads wsrep library * * @param spec path to wsrep library. If NULL or WSREP_NONE initialises dummy * pass-through implementation. * @param hptr wsrep handle * @param log_cb callback to handle loader messages. Otherwise writes to stderr. * * @return zero on success, errno on failure */ int wsrep_load(const char* spec, wsrep_t** hptr, wsrep_log_cb_t log_cb); /*! * @brief Unloads wsrep library and frees associated resources * * @param hptr wsrep handler pointer */ void wsrep_unload(wsrep_t* hptr); #ifdef __cplusplus } #endif #endif /* WSREP_H */ galera-3-25.3.20/SConscript0000644000015300001660000000073113042054732015151 0ustar jenkinsjenkinsSConscript(['galerautils/SConscript', 'gcache/SConscript', 'gcomm/SConscript', 'gcs/SConscript', 'galera/SConscript', 'garb/SConscript']) Import('env', 'sysname') libmmgalera_objs = env['LIBGALERA_OBJS'] libmmgalera_objs.extend(env['LIBMMGALERA_OBJS']) if sysname == 'darwin': env.SharedLibrary('galera_smm', libmmgalera_objs, SHLIBSUFFIX='.so') else: env.SharedLibrary('galera_smm', libmmgalera_objs) galera-3-25.3.20/LICENSE0000644000015300001660000004325413042054732014153 0ustar jenkinsjenkins GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. 
Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. 
(Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. 
Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. 
The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. galera-3-25.3.20/asio/0000755000015300001660000000000013042054732014071 5ustar jenkinsjenkinsgalera-3-25.3.20/asio/asio.hpp0000644000015300001660000001045213042054732015537 0ustar jenkinsjenkins// // asio.hpp // ~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_HPP #define ASIO_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/async_result.hpp" #include "asio/basic_datagram_socket.hpp" #include "asio/basic_deadline_timer.hpp" #include "asio/basic_io_object.hpp" #include "asio/basic_raw_socket.hpp" #include "asio/basic_seq_packet_socket.hpp" #include "asio/basic_serial_port.hpp" #include "asio/basic_signal_set.hpp" #include "asio/basic_socket_acceptor.hpp" #include "asio/basic_socket_iostream.hpp" #include "asio/basic_socket_streambuf.hpp" #include "asio/basic_stream_socket.hpp" #include "asio/basic_streambuf.hpp" #include "asio/basic_waitable_timer.hpp" #include "asio/buffer.hpp" #include "asio/buffered_read_stream_fwd.hpp" #include "asio/buffered_read_stream.hpp" #include "asio/buffered_stream_fwd.hpp" #include "asio/buffered_stream.hpp" #include "asio/buffered_write_stream_fwd.hpp" #include "asio/buffered_write_stream.hpp" #include "asio/buffers_iterator.hpp" #include "asio/completion_condition.hpp" #include "asio/connect.hpp" #include "asio/coroutine.hpp" #include "asio/datagram_socket_service.hpp" #include "asio/deadline_timer_service.hpp" #include "asio/deadline_timer.hpp" #include "asio/error.hpp" #include "asio/error_code.hpp" #include "asio/generic/basic_endpoint.hpp" #include "asio/generic/datagram_protocol.hpp" #include "asio/generic/raw_protocol.hpp" #include "asio/generic/seq_packet_protocol.hpp" #include "asio/generic/stream_protocol.hpp" #include "asio/handler_alloc_hook.hpp" #include "asio/handler_continuation_hook.hpp" #include "asio/handler_invoke_hook.hpp" #include "asio/handler_type.hpp" #include "asio/io_service.hpp" #include "asio/ip/address.hpp" #include "asio/ip/address_v4.hpp" #include "asio/ip/address_v6.hpp" #include "asio/ip/basic_endpoint.hpp" #include "asio/ip/basic_resolver.hpp" #include "asio/ip/basic_resolver_entry.hpp" #include "asio/ip/basic_resolver_iterator.hpp" #include "asio/ip/basic_resolver_query.hpp" #include "asio/ip/host_name.hpp" #include "asio/ip/icmp.hpp" #include "asio/ip/multicast.hpp" #include "asio/ip/resolver_query_base.hpp" #include "asio/ip/resolver_service.hpp" #include "asio/ip/tcp.hpp" #include "asio/ip/udp.hpp" #include "asio/ip/unicast.hpp" #include "asio/ip/v6_only.hpp" #include "asio/is_read_buffered.hpp" #include "asio/is_write_buffered.hpp" #include "asio/local/basic_endpoint.hpp" #include "asio/local/connect_pair.hpp" #include "asio/local/datagram_protocol.hpp" #include "asio/local/stream_protocol.hpp" #include "asio/placeholders.hpp" #include "asio/posix/basic_descriptor.hpp" #include "asio/posix/basic_stream_descriptor.hpp" #include "asio/posix/descriptor_base.hpp" #include "asio/posix/stream_descriptor.hpp" #include "asio/posix/stream_descriptor_service.hpp" #include "asio/raw_socket_service.hpp" #include "asio/read.hpp" #include "asio/read_at.hpp" #include "asio/read_until.hpp" #include "asio/seq_packet_socket_service.hpp" #include "asio/serial_port.hpp" #include "asio/serial_port_base.hpp" #include "asio/serial_port_service.hpp" #include "asio/signal_set.hpp" #include "asio/signal_set_service.hpp" #include "asio/socket_acceptor_service.hpp" #include "asio/socket_base.hpp" #include "asio/strand.hpp" #include "asio/stream_socket_service.hpp" #include "asio/streambuf.hpp" #include "asio/system_error.hpp" #include "asio/thread.hpp" #include "asio/time_traits.hpp" #include "asio/version.hpp" #include 
"asio/wait_traits.hpp" #include "asio/waitable_timer_service.hpp" #include "asio/windows/basic_handle.hpp" #include "asio/windows/basic_object_handle.hpp" #include "asio/windows/basic_random_access_handle.hpp" #include "asio/windows/basic_stream_handle.hpp" #include "asio/windows/object_handle.hpp" #include "asio/windows/object_handle_service.hpp" #include "asio/windows/overlapped_ptr.hpp" #include "asio/windows/random_access_handle.hpp" #include "asio/windows/random_access_handle_service.hpp" #include "asio/windows/stream_handle.hpp" #include "asio/windows/stream_handle_service.hpp" #include "asio/write.hpp" #include "asio/write_at.hpp" #endif // ASIO_HPP galera-3-25.3.20/asio/asio/0000755000015300001660000000000013042054732015024 5ustar jenkinsjenkinsgalera-3-25.3.20/asio/asio/handler_continuation_hook.hpp0000644000015300001660000000256713042054732022776 0ustar jenkinsjenkins// // handler_continuation_hook.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_HANDLER_CONTINUATION_HOOK_HPP #define ASIO_HANDLER_CONTINUATION_HOOK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Default continuation function for handlers. /** * Asynchronous operations may represent a continuation of the asynchronous * control flow associated with the current handler. The implementation can use * this knowledge to optimise scheduling of the handler. * * Implement asio_handler_is_continuation for your own handlers to indicate * when a handler represents a continuation. * * The default implementation of the continuation hook returns false. * * @par Example * @code * class my_handler; * * bool asio_handler_is_continuation(my_handler* context) * { * return true; * } * @endcode */ inline bool asio_handler_is_continuation(...) { return false; } } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_HANDLER_CONTINUATION_HOOK_HPP galera-3-25.3.20/asio/asio/buffered_read_stream.hpp0000644000015300001660000001662013042054732021672 0ustar jenkinsjenkins// // buffered_read_stream.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BUFFERED_READ_STREAM_HPP #define ASIO_BUFFERED_READ_STREAM_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/async_result.hpp" #include "asio/buffered_read_stream_fwd.hpp" #include "asio/buffer.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_resize_guard.hpp" #include "asio/detail/buffered_stream_storage.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/type_traits.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Adds buffering to the read-related operations of a stream. /** * The buffered_read_stream class template can be used to add buffering to the * synchronous and asynchronous read operations of a stream. 
* * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. * * @par Concepts: * AsyncReadStream, AsyncWriteStream, Stream, SyncReadStream, SyncWriteStream. */ template class buffered_read_stream : private noncopyable { public: /// The type of the next layer. typedef typename remove_reference::type next_layer_type; /// The type of the lowest layer. typedef typename next_layer_type::lowest_layer_type lowest_layer_type; #if defined(GENERATING_DOCUMENTATION) /// The default buffer size. static const std::size_t default_buffer_size = implementation_defined; #else ASIO_STATIC_CONSTANT(std::size_t, default_buffer_size = 1024); #endif /// Construct, passing the specified argument to initialise the next layer. template explicit buffered_read_stream(Arg& a) : next_layer_(a), storage_(default_buffer_size) { } /// Construct, passing the specified argument to initialise the next layer. template buffered_read_stream(Arg& a, std::size_t buffer_size) : next_layer_(a), storage_(buffer_size) { } /// Get a reference to the next layer. next_layer_type& next_layer() { return next_layer_; } /// Get a reference to the lowest layer. lowest_layer_type& lowest_layer() { return next_layer_.lowest_layer(); } /// Get a const reference to the lowest layer. const lowest_layer_type& lowest_layer() const { return next_layer_.lowest_layer(); } /// Get the io_service associated with the object. asio::io_service& get_io_service() { return next_layer_.get_io_service(); } /// Close the stream. void close() { next_layer_.close(); } /// Close the stream. asio::error_code close(asio::error_code& ec) { return next_layer_.close(ec); } /// Write the given data to the stream. Returns the number of bytes written. /// Throws an exception on failure. template std::size_t write_some(const ConstBufferSequence& buffers) { return next_layer_.write_some(buffers); } /// Write the given data to the stream. Returns the number of bytes written, /// or 0 if an error occurred. template std::size_t write_some(const ConstBufferSequence& buffers, asio::error_code& ec) { return next_layer_.write_some(buffers, ec); } /// Start an asynchronous write. The data being written must be valid for the /// lifetime of the asynchronous operation. template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write_some(const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler) { detail::async_result_init< WriteHandler, void (asio::error_code, std::size_t)> init( ASIO_MOVE_CAST(WriteHandler)(handler)); next_layer_.async_write_some(buffers, ASIO_MOVE_CAST(ASIO_HANDLER_TYPE(WriteHandler, void (asio::error_code, std::size_t)))(init.handler)); return init.result.get(); } /// Fill the buffer with some data. Returns the number of bytes placed in the /// buffer as a result of the operation. Throws an exception on failure. std::size_t fill(); /// Fill the buffer with some data. Returns the number of bytes placed in the /// buffer as a result of the operation, or 0 if an error occurred. std::size_t fill(asio::error_code& ec); /// Start an asynchronous fill. template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_fill(ASIO_MOVE_ARG(ReadHandler) handler); /// Read some data from the stream. Returns the number of bytes read. Throws /// an exception on failure. template std::size_t read_some(const MutableBufferSequence& buffers); /// Read some data from the stream. Returns the number of bytes read or 0 if /// an error occurred. 
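// A brief sketch of wrapping a TCP socket in buffered_read_stream so that
// short reads are served from the internal buffer; the 4096-byte buffer size
// is an arbitrary example value.
#include "asio.hpp"
#include "asio/buffered_read_stream.hpp"

inline void example_buffered_read(asio::ip::tcp::socket& sock)
{
    asio::buffered_read_stream<asio::ip::tcp::socket&> bstream(sock, 4096);

    char data[128];
    std::size_t n = bstream.read_some(asio::buffer(data)); // fills/drains the buffer
    (void)n;
}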
template std::size_t read_some(const MutableBufferSequence& buffers, asio::error_code& ec); /// Start an asynchronous read. The buffer into which the data will be read /// must be valid for the lifetime of the asynchronous operation. template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_some(const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler); /// Peek at the incoming data on the stream. Returns the number of bytes read. /// Throws an exception on failure. template std::size_t peek(const MutableBufferSequence& buffers); /// Peek at the incoming data on the stream. Returns the number of bytes read, /// or 0 if an error occurred. template std::size_t peek(const MutableBufferSequence& buffers, asio::error_code& ec); /// Determine the amount of data that may be read without blocking. std::size_t in_avail() { return storage_.size(); } /// Determine the amount of data that may be read without blocking. std::size_t in_avail(asio::error_code& ec) { ec = asio::error_code(); return storage_.size(); } private: /// Copy data out of the internal buffer to the specified target buffer. /// Returns the number of bytes copied. template std::size_t copy(const MutableBufferSequence& buffers) { std::size_t bytes_copied = asio::buffer_copy( buffers, storage_.data(), storage_.size()); storage_.consume(bytes_copied); return bytes_copied; } /// Copy data from the internal buffer to the specified target buffer, without /// removing the data from the internal buffer. Returns the number of bytes /// copied. template std::size_t peek_copy(const MutableBufferSequence& buffers) { return asio::buffer_copy(buffers, storage_.data(), storage_.size()); } /// The next layer. Stream next_layer_; // The data in the buffer. detail::buffered_stream_storage storage_; }; } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/buffered_read_stream.hpp" #endif // ASIO_BUFFERED_READ_STREAM_HPP galera-3-25.3.20/asio/asio/read_until.hpp0000644000015300001660000010731513042054732017672 0ustar jenkinsjenkins// // read_until.hpp // ~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_READ_UNTIL_HPP #define ASIO_READ_UNTIL_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_NO_IOSTREAM) #include #include #include "asio/async_result.hpp" #include "asio/basic_streambuf.hpp" #include "asio/detail/regex_fwd.hpp" #include "asio/detail/type_traits.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { char (&has_result_type_helper(...))[2]; template char has_result_type_helper(T*, typename T::result_type* = 0); template struct has_result_type { enum { value = (sizeof((has_result_type_helper)((T*)(0))) == 1) }; }; } // namespace detail /// Type trait used to determine whether a type can be used as a match condition /// function with read_until and async_read_until. template struct is_match_condition { #if defined(GENERATING_DOCUMENTATION) /// The value member is true if the type may be used as a match condition. 
static const bool value; #else enum { value = asio::is_function< typename asio::remove_pointer::type>::value || detail::has_result_type::value }; #endif }; /** * @defgroup read_until asio::read_until * * @brief Read data into a streambuf until it contains a delimiter, matches a * regular expression, or a function object indicates a match. */ /*@{*/ /// Read data into a streambuf until it contains a specified delimiter. /** * This function is used to read data into the specified streambuf until the * streambuf's get area contains the specified delimiter. The call will block * until one of the following conditions is true: * * @li The get area of the streambuf contains the specified delimiter. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. If the streambuf's get area already contains the * delimiter, the function returns immediately. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param b A streambuf object into which the data will be read. * * @param delim The delimiter character. * * @returns The number of bytes in the streambuf's get area up to and including * the delimiter. * * @throws asio::system_error Thrown on failure. * * @note After a successful read_until operation, the streambuf may contain * additional data beyond the delimiter. An application will typically leave * that data in the streambuf for a subsequent read_until operation to examine. * * @par Example * To read data into a streambuf until a newline is encountered: * @code asio::streambuf b; * asio::read_until(s, b, '\n'); * std::istream is(&b); * std::string line; * std::getline(is, line); @endcode * After the @c read_until operation completes successfully, the buffer @c b * contains the delimiter: * @code { 'a', 'b', ..., 'c', '\n', 'd', 'e', ... } @endcode * The call to @c std::getline then extracts the data up to and including the * delimiter, so that the string @c line contains: * @code { 'a', 'b', ..., 'c', '\n' } @endcode * The remaining data is left in the buffer @c b as follows: * @code { 'd', 'e', ... } @endcode * This data may be the start of a new line, to be extracted by a subsequent * @c read_until operation. */ template std::size_t read_until(SyncReadStream& s, asio::basic_streambuf& b, char delim); /// Read data into a streambuf until it contains a specified delimiter. /** * This function is used to read data into the specified streambuf until the * streambuf's get area contains the specified delimiter. The call will block * until one of the following conditions is true: * * @li The get area of the streambuf contains the specified delimiter. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. If the streambuf's get area already contains the * delimiter, the function returns immediately. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param b A streambuf object into which the data will be read. * * @param delim The delimiter character. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes in the streambuf's get area up to and including * the delimiter. Returns 0 if an error occurred. * * @note After a successful read_until operation, the streambuf may contain * additional data beyond the delimiter. 
An application will typically leave * that data in the streambuf for a subsequent read_until operation to examine. */ template std::size_t read_until(SyncReadStream& s, asio::basic_streambuf& b, char delim, asio::error_code& ec); /// Read data into a streambuf until it contains a specified delimiter. /** * This function is used to read data into the specified streambuf until the * streambuf's get area contains the specified delimiter. The call will block * until one of the following conditions is true: * * @li The get area of the streambuf contains the specified delimiter. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. If the streambuf's get area already contains the * delimiter, the function returns immediately. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param b A streambuf object into which the data will be read. * * @param delim The delimiter string. * * @returns The number of bytes in the streambuf's get area up to and including * the delimiter. * * @throws asio::system_error Thrown on failure. * * @note After a successful read_until operation, the streambuf may contain * additional data beyond the delimiter. An application will typically leave * that data in the streambuf for a subsequent read_until operation to examine. * * @par Example * To read data into a streambuf until a newline is encountered: * @code asio::streambuf b; * asio::read_until(s, b, "\r\n"); * std::istream is(&b); * std::string line; * std::getline(is, line); @endcode * After the @c read_until operation completes successfully, the buffer @c b * contains the delimiter: * @code { 'a', 'b', ..., 'c', '\r', '\n', 'd', 'e', ... } @endcode * The call to @c std::getline then extracts the data up to and including the * delimiter, so that the string @c line contains: * @code { 'a', 'b', ..., 'c', '\r', '\n' } @endcode * The remaining data is left in the buffer @c b as follows: * @code { 'd', 'e', ... } @endcode * This data may be the start of a new line, to be extracted by a subsequent * @c read_until operation. */ template std::size_t read_until(SyncReadStream& s, asio::basic_streambuf& b, const std::string& delim); /// Read data into a streambuf until it contains a specified delimiter. /** * This function is used to read data into the specified streambuf until the * streambuf's get area contains the specified delimiter. The call will block * until one of the following conditions is true: * * @li The get area of the streambuf contains the specified delimiter. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. If the streambuf's get area already contains the * delimiter, the function returns immediately. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param b A streambuf object into which the data will be read. * * @param delim The delimiter string. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes in the streambuf's get area up to and including * the delimiter. Returns 0 if an error occurred. * * @note After a successful read_until operation, the streambuf may contain * additional data beyond the delimiter. An application will typically leave * that data in the streambuf for a subsequent read_until operation to examine. 
*/ template std::size_t read_until(SyncReadStream& s, asio::basic_streambuf& b, const std::string& delim, asio::error_code& ec); #if defined(ASIO_HAS_BOOST_REGEX) \ || defined(GENERATING_DOCUMENTATION) /// Read data into a streambuf until some part of the data it contains matches /// a regular expression. /** * This function is used to read data into the specified streambuf until the * streambuf's get area contains some data that matches a regular expression. * The call will block until one of the following conditions is true: * * @li A substring of the streambuf's get area matches the regular expression. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. If the streambuf's get area already contains data that * matches the regular expression, the function returns immediately. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param b A streambuf object into which the data will be read. * * @param expr The regular expression. * * @returns The number of bytes in the streambuf's get area up to and including * the substring that matches the regular expression. * * @throws asio::system_error Thrown on failure. * * @note After a successful read_until operation, the streambuf may contain * additional data beyond that which matched the regular expression. An * application will typically leave that data in the streambuf for a subsequent * read_until operation to examine. * * @par Example * To read data into a streambuf until a CR-LF sequence is encountered: * @code asio::streambuf b; * asio::read_until(s, b, boost::regex("\r\n")); * std::istream is(&b); * std::string line; * std::getline(is, line); @endcode * After the @c read_until operation completes successfully, the buffer @c b * contains the data which matched the regular expression: * @code { 'a', 'b', ..., 'c', '\r', '\n', 'd', 'e', ... } @endcode * The call to @c std::getline then extracts the data up to and including the * match, so that the string @c line contains: * @code { 'a', 'b', ..., 'c', '\r', '\n' } @endcode * The remaining data is left in the buffer @c b as follows: * @code { 'd', 'e', ... } @endcode * This data may be the start of a new line, to be extracted by a subsequent * @c read_until operation. */ template std::size_t read_until(SyncReadStream& s, asio::basic_streambuf& b, const boost::regex& expr); /// Read data into a streambuf until some part of the data it contains matches /// a regular expression. /** * This function is used to read data into the specified streambuf until the * streambuf's get area contains some data that matches a regular expression. * The call will block until one of the following conditions is true: * * @li A substring of the streambuf's get area matches the regular expression. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. If the streambuf's get area already contains data that * matches the regular expression, the function returns immediately. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param b A streambuf object into which the data will be read. * * @param expr The regular expression. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes in the streambuf's get area up to and including * the substring that matches the regular expression. 
Returns 0 if an error * occurred. * * @note After a successful read_until operation, the streambuf may contain * additional data beyond that which matched the regular expression. An * application will typically leave that data in the streambuf for a subsequent * read_until operation to examine. */ template std::size_t read_until(SyncReadStream& s, asio::basic_streambuf& b, const boost::regex& expr, asio::error_code& ec); #endif // defined(ASIO_HAS_BOOST_REGEX) // || defined(GENERATING_DOCUMENTATION) /// Read data into a streambuf until a function object indicates a match. /** * This function is used to read data into the specified streambuf until a * user-defined match condition function object, when applied to the data * contained in the streambuf, indicates a successful match. The call will * block until one of the following conditions is true: * * @li The match condition function object returns a std::pair where the second * element evaluates to true. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. If the match condition function object already indicates * a match, the function returns immediately. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param b A streambuf object into which the data will be read. * * @param match_condition The function object to be called to determine whether * a match exists. The signature of the function object must be: * @code pair match_condition(iterator begin, iterator end); * @endcode * where @c iterator represents the type: * @code buffers_iterator::const_buffers_type> * @endcode * The iterator parameters @c begin and @c end define the range of bytes to be * scanned to determine whether there is a match. The @c first member of the * return value is an iterator marking one-past-the-end of the bytes that have * been consumed by the match function. This iterator is used to calculate the * @c begin parameter for any subsequent invocation of the match condition. The * @c second member of the return value is true if a match has been found, false * otherwise. * * @returns The number of bytes in the streambuf's get area that have been fully * consumed by the match function. * * @throws asio::system_error Thrown on failure. * * @note After a successful read_until operation, the streambuf may contain * additional data beyond that which matched the function object. An application * will typically leave that data in the streambuf for a subsequent * * @note The default implementation of the @c is_match_condition type trait * evaluates to true for function pointers and function objects with a * @c result_type typedef. It must be specialised for other user-defined * function objects. * * @par Examples * To read data into a streambuf until whitespace is encountered: * @code typedef asio::buffers_iterator< * asio::streambuf::const_buffers_type> iterator; * * std::pair * match_whitespace(iterator begin, iterator end) * { * iterator i = begin; * while (i != end) * if (std::isspace(*i++)) * return std::make_pair(i, true); * return std::make_pair(i, false); * } * ... 
* asio::streambuf b; * asio::read_until(s, b, match_whitespace); * @endcode * * To read data into a streambuf until a matching character is found: * @code class match_char * { * public: * explicit match_char(char c) : c_(c) {} * * template * std::pair operator()( * Iterator begin, Iterator end) const * { * Iterator i = begin; * while (i != end) * if (c_ == *i++) * return std::make_pair(i, true); * return std::make_pair(i, false); * } * * private: * char c_; * }; * * namespace asio { * template <> struct is_match_condition * : public boost::true_type {}; * } // namespace asio * ... * asio::streambuf b; * asio::read_until(s, b, match_char('a')); * @endcode */ template std::size_t read_until(SyncReadStream& s, asio::basic_streambuf& b, MatchCondition match_condition, typename enable_if::value>::type* = 0); /// Read data into a streambuf until a function object indicates a match. /** * This function is used to read data into the specified streambuf until a * user-defined match condition function object, when applied to the data * contained in the streambuf, indicates a successful match. The call will * block until one of the following conditions is true: * * @li The match condition function object returns a std::pair where the second * element evaluates to true. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. If the match condition function object already indicates * a match, the function returns immediately. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param b A streambuf object into which the data will be read. * * @param match_condition The function object to be called to determine whether * a match exists. The signature of the function object must be: * @code pair match_condition(iterator begin, iterator end); * @endcode * where @c iterator represents the type: * @code buffers_iterator::const_buffers_type> * @endcode * The iterator parameters @c begin and @c end define the range of bytes to be * scanned to determine whether there is a match. The @c first member of the * return value is an iterator marking one-past-the-end of the bytes that have * been consumed by the match function. This iterator is used to calculate the * @c begin parameter for any subsequent invocation of the match condition. The * @c second member of the return value is true if a match has been found, false * otherwise. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes in the streambuf's get area that have been fully * consumed by the match function. Returns 0 if an error occurred. * * @note After a successful read_until operation, the streambuf may contain * additional data beyond that which matched the function object. An application * will typically leave that data in the streambuf for a subsequent * * @note The default implementation of the @c is_match_condition type trait * evaluates to true for function pointers and function objects with a * @c result_type typedef. It must be specialised for other user-defined * function objects. 
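 *
 * @par Example
 * A minimal sketch of this non-throwing overload, reusing the @c match_char
 * function object (and its @c is_match_condition specialisation) from the
 * example above, and assuming @c s is a SyncReadStream:
 * @code asio::streambuf b;
 * asio::error_code ec;
 * std::size_t n = asio::read_until(s, b, match_char('a'), ec);
 * if (!ec)
 * {
 *   // The first n bytes of b's get area were consumed by the match.
 * } @endcode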
*/ template std::size_t read_until(SyncReadStream& s, asio::basic_streambuf& b, MatchCondition match_condition, asio::error_code& ec, typename enable_if::value>::type* = 0); /*@}*/ /** * @defgroup async_read_until asio::async_read_until * * @brief Start an asynchronous operation to read data into a streambuf until it * contains a delimiter, matches a regular expression, or a function object * indicates a match. */ /*@{*/ /// Start an asynchronous operation to read data into a streambuf until it /// contains a specified delimiter. /** * This function is used to asynchronously read data into the specified * streambuf until the streambuf's get area contains the specified delimiter. * The function call always returns immediately. The asynchronous operation * will continue until one of the following conditions is true: * * @li The get area of the streambuf contains the specified delimiter. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * async_read_some function, and is known as a composed operation. If * the streambuf's get area already contains the delimiter, this asynchronous * operation completes immediately. The program must ensure that the stream * performs no other read operations (such as async_read, async_read_until, the * stream's async_read_some function, or any other composed operations that * perform reads) until this operation completes. * * @param s The stream from which the data is to be read. The type must support * the AsyncReadStream concept. * * @param b A streambuf object into which the data will be read. Ownership of * the streambuf is retained by the caller, which must guarantee that it remains * valid until the handler is called. * * @param delim The delimiter character. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * // Result of operation. * const asio::error_code& error, * * // The number of bytes in the streambuf's get * // area up to and including the delimiter. * // 0 if an error occurred. * std::size_t bytes_transferred * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation of * the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @note After a successful async_read_until operation, the streambuf may * contain additional data beyond the delimiter. An application will typically * leave that data in the streambuf for a subsequent async_read_until operation * to examine. * * @par Example * To asynchronously read data into a streambuf until a newline is encountered: * @code asio::streambuf b; * ... * void handler(const asio::error_code& e, std::size_t size) * { * if (!e) * { * std::istream is(&b); * std::string line; * std::getline(is, line); * ... * } * } * ... * asio::async_read_until(s, b, '\n', handler); @endcode * After the @c async_read_until operation completes successfully, the buffer * @c b contains the delimiter: * @code { 'a', 'b', ..., 'c', '\n', 'd', 'e', ... } @endcode * The call to @c std::getline then extracts the data up to and including the * delimiter, so that the string @c line contains: * @code { 'a', 'b', ..., 'c', '\n' } @endcode * The remaining data is left in the buffer @c b as follows: * @code { 'd', 'e', ... 
} @endcode * This data may be the start of a new line, to be extracted by a subsequent * @c async_read_until operation. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_until(AsyncReadStream& s, asio::basic_streambuf& b, char delim, ASIO_MOVE_ARG(ReadHandler) handler); /// Start an asynchronous operation to read data into a streambuf until it /// contains a specified delimiter. /** * This function is used to asynchronously read data into the specified * streambuf until the streambuf's get area contains the specified delimiter. * The function call always returns immediately. The asynchronous operation * will continue until one of the following conditions is true: * * @li The get area of the streambuf contains the specified delimiter. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * async_read_some function, and is known as a composed operation. If * the streambuf's get area already contains the delimiter, this asynchronous * operation completes immediately. The program must ensure that the stream * performs no other read operations (such as async_read, async_read_until, the * stream's async_read_some function, or any other composed operations that * perform reads) until this operation completes. * * @param s The stream from which the data is to be read. The type must support * the AsyncReadStream concept. * * @param b A streambuf object into which the data will be read. Ownership of * the streambuf is retained by the caller, which must guarantee that it remains * valid until the handler is called. * * @param delim The delimiter string. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * // Result of operation. * const asio::error_code& error, * * // The number of bytes in the streambuf's get * // area up to and including the delimiter. * // 0 if an error occurred. * std::size_t bytes_transferred * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation of * the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @note After a successful async_read_until operation, the streambuf may * contain additional data beyond the delimiter. An application will typically * leave that data in the streambuf for a subsequent async_read_until operation * to examine. * * @par Example * To asynchronously read data into a streambuf until a newline is encountered: * @code asio::streambuf b; * ... * void handler(const asio::error_code& e, std::size_t size) * { * if (!e) * { * std::istream is(&b); * std::string line; * std::getline(is, line); * ... * } * } * ... * asio::async_read_until(s, b, "\r\n", handler); @endcode * After the @c async_read_until operation completes successfully, the buffer * @c b contains the delimiter: * @code { 'a', 'b', ..., 'c', '\r', '\n', 'd', 'e', ... } @endcode * The call to @c std::getline then extracts the data up to and including the * delimiter, so that the string @c line contains: * @code { 'a', 'b', ..., 'c', '\r', '\n' } @endcode * The remaining data is left in the buffer @c b as follows: * @code { 'd', 'e', ... } @endcode * This data may be the start of a new line, to be extracted by a subsequent * @c async_read_until operation. 
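 *
 * @par Example
 * The same operation written with a C++11 lambda as the completion handler
 * (a sketch; it assumes both @c s and @c b outlive the asynchronous
 * operation):
 * @code asio::streambuf b;
 * ...
 * asio::async_read_until(s, b, "\r\n",
 *     [&b](const asio::error_code& e, std::size_t size)
 *     {
 *       if (!e)
 *       {
 *         std::istream is(&b);
 *         std::string line;
 *         std::getline(is, line);
 *       }
 *     }); @endcode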
*/ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_until(AsyncReadStream& s, asio::basic_streambuf& b, const std::string& delim, ASIO_MOVE_ARG(ReadHandler) handler); #if defined(ASIO_HAS_BOOST_REGEX) \ || defined(GENERATING_DOCUMENTATION) /// Start an asynchronous operation to read data into a streambuf until some /// part of its data matches a regular expression. /** * This function is used to asynchronously read data into the specified * streambuf until the streambuf's get area contains some data that matches a * regular expression. The function call always returns immediately. The * asynchronous operation will continue until one of the following conditions * is true: * * @li A substring of the streambuf's get area matches the regular expression. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * async_read_some function, and is known as a composed operation. If * the streambuf's get area already contains data that matches the regular * expression, this asynchronous operation completes immediately. The program * must ensure that the stream performs no other read operations (such as * async_read, async_read_until, the stream's async_read_some function, or any * other composed operations that perform reads) until this operation * completes. * * @param s The stream from which the data is to be read. The type must support * the AsyncReadStream concept. * * @param b A streambuf object into which the data will be read. Ownership of * the streambuf is retained by the caller, which must guarantee that it remains * valid until the handler is called. * * @param expr The regular expression. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * // Result of operation. * const asio::error_code& error, * * // The number of bytes in the streambuf's get * // area up to and including the substring * // that matches the regular. expression. * // 0 if an error occurred. * std::size_t bytes_transferred * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation of * the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @note After a successful async_read_until operation, the streambuf may * contain additional data beyond that which matched the regular expression. An * application will typically leave that data in the streambuf for a subsequent * async_read_until operation to examine. * * @par Example * To asynchronously read data into a streambuf until a CR-LF sequence is * encountered: * @code asio::streambuf b; * ... * void handler(const asio::error_code& e, std::size_t size) * { * if (!e) * { * std::istream is(&b); * std::string line; * std::getline(is, line); * ... * } * } * ... * asio::async_read_until(s, b, boost::regex("\r\n"), handler); @endcode * After the @c async_read_until operation completes successfully, the buffer * @c b contains the data which matched the regular expression: * @code { 'a', 'b', ..., 'c', '\r', '\n', 'd', 'e', ... } @endcode * The call to @c std::getline then extracts the data up to and including the * match, so that the string @c line contains: * @code { 'a', 'b', ..., 'c', '\r', '\n' } @endcode * The remaining data is left in the buffer @c b as follows: * @code { 'd', 'e', ... 
} @endcode * This data may be the start of a new line, to be extracted by a subsequent * @c async_read_until operation. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_until(AsyncReadStream& s, asio::basic_streambuf& b, const boost::regex& expr, ASIO_MOVE_ARG(ReadHandler) handler); #endif // defined(ASIO_HAS_BOOST_REGEX) // || defined(GENERATING_DOCUMENTATION) /// Start an asynchronous operation to read data into a streambuf until a /// function object indicates a match. /** * This function is used to asynchronously read data into the specified * streambuf until a user-defined match condition function object, when applied * to the data contained in the streambuf, indicates a successful match. The * function call always returns immediately. The asynchronous operation will * continue until one of the following conditions is true: * * @li The match condition function object returns a std::pair where the second * element evaluates to true. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * async_read_some function, and is known as a composed operation. If * the match condition function object already indicates a match, this * asynchronous operation completes immediately. The program must ensure that * the stream performs no other read operations (such as async_read, * async_read_until, the stream's async_read_some function, or any other * composed operations that perform reads) until this operation completes. * * @param s The stream from which the data is to be read. The type must support * the AsyncReadStream concept. * * @param b A streambuf object into which the data will be read. * * @param match_condition The function object to be called to determine whether * a match exists. The signature of the function object must be: * @code pair match_condition(iterator begin, iterator end); * @endcode * where @c iterator represents the type: * @code buffers_iterator::const_buffers_type> * @endcode * The iterator parameters @c begin and @c end define the range of bytes to be * scanned to determine whether there is a match. The @c first member of the * return value is an iterator marking one-past-the-end of the bytes that have * been consumed by the match function. This iterator is used to calculate the * @c begin parameter for any subsequent invocation of the match condition. The * @c second member of the return value is true if a match has been found, false * otherwise. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * // Result of operation. * const asio::error_code& error, * * // The number of bytes in the streambuf's get * // area that have been fully consumed by the * // match function. O if an error occurred. * std::size_t bytes_transferred * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation of * the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @note After a successful async_read_until operation, the streambuf may * contain additional data beyond that which matched the function object. An * application will typically leave that data in the streambuf for a subsequent * async_read_until operation to examine. 
* * @note The default implementation of the @c is_match_condition type trait * evaluates to true for function pointers and function objects with a * @c result_type typedef. It must be specialised for other user-defined * function objects. * * @par Examples * To asynchronously read data into a streambuf until whitespace is encountered: * @code typedef asio::buffers_iterator< * asio::streambuf::const_buffers_type> iterator; * * std::pair * match_whitespace(iterator begin, iterator end) * { * iterator i = begin; * while (i != end) * if (std::isspace(*i++)) * return std::make_pair(i, true); * return std::make_pair(i, false); * } * ... * void handler(const asio::error_code& e, std::size_t size); * ... * asio::streambuf b; * asio::async_read_until(s, b, match_whitespace, handler); * @endcode * * To asynchronously read data into a streambuf until a matching character is * found: * @code class match_char * { * public: * explicit match_char(char c) : c_(c) {} * * template * std::pair operator()( * Iterator begin, Iterator end) const * { * Iterator i = begin; * while (i != end) * if (c_ == *i++) * return std::make_pair(i, true); * return std::make_pair(i, false); * } * * private: * char c_; * }; * * namespace asio { * template <> struct is_match_condition * : public boost::true_type {}; * } // namespace asio * ... * void handler(const asio::error_code& e, std::size_t size); * ... * asio::streambuf b; * asio::async_read_until(s, b, match_char('a'), handler); * @endcode */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_until(AsyncReadStream& s, asio::basic_streambuf& b, MatchCondition match_condition, ASIO_MOVE_ARG(ReadHandler) handler, typename enable_if::value>::type* = 0); /*@}*/ } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/read_until.hpp" #endif // !defined(ASIO_NO_IOSTREAM) #endif // ASIO_READ_UNTIL_HPP galera-3-25.3.20/asio/asio/completion_condition.hpp0000644000015300001660000001232713042054732021761 0ustar jenkinsjenkins// // completion_condition.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_COMPLETION_CONDITION_HPP #define ASIO_COMPLETION_CONDITION_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // The default maximum number of bytes to transfer in a single operation. enum default_max_transfer_size_t { default_max_transfer_size = 65536 }; // Adapt result of old-style completion conditions (which had a bool result // where true indicated that the operation was complete). inline std::size_t adapt_completion_condition_result(bool result) { return result ? 0 : default_max_transfer_size; } // Adapt result of current completion conditions (which have a size_t result // where 0 means the operation is complete, and otherwise the result is the // maximum number of bytes to transfer on the next underlying operation). inline std::size_t adapt_completion_condition_result(std::size_t result) { return result; } class transfer_all_t { public: typedef std::size_t result_type; template std::size_t operator()(const Error& err, std::size_t) { return !!err ? 
0 : default_max_transfer_size; } }; class transfer_at_least_t { public: typedef std::size_t result_type; explicit transfer_at_least_t(std::size_t minimum) : minimum_(minimum) { } template std::size_t operator()(const Error& err, std::size_t bytes_transferred) { return (!!err || bytes_transferred >= minimum_) ? 0 : default_max_transfer_size; } private: std::size_t minimum_; }; class transfer_exactly_t { public: typedef std::size_t result_type; explicit transfer_exactly_t(std::size_t size) : size_(size) { } template std::size_t operator()(const Error& err, std::size_t bytes_transferred) { return (!!err || bytes_transferred >= size_) ? 0 : (size_ - bytes_transferred < default_max_transfer_size ? size_ - bytes_transferred : std::size_t(default_max_transfer_size)); } private: std::size_t size_; }; } // namespace detail /** * @defgroup completion_condition Completion Condition Function Objects * * Function objects used for determining when a read or write operation should * complete. */ /*@{*/ /// Return a completion condition function object that indicates that a read or /// write operation should continue until all of the data has been transferred, /// or until an error occurs. /** * This function is used to create an object, of unspecified type, that meets * CompletionCondition requirements. * * @par Example * Reading until a buffer is full: * @code * boost::array buf; * asio::error_code ec; * std::size_t n = asio::read( * sock, asio::buffer(buf), * asio::transfer_all(), ec); * if (ec) * { * // An error occurred. * } * else * { * // n == 128 * } * @endcode */ #if defined(GENERATING_DOCUMENTATION) unspecified transfer_all(); #else inline detail::transfer_all_t transfer_all() { return detail::transfer_all_t(); } #endif /// Return a completion condition function object that indicates that a read or /// write operation should continue until a minimum number of bytes has been /// transferred, or until an error occurs. /** * This function is used to create an object, of unspecified type, that meets * CompletionCondition requirements. * * @par Example * Reading until a buffer is full or contains at least 64 bytes: * @code * boost::array buf; * asio::error_code ec; * std::size_t n = asio::read( * sock, asio::buffer(buf), * asio::transfer_at_least(64), ec); * if (ec) * { * // An error occurred. * } * else * { * // n >= 64 && n <= 128 * } * @endcode */ #if defined(GENERATING_DOCUMENTATION) unspecified transfer_at_least(std::size_t minimum); #else inline detail::transfer_at_least_t transfer_at_least(std::size_t minimum) { return detail::transfer_at_least_t(minimum); } #endif /// Return a completion condition function object that indicates that a read or /// write operation should continue until an exact number of bytes has been /// transferred, or until an error occurs. /** * This function is used to create an object, of unspecified type, that meets * CompletionCondition requirements. * * @par Example * Reading until a buffer is full or contains exactly 64 bytes: * @code * boost::array buf; * asio::error_code ec; * std::size_t n = asio::read( * sock, asio::buffer(buf), * asio::transfer_exactly(64), ec); * if (ec) * { * // An error occurred. 
* } * else * { * // n == 64 * } * @endcode */ #if defined(GENERATING_DOCUMENTATION) unspecified transfer_exactly(std::size_t size); #else inline detail::transfer_exactly_t transfer_exactly(std::size_t size) { return detail::transfer_exactly_t(size); } #endif /*@}*/ } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_COMPLETION_CONDITION_HPP galera-3-25.3.20/asio/asio/detail/0000755000015300001660000000000013042054732016266 5ustar jenkinsjenkinsgalera-3-25.3.20/asio/asio/detail/win_iocp_socket_recvmsg_op.hpp0000644000015300001660000000710413042054732024404 0ustar jenkinsjenkins// // detail/win_iocp_socket_recvmsg_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_SOCKET_RECVMSG_OP_HPP #define ASIO_DETAIL_WIN_IOCP_SOCKET_RECVMSG_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/error.hpp" #include "asio/socket_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class win_iocp_socket_recvmsg_op : public operation { public: ASIO_DEFINE_HANDLER_PTR(win_iocp_socket_recvmsg_op); win_iocp_socket_recvmsg_op( socket_ops::weak_cancel_token_type cancel_token, const MutableBufferSequence& buffers, socket_base::message_flags& out_flags, Handler& handler) : operation(&win_iocp_socket_recvmsg_op::do_complete), cancel_token_(cancel_token), buffers_(buffers), out_flags_(out_flags), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& result_ec, std::size_t bytes_transferred) { asio::error_code ec(result_ec); // Take ownership of the operation object. win_iocp_socket_recvmsg_op* o( static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) // Check whether buffers are still valid. if (owner) { buffer_sequence_adapter::validate(o->buffers_); } #endif // defined(ASIO_ENABLE_BUFFER_DEBUGGING) socket_ops::complete_iocp_recvmsg(o->cancel_token_, ec); o->out_flags_ = 0; // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, ec, bytes_transferred); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. 
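    // (When owner is null the operation is being freed via destroy() without
    // having been run, so the handler upcall is skipped and only the memory
    // above is reclaimed.)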
if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: socket_ops::weak_cancel_token_type cancel_token_; MutableBufferSequence buffers_; socket_base::message_flags& out_flags_; Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_SOCKET_RECVMSG_OP_HPP galera-3-25.3.20/asio/asio/detail/mutex.hpp0000644000015300001660000000230013042054732020134 0ustar jenkinsjenkins// // detail/mutex.hpp // ~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_MUTEX_HPP #define ASIO_DETAIL_MUTEX_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) # include "asio/detail/null_mutex.hpp" #elif defined(ASIO_WINDOWS) # include "asio/detail/win_mutex.hpp" #elif defined(ASIO_HAS_PTHREADS) # include "asio/detail/posix_mutex.hpp" #elif defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) # include "asio/detail/std_mutex.hpp" #else # error Only Windows, POSIX and std::mutex are supported! #endif namespace asio { namespace detail { #if !defined(ASIO_HAS_THREADS) typedef null_mutex mutex; #elif defined(ASIO_WINDOWS) typedef win_mutex mutex; #elif defined(ASIO_HAS_PTHREADS) typedef posix_mutex mutex; #elif defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) typedef std_mutex mutex; #endif } // namespace detail } // namespace asio #endif // ASIO_DETAIL_MUTEX_HPP galera-3-25.3.20/asio/asio/detail/reactive_socket_sendto_op.hpp0000644000015300001660000000771213042054732024232 0ustar jenkinsjenkins// // detail/reactive_socket_sendto_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_SOCKET_SENDTO_OP_HPP #define ASIO_DETAIL_REACTIVE_SOCKET_SENDTO_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class reactive_socket_sendto_op_base : public reactor_op { public: reactive_socket_sendto_op_base(socket_type socket, const ConstBufferSequence& buffers, const Endpoint& endpoint, socket_base::message_flags flags, func_type complete_func) : reactor_op(&reactive_socket_sendto_op_base::do_perform, complete_func), socket_(socket), buffers_(buffers), destination_(endpoint), flags_(flags) { } static bool do_perform(reactor_op* base) { reactive_socket_sendto_op_base* o( static_cast(base)); buffer_sequence_adapter bufs(o->buffers_); return socket_ops::non_blocking_sendto(o->socket_, bufs.buffers(), bufs.count(), o->flags_, o->destination_.data(), o->destination_.size(), o->ec_, o->bytes_transferred_); } private: socket_type socket_; ConstBufferSequence buffers_; Endpoint destination_; socket_base::message_flags flags_; }; template class reactive_socket_sendto_op : public reactive_socket_sendto_op_base { public: ASIO_DEFINE_HANDLER_PTR(reactive_socket_sendto_op); reactive_socket_sendto_op(socket_type socket, const ConstBufferSequence& buffers, const Endpoint& endpoint, socket_base::message_flags flags, Handler& handler) : reactive_socket_sendto_op_base(socket, buffers, endpoint, flags, &reactive_socket_sendto_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. reactive_socket_sendto_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, o->bytes_transferred_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_REACTIVE_SOCKET_SENDTO_OP_HPP galera-3-25.3.20/asio/asio/detail/fd_set_adapter.hpp0000644000015300001660000000171313042054732021745 0ustar jenkinsjenkins// // detail/fd_set_adapter.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_FD_SET_ADAPTER_HPP #define ASIO_DETAIL_FD_SET_ADAPTER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS_RUNTIME) #include "asio/detail/posix_fd_set_adapter.hpp" #include "asio/detail/win_fd_set_adapter.hpp" namespace asio { namespace detail { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) typedef win_fd_set_adapter fd_set_adapter; #else typedef posix_fd_set_adapter fd_set_adapter; #endif } // namespace detail } // namespace asio #endif // !defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_FD_SET_ADAPTER_HPP galera-3-25.3.20/asio/asio/detail/win_fenced_block.hpp0000644000015300001660000000367013042054732022260 0ustar jenkinsjenkins// // detail/win_fenced_block.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_FENCED_BLOCK_HPP #define ASIO_DETAIL_WIN_FENCED_BLOCK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) && !defined(UNDER_CE) #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class win_fenced_block : private noncopyable { public: enum half_t { half }; enum full_t { full }; // Constructor for a half fenced block. explicit win_fenced_block(half_t) { } // Constructor for a full fenced block. explicit win_fenced_block(full_t) { #if defined(__BORLANDC__) LONG barrier = 0; ::InterlockedExchange(&barrier, 1); #elif defined(ASIO_MSVC) \ && ((ASIO_MSVC < 1400) || !defined(MemoryBarrier)) # if defined(_M_IX86) # pragma warning(push) # pragma warning(disable:4793) LONG barrier; __asm { xchg barrier, eax } # pragma warning(pop) # endif // defined(_M_IX86) #else MemoryBarrier(); #endif } // Destructor. ~win_fenced_block() { #if defined(__BORLANDC__) LONG barrier = 0; ::InterlockedExchange(&barrier, 1); #elif defined(ASIO_MSVC) \ && ((ASIO_MSVC < 1400) || !defined(MemoryBarrier)) # if defined(_M_IX86) # pragma warning(push) # pragma warning(disable:4793) LONG barrier; __asm { xchg barrier, eax } # pragma warning(pop) # endif // defined(_M_IX86) #else MemoryBarrier(); #endif } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS) && !defined(UNDER_CE) #endif // ASIO_DETAIL_WIN_FENCED_BLOCK_HPP galera-3-25.3.20/asio/asio/detail/reactive_socket_send_op.hpp0000644000015300001660000000727313042054732023671 0ustar jenkinsjenkins// // detail/reactive_socket_send_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_SOCKET_SEND_OP_HPP #define ASIO_DETAIL_REACTIVE_SOCKET_SEND_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class reactive_socket_send_op_base : public reactor_op { public: reactive_socket_send_op_base(socket_type socket, const ConstBufferSequence& buffers, socket_base::message_flags flags, func_type complete_func) : reactor_op(&reactive_socket_send_op_base::do_perform, complete_func), socket_(socket), buffers_(buffers), flags_(flags) { } static bool do_perform(reactor_op* base) { reactive_socket_send_op_base* o( static_cast(base)); buffer_sequence_adapter bufs(o->buffers_); return socket_ops::non_blocking_send(o->socket_, bufs.buffers(), bufs.count(), o->flags_, o->ec_, o->bytes_transferred_); } private: socket_type socket_; ConstBufferSequence buffers_; socket_base::message_flags flags_; }; template class reactive_socket_send_op : public reactive_socket_send_op_base { public: ASIO_DEFINE_HANDLER_PTR(reactive_socket_send_op); reactive_socket_send_op(socket_type socket, const ConstBufferSequence& buffers, socket_base::message_flags flags, Handler& handler) : reactive_socket_send_op_base(socket, buffers, flags, &reactive_socket_send_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. reactive_socket_send_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, o->bytes_transferred_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_REACTIVE_SOCKET_SEND_OP_HPP galera-3-25.3.20/asio/asio/detail/winrt_resolve_op.hpp0000644000015300001660000000672213042054732022406 0ustar jenkinsjenkins// // detail/winrt_resolve_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_RESOLVE_OP_HPP #define ASIO_DETAIL_WINRT_RESOLVE_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/winrt_async_op.hpp" #include "asio/ip/basic_resolver_iterator.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class winrt_resolve_op : public winrt_async_op< Windows::Foundation::Collections::IVectorView< Windows::Networking::EndpointPair^>^> { public: ASIO_DEFINE_HANDLER_PTR(winrt_resolve_op); typedef typename Protocol::endpoint endpoint_type; typedef asio::ip::basic_resolver_query query_type; typedef asio::ip::basic_resolver_iterator iterator_type; winrt_resolve_op(const query_type& query, Handler& handler) : winrt_async_op< Windows::Foundation::Collections::IVectorView< Windows::Networking::EndpointPair^>^>( &winrt_resolve_op::do_complete), query_(query), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code&, std::size_t) { // Take ownership of the operation object. winrt_resolve_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); iterator_type iterator = iterator_type(); if (!o->ec_) { try { iterator = iterator_type::create( o->result_, o->query_.hints(), o->query_.host_name(), o->query_.service_name()); } catch (Platform::Exception^ e) { o->ec_ = asio::error_code(e->HResult, asio::system_category()); } } // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, iterator); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: query_type query_; Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_WINRT_RESOLVE_OP_HPP galera-3-25.3.20/asio/asio/detail/regex_fwd.hpp0000644000015300001660000000146013042054732020752 0ustar jenkinsjenkins// // detail/regex_fwd.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REGEX_FWD_HPP #define ASIO_DETAIL_REGEX_FWD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #if defined(ASIO_HAS_BOOST_REGEX) #include #include namespace boost { template struct sub_match; template class match_results; } // namespace boost #endif // defined(ASIO_HAS_BOOST_REGEX) #endif // ASIO_DETAIL_REGEX_FWD_HPP galera-3-25.3.20/asio/asio/detail/socket_holder.hpp0000644000015300001660000000377313042054732021636 0ustar jenkinsjenkins// // detail/socket_holder.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SOCKET_HOLDER_HPP #define ASIO_DETAIL_SOCKET_HOLDER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Implement the resource acquisition is initialisation idiom for sockets. class socket_holder : private noncopyable { public: // Construct as an uninitialised socket. socket_holder() : socket_(invalid_socket) { } // Construct to take ownership of the specified socket. explicit socket_holder(socket_type s) : socket_(s) { } // Destructor. ~socket_holder() { if (socket_ != invalid_socket) { asio::error_code ec; socket_ops::state_type state = 0; socket_ops::close(socket_, state, true, ec); } } // Get the underlying socket. socket_type get() const { return socket_; } // Reset to an uninitialised socket. void reset() { if (socket_ != invalid_socket) { asio::error_code ec; socket_ops::state_type state = 0; socket_ops::close(socket_, state, true, ec); socket_ = invalid_socket; } } // Reset to take ownership of the specified socket. void reset(socket_type s) { reset(); socket_ = s; } // Release ownership of the socket. socket_type release() { socket_type tmp = socket_; socket_ = invalid_socket; return tmp; } private: // The underlying socket. socket_type socket_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_SOCKET_HOLDER_HPP galera-3-25.3.20/asio/asio/detail/winrt_async_op.hpp0000644000015300001660000000255413042054732022043 0ustar jenkinsjenkins// // detail/winrt_async_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_ASYNC_OP_HPP #define ASIO_DETAIL_WINRT_ASYNC_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class winrt_async_op : public operation { public: // The error code to be passed to the completion handler. asio::error_code ec_; // The result of the operation, to be passed to the completion handler. 
TResult result_; protected: winrt_async_op(func_type complete_func) : operation(complete_func), result_() { } }; template <> class winrt_async_op : public operation { public: // The error code to be passed to the completion handler. asio::error_code ec_; protected: winrt_async_op(func_type complete_func) : operation(complete_func) { } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_WINRT_ASYNC_OP_HPP galera-3-25.3.20/asio/asio/detail/win_iocp_operation.hpp0000644000015300001660000000376013042054732022674 0ustar jenkinsjenkins// // detail/win_iocp_operation.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_OPERATION_HPP #define ASIO_DETAIL_WIN_IOCP_OPERATION_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/handler_tracking.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/socket_types.hpp" #include "asio/error_code.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class win_iocp_io_service; // Base class for all operations. A function pointer is used instead of virtual // functions to avoid the associated overhead. class win_iocp_operation : public OVERLAPPED ASIO_ALSO_INHERIT_TRACKED_HANDLER { public: void complete(win_iocp_io_service& owner, const asio::error_code& ec, std::size_t bytes_transferred) { func_(&owner, this, ec, bytes_transferred); } void destroy() { func_(0, this, asio::error_code(), 0); } protected: typedef void (*func_type)( win_iocp_io_service*, win_iocp_operation*, const asio::error_code&, std::size_t); win_iocp_operation(func_type func) : next_(0), func_(func) { reset(); } // Prevents deletion through this type. ~win_iocp_operation() { } void reset() { Internal = 0; InternalHigh = 0; Offset = 0; OffsetHigh = 0; hEvent = 0; ready_ = 0; } private: friend class op_queue_access; friend class win_iocp_io_service; win_iocp_operation* next_; func_type func_; long ready_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_OPERATION_HPP galera-3-25.3.20/asio/asio/detail/gcc_x86_fenced_block.hpp0000644000015300001660000000444313042054732022723 0ustar jenkinsjenkins// // detail/gcc_x86_fenced_block.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_GCC_X86_FENCED_BLOCK_HPP #define ASIO_DETAIL_GCC_X86_FENCED_BLOCK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class gcc_x86_fenced_block : private noncopyable { public: enum half_t { half }; enum full_t { full }; // Constructor for a half fenced block. explicit gcc_x86_fenced_block(half_t) { } // Constructor for a full fenced block. 
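  // (A full fence performs a load barrier on construction and a store barrier
  // on destruction, whereas the half fence above only performs the store
  // barrier on destruction.)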
explicit gcc_x86_fenced_block(full_t) { lbarrier(); } // Destructor. ~gcc_x86_fenced_block() { sbarrier(); } private: static int barrier() { int r = 0, m = 1; __asm__ __volatile__ ( "xchgl %0, %1" : "=r"(r), "=m"(m) : "0"(1), "m"(m) : "memory", "cc"); return r; } static void lbarrier() { #if defined(__SSE2__) # if (__GNUC__ >= 4) && !defined(__INTEL_COMPILER) && !defined(__ICL) __builtin_ia32_lfence(); # else // (__GNUC__ >= 4) && !defined(__INTEL_COMPILER) && !defined(__ICL) __asm__ __volatile__ ("lfence" ::: "memory"); # endif // (__GNUC__ >= 4) && !defined(__INTEL_COMPILER) && !defined(__ICL) #else // defined(__SSE2__) barrier(); #endif // defined(__SSE2__) } static void sbarrier() { #if defined(__SSE2__) # if (__GNUC__ >= 4) && !defined(__INTEL_COMPILER) && !defined(__ICL) __builtin_ia32_sfence(); # else // (__GNUC__ >= 4) && !defined(__INTEL_COMPILER) && !defined(__ICL) __asm__ __volatile__ ("sfence" ::: "memory"); # endif // (__GNUC__ >= 4) && !defined(__INTEL_COMPILER) && !defined(__ICL) #else // defined(__SSE2__) barrier(); #endif // defined(__SSE2__) } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) #endif // ASIO_DETAIL_GCC_X86_FENCED_BLOCK_HPP galera-3-25.3.20/asio/asio/detail/wrapped_handler.hpp0000644000015300001660000001770513042054732022150 0ustar jenkinsjenkins// // detail/wrapped_handler.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WRAPPED_HANDLER_HPP #define ASIO_DETAIL_WRAPPED_HANDLER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/bind_handler.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_cont_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct is_continuation_delegated { template bool operator()(Dispatcher&, Handler& handler) const { return asio_handler_cont_helpers::is_continuation(handler); } }; struct is_continuation_if_running { template bool operator()(Dispatcher& dispatcher, Handler&) const { return dispatcher.running_in_this_thread(); } }; template class wrapped_handler { public: typedef void result_type; wrapped_handler(Dispatcher dispatcher, Handler& handler) : dispatcher_(dispatcher), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } #if defined(ASIO_HAS_MOVE) wrapped_handler(const wrapped_handler& other) : dispatcher_(other.dispatcher_), handler_(other.handler_) { } wrapped_handler(wrapped_handler&& other) : dispatcher_(other.dispatcher_), handler_(ASIO_MOVE_CAST(Handler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()() { dispatcher_.dispatch(ASIO_MOVE_CAST(Handler)(handler_)); } void operator()() const { dispatcher_.dispatch(handler_); } template void operator()(const Arg1& arg1) { dispatcher_.dispatch(detail::bind_handler(handler_, arg1)); } template void operator()(const Arg1& arg1) const { dispatcher_.dispatch(detail::bind_handler(handler_, arg1)); } template void operator()(const Arg1& arg1, const Arg2& arg2) { dispatcher_.dispatch(detail::bind_handler(handler_, arg1, arg2)); } template void operator()(const Arg1& arg1, const Arg2& arg2) const { 
dispatcher_.dispatch(detail::bind_handler(handler_, arg1, arg2)); } template void operator()(const Arg1& arg1, const Arg2& arg2, const Arg3& arg3) { dispatcher_.dispatch(detail::bind_handler(handler_, arg1, arg2, arg3)); } template void operator()(const Arg1& arg1, const Arg2& arg2, const Arg3& arg3) const { dispatcher_.dispatch(detail::bind_handler(handler_, arg1, arg2, arg3)); } template void operator()(const Arg1& arg1, const Arg2& arg2, const Arg3& arg3, const Arg4& arg4) { dispatcher_.dispatch( detail::bind_handler(handler_, arg1, arg2, arg3, arg4)); } template void operator()(const Arg1& arg1, const Arg2& arg2, const Arg3& arg3, const Arg4& arg4) const { dispatcher_.dispatch( detail::bind_handler(handler_, arg1, arg2, arg3, arg4)); } template void operator()(const Arg1& arg1, const Arg2& arg2, const Arg3& arg3, const Arg4& arg4, const Arg5& arg5) { dispatcher_.dispatch( detail::bind_handler(handler_, arg1, arg2, arg3, arg4, arg5)); } template void operator()(const Arg1& arg1, const Arg2& arg2, const Arg3& arg3, const Arg4& arg4, const Arg5& arg5) const { dispatcher_.dispatch( detail::bind_handler(handler_, arg1, arg2, arg3, arg4, arg5)); } //private: Dispatcher dispatcher_; Handler handler_; }; template class rewrapped_handler { public: explicit rewrapped_handler(Handler& handler, const Context& context) : context_(context), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } explicit rewrapped_handler(const Handler& handler, const Context& context) : context_(context), handler_(handler) { } #if defined(ASIO_HAS_MOVE) rewrapped_handler(const rewrapped_handler& other) : context_(other.context_), handler_(other.handler_) { } rewrapped_handler(rewrapped_handler&& other) : context_(ASIO_MOVE_CAST(Context)(other.context_)), handler_(ASIO_MOVE_CAST(Handler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()() { handler_(); } void operator()() const { handler_(); } //private: Context context_; Handler handler_; }; template inline void* asio_handler_allocate(std::size_t size, wrapped_handler* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, wrapped_handler* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( wrapped_handler* this_handler) { return IsContinuation()(this_handler->dispatcher_, this_handler->handler_); } template inline void asio_handler_invoke(Function& function, wrapped_handler* this_handler) { this_handler->dispatcher_.dispatch( rewrapped_handler( function, this_handler->handler_)); } template inline void asio_handler_invoke(const Function& function, wrapped_handler* this_handler) { this_handler->dispatcher_.dispatch( rewrapped_handler( function, this_handler->handler_)); } template inline void* asio_handler_allocate(std::size_t size, rewrapped_handler* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->context_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, rewrapped_handler* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->context_); } template inline bool asio_handler_is_continuation( rewrapped_handler* this_handler) { return asio_handler_cont_helpers::is_continuation( this_handler->context_); } template inline void asio_handler_invoke(Function& function, rewrapped_handler* this_handler) { asio_handler_invoke_helpers::invoke( function, 
this_handler->context_); } template inline void asio_handler_invoke(const Function& function, rewrapped_handler* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->context_); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_WRAPPED_HANDLER_HPP galera-3-25.3.20/asio/asio/detail/win_iocp_socket_connect_op.hpp0000644000015300001660000000674013042054732024374 0ustar jenkinsjenkins// // detail/win_iocp_socket_connect_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_SOCKET_CONNECT_OP_HPP #define ASIO_DETAIL_WIN_IOCP_SOCKET_CONNECT_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class win_iocp_socket_connect_op_base : public reactor_op { public: win_iocp_socket_connect_op_base(socket_type socket, func_type complete_func) : reactor_op(&win_iocp_socket_connect_op_base::do_perform, complete_func), socket_(socket), connect_ex_(false) { } static bool do_perform(reactor_op* base) { win_iocp_socket_connect_op_base* o( static_cast(base)); return socket_ops::non_blocking_connect(o->socket_, o->ec_); } socket_type socket_; bool connect_ex_; }; template class win_iocp_socket_connect_op : public win_iocp_socket_connect_op_base { public: ASIO_DEFINE_HANDLER_PTR(win_iocp_socket_connect_op); win_iocp_socket_connect_op(socket_type socket, Handler& handler) : win_iocp_socket_connect_op_base(socket, &win_iocp_socket_connect_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& result_ec, std::size_t /*bytes_transferred*/) { asio::error_code ec(result_ec); // Take ownership of the operation object. win_iocp_socket_connect_op* o( static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; if (owner) { if (o->connect_ex_) socket_ops::complete_iocp_connect(o->socket_, ec); else ec = o->ec_; } ASIO_HANDLER_COMPLETION((o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder1 handler(o->handler_, ec); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. 
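// A null owner means the operation is merely being destroyed (for example
// when the io_service shuts down with work still pending); in that case the
// handler is not invoked, and the copy made above simply goes out of scope
// after the operation's memory has been released.  When owner is non-null,
// the upcall is made inside a fenced_block, which supplies the memory
// barriers that the handler-invocation guarantees rely on.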
if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_SOCKET_CONNECT_OP_HPP galera-3-25.3.20/asio/asio/detail/descriptor_read_op.hpp0000644000015300001660000000707613042054732022660 0ustar jenkinsjenkins// // detail/descriptor_read_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_DESCRIPTOR_READ_OP_HPP #define ASIO_DETAIL_DESCRIPTOR_READ_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/descriptor_ops.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class descriptor_read_op_base : public reactor_op { public: descriptor_read_op_base(int descriptor, const MutableBufferSequence& buffers, func_type complete_func) : reactor_op(&descriptor_read_op_base::do_perform, complete_func), descriptor_(descriptor), buffers_(buffers) { } static bool do_perform(reactor_op* base) { descriptor_read_op_base* o(static_cast(base)); buffer_sequence_adapter bufs(o->buffers_); return descriptor_ops::non_blocking_read(o->descriptor_, bufs.buffers(), bufs.count(), o->ec_, o->bytes_transferred_); } private: int descriptor_; MutableBufferSequence buffers_; }; template class descriptor_read_op : public descriptor_read_op_base { public: ASIO_DEFINE_HANDLER_PTR(descriptor_read_op); descriptor_read_op(int descriptor, const MutableBufferSequence& buffers, Handler& handler) : descriptor_read_op_base( descriptor, buffers, &descriptor_read_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. descriptor_read_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, o->bytes_transferred_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. 
if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #endif // ASIO_DETAIL_DESCRIPTOR_READ_OP_HPP galera-3-25.3.20/asio/asio/detail/resolver_service.hpp0000644000015300001660000000771613042054732022373 0ustar jenkinsjenkins// // detail/resolver_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_RESOLVER_SERVICE_HPP #define ASIO_DETAIL_RESOLVER_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS_RUNTIME) #include "asio/ip/basic_resolver_iterator.hpp" #include "asio/ip/basic_resolver_query.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/resolve_endpoint_op.hpp" #include "asio/detail/resolve_op.hpp" #include "asio/detail/resolver_service_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class resolver_service : public resolver_service_base { public: // The implementation type of the resolver. A cancellation token is used to // indicate to the background thread that the operation has been cancelled. typedef socket_ops::shared_cancel_token_type implementation_type; // The endpoint type. typedef typename Protocol::endpoint endpoint_type; // The query type. typedef asio::ip::basic_resolver_query query_type; // The iterator type. typedef asio::ip::basic_resolver_iterator iterator_type; // Constructor. resolver_service(asio::io_service& io_service) : resolver_service_base(io_service) { } // Resolve a query to a list of entries. iterator_type resolve(implementation_type&, const query_type& query, asio::error_code& ec) { asio::detail::addrinfo_type* address_info = 0; socket_ops::getaddrinfo(query.host_name().c_str(), query.service_name().c_str(), query.hints(), &address_info, ec); auto_addrinfo auto_address_info(address_info); return ec ? iterator_type() : iterator_type::create( address_info, query.host_name(), query.service_name()); } // Asynchronously resolve a query to a list of entries. template void async_resolve(implementation_type& impl, const query_type& query, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef resolve_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl, query, io_service_impl_, handler); ASIO_HANDLER_CREATION((p.p, "resolver", &impl, "async_resolve")); start_resolve_op(p.p); p.v = p.p = 0; } // Resolve an endpoint to a list of entries. iterator_type resolve(implementation_type&, const endpoint_type& endpoint, asio::error_code& ec) { char host_name[NI_MAXHOST]; char service_name[NI_MAXSERV]; socket_ops::sync_getnameinfo(endpoint.data(), endpoint.size(), host_name, NI_MAXHOST, service_name, NI_MAXSERV, endpoint.protocol().type(), ec); return ec ? iterator_type() : iterator_type::create( endpoint, host_name, service_name); } // Asynchronously resolve an endpoint to a list of entries. 
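// This second pair of functions performs the reverse lookup: an endpoint is
// translated back into host and service names via getnameinfo().  At the
// public API level both directions go through asio::ip::basic_resolver; an
// illustrative forward-lookup sketch (caller code, not part of this header):
//
//   asio::ip::tcp::resolver resolver(io_service);
//   asio::ip::tcp::resolver::query query("example.com", "http");
//   resolver.async_resolve(query,
//       [](const asio::error_code& ec,
//          asio::ip::tcp::resolver::iterator it) { /* iterate endpoints */ });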
template void async_resolve(implementation_type& impl, const endpoint_type& endpoint, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef resolve_endpoint_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl, endpoint, io_service_impl_, handler); ASIO_HANDLER_CREATION((p.p, "resolver", &impl, "async_resolve")); start_resolve_op(p.p); p.v = p.p = 0; } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_RESOLVER_SERVICE_HPP galera-3-25.3.20/asio/asio/detail/buffered_stream_storage.hpp0000644000015300001660000000547213042054732023670 0ustar jenkinsjenkins// // detail/buffered_stream_storage.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_BUFFERED_STREAM_STORAGE_HPP #define ASIO_DETAIL_BUFFERED_STREAM_STORAGE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/buffer.hpp" #include "asio/detail/assert.hpp" #include #include #include #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class buffered_stream_storage { public: // The type of the bytes stored in the buffer. typedef unsigned char byte_type; // The type used for offsets into the buffer. typedef std::size_t size_type; // Constructor. explicit buffered_stream_storage(std::size_t buffer_capacity) : begin_offset_(0), end_offset_(0), buffer_(buffer_capacity) { } /// Clear the buffer. void clear() { begin_offset_ = 0; end_offset_ = 0; } // Return a pointer to the beginning of the unread data. mutable_buffer data() { return asio::buffer(buffer_) + begin_offset_; } // Return a pointer to the beginning of the unread data. const_buffer data() const { return asio::buffer(buffer_) + begin_offset_; } // Is there no unread data in the buffer. bool empty() const { return begin_offset_ == end_offset_; } // Return the amount of unread data the is in the buffer. size_type size() const { return end_offset_ - begin_offset_; } // Resize the buffer to the specified length. void resize(size_type length) { ASIO_ASSERT(length <= capacity()); if (begin_offset_ + length <= capacity()) { end_offset_ = begin_offset_ + length; } else { using namespace std; // For memmove. memmove(&buffer_[0], &buffer_[0] + begin_offset_, size()); end_offset_ = length; begin_offset_ = 0; } } // Return the maximum size for data in the buffer. size_type capacity() const { return buffer_.size(); } // Consume multiple bytes from the beginning of the buffer. void consume(size_type count) { ASIO_ASSERT(begin_offset_ + count <= end_offset_); begin_offset_ += count; if (empty()) clear(); } private: // The offset to the beginning of the unread data. size_type begin_offset_; // The offset to the end of the unread data. size_type end_offset_; // The data in the buffer. 
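// Invariant: 0 <= begin_offset_ <= end_offset_ <= buffer_.size(), and the
// bytes in [begin_offset_, end_offset_) are the unread data.  For example
// (capacity permitting), resize(10) followed by consume(4) leaves size() == 6
// with data() referring to a region starting 4 bytes into the vector; once
// everything has been consumed, consume() calls clear() and both offsets
// return to zero.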
std::vector buffer_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_BUFFERED_STREAM_STORAGE_HPP galera-3-25.3.20/asio/asio/detail/null_event.hpp0000644000015300001660000000311213042054732021147 0ustar jenkinsjenkins// // detail/null_event.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_NULL_EVENT_HPP #define ASIO_DETAIL_NULL_EVENT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class null_event : private noncopyable { public: // Constructor. null_event() { } // Destructor. ~null_event() { } // Signal the event. (Retained for backward compatibility.) template void signal(Lock&) { } // Signal all waiters. template void signal_all(Lock&) { } // Unlock the mutex and signal one waiter. template void unlock_and_signal_one(Lock&) { } // If there's a waiter, unlock the mutex and signal it. template bool maybe_unlock_and_signal_one(Lock&) { return false; } // Reset the event. template void clear(Lock&) { } // Wait for the event to become signalled. template void wait(Lock&) { } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_HAS_THREADS) #endif // ASIO_DETAIL_NULL_EVENT_HPP galera-3-25.3.20/asio/asio/detail/pop_options.hpp0000644000015300001660000000463713042054732021362 0ustar jenkinsjenkins// // detail/pop_options.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // // No header guard #if defined(__COMO__) // Comeau C++ #elif defined(__DMC__) // Digital Mars C++ #elif defined(__INTEL_COMPILER) || defined(__ICL) \ || defined(__ICC) || defined(__ECC) // Intel C++ # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4) # pragma GCC visibility pop # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4) #elif defined(__clang__) // Clang # if defined(__OBJC__) # if !defined(__APPLE_CC__) || (__APPLE_CC__ <= 1) # if defined(ASIO_OBJC_WORKAROUND) # undef Protocol # undef id # undef ASIO_OBJC_WORKAROUND # endif # endif # endif # if !defined(_WIN32) && !defined(__WIN32__) && !defined(WIN32) # pragma GCC visibility pop # endif // !defined(_WIN32) && !defined(__WIN32__) && !defined(WIN32) #elif defined(__GNUC__) // GNU C++ # if defined(__MINGW32__) || defined(__CYGWIN__) # pragma pack (pop) # endif # if defined(__OBJC__) # if !defined(__APPLE_CC__) || (__APPLE_CC__ <= 1) # if defined(ASIO_OBJC_WORKAROUND) # undef Protocol # undef id # undef ASIO_OBJC_WORKAROUND # endif # endif # endif # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4) # pragma GCC visibility pop # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4) #elif defined(__KCC) // Kai C++ #elif defined(__sgi) // SGI MIPSpro C++ #elif defined(__DECCXX) // Compaq Tru64 Unix cxx #elif defined(__ghs) // Greenhills C++ #elif defined(__BORLANDC__) // Borland C++ # pragma option pop # pragma nopushoptwarn # pragma nopackwarning #elif defined(__MWERKS__) // Metrowerks CodeWarrior #elif defined(__SUNPRO_CC) // Sun Workshop Compiler C++ #elif defined(__HP_aCC) // HP aCC #elif defined(__MRC__) || defined(__SC__) // MPW MrCpp or SCpp #elif defined(__IBMCPP__) // IBM Visual Age #elif defined(_MSC_VER) // Microsoft Visual C++ // // Must remain the last #elif since some other vendors (Metrowerks, for example) // also #define _MSC_VER # pragma warning (pop) # pragma pack (pop) # if defined(__cplusplus_cli) || defined(__cplusplus_winrt) # if defined(ASIO_CLR_WORKAROUND) # undef generic # undef ASIO_CLR_WORKAROUND # endif # endif #endif galera-3-25.3.20/asio/asio/detail/reactive_socket_accept_op.hpp0000644000015300001660000001055513042054732024174 0ustar jenkinsjenkins// // detail/reactive_socket_accept_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_SOCKET_ACCEPT_OP_HPP #define ASIO_DETAIL_REACTIVE_SOCKET_ACCEPT_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_holder.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class reactive_socket_accept_op_base : public reactor_op { public: reactive_socket_accept_op_base(socket_type socket, socket_ops::state_type state, Socket& peer, const Protocol& protocol, typename Protocol::endpoint* peer_endpoint, func_type complete_func) : reactor_op(&reactive_socket_accept_op_base::do_perform, complete_func), socket_(socket), state_(state), peer_(peer), protocol_(protocol), peer_endpoint_(peer_endpoint) { } static bool do_perform(reactor_op* base) { reactive_socket_accept_op_base* o( static_cast(base)); std::size_t addrlen = o->peer_endpoint_ ? o->peer_endpoint_->capacity() : 0; socket_type new_socket = invalid_socket; bool result = socket_ops::non_blocking_accept(o->socket_, o->state_, o->peer_endpoint_ ? o->peer_endpoint_->data() : 0, o->peer_endpoint_ ? &addrlen : 0, o->ec_, new_socket); // On success, assign new connection to peer socket object. if (new_socket != invalid_socket) { socket_holder new_socket_holder(new_socket); if (o->peer_endpoint_) o->peer_endpoint_->resize(addrlen); if (!o->peer_.assign(o->protocol_, new_socket, o->ec_)) new_socket_holder.release(); } return result; } private: socket_type socket_; socket_ops::state_type state_; Socket& peer_; Protocol protocol_; typename Protocol::endpoint* peer_endpoint_; }; template class reactive_socket_accept_op : public reactive_socket_accept_op_base { public: ASIO_DEFINE_HANDLER_PTR(reactive_socket_accept_op); reactive_socket_accept_op(socket_type socket, socket_ops::state_type state, Socket& peer, const Protocol& protocol, typename Protocol::endpoint* peer_endpoint, Handler& handler) : reactive_socket_accept_op_base(socket, state, peer, protocol, peer_endpoint, &reactive_socket_accept_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. reactive_socket_accept_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder1 handler(o->handler_, o->ec_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. 
if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_REACTIVE_SOCKET_ACCEPT_OP_HPP galera-3-25.3.20/asio/asio/detail/win_iocp_handle_write_op.hpp0000644000015300001660000000622413042054732024035 0ustar jenkinsjenkins// // detail/win_iocp_handle_write_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_HANDLE_WRITE_OP_HPP #define ASIO_DETAIL_WIN_IOCP_HANDLE_WRITE_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/error.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class win_iocp_handle_write_op : public operation { public: ASIO_DEFINE_HANDLER_PTR(win_iocp_handle_write_op); win_iocp_handle_write_op(const ConstBufferSequence& buffers, Handler& handler) : operation(&win_iocp_handle_write_op::do_complete), buffers_(buffers), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& ec, std::size_t bytes_transferred) { // Take ownership of the operation object. win_iocp_handle_write_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) if (owner) { // Check whether buffers are still valid. buffer_sequence_adapter::validate(o->buffers_); } #endif // defined(ASIO_ENABLE_BUFFER_DEBUGGING) // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, ec, bytes_transferred); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. 
if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: ConstBufferSequence buffers_; Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_HANDLE_WRITE_OP_HPP galera-3-25.3.20/asio/asio/detail/signal_handler.hpp0000644000015300001660000000463213042054732021756 0ustar jenkinsjenkins// // detail/signal_handler.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SIGNAL_HANDLER_HPP #define ASIO_DETAIL_SIGNAL_HANDLER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/signal_op.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class signal_handler : public signal_op { public: ASIO_DEFINE_HANDLER_PTR(signal_handler); signal_handler(Handler& h) : signal_op(&signal_handler::do_complete), handler_(ASIO_MOVE_CAST(Handler)(h)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. signal_handler* h(static_cast(base)); ptr p = { asio::detail::addressof(h->handler_), h, h }; ASIO_HANDLER_COMPLETION((h)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(h->handler_, h->ec_, h->signal_number_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_SIGNAL_HANDLER_HPP galera-3-25.3.20/asio/asio/detail/posix_tss_ptr.hpp0000644000015300001660000000322313042054732021717 0ustar jenkinsjenkins// // detail/posix_tss_ptr.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_POSIX_TSS_PTR_HPP #define ASIO_DETAIL_POSIX_TSS_PTR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_PTHREADS) #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Helper function to create thread-specific storage. ASIO_DECL void posix_tss_ptr_create(pthread_key_t& key); template class posix_tss_ptr : private noncopyable { public: // Constructor. posix_tss_ptr() { posix_tss_ptr_create(tss_key_); } // Destructor. ~posix_tss_ptr() { ::pthread_key_delete(tss_key_); } // Get the value. operator T*() const { return static_cast(::pthread_getspecific(tss_key_)); } // Set the value. void operator=(T* value) { ::pthread_setspecific(tss_key_, value); } private: // Thread-specific storage to allow unlocked access to determine whether a // thread is a member of the pool. pthread_key_t tss_key_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/posix_tss_ptr.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_PTHREADS) #endif // ASIO_DETAIL_POSIX_TSS_PTR_HPP galera-3-25.3.20/asio/asio/detail/reactive_null_buffers_op.hpp0000644000015300001660000000522213042054732024046 0ustar jenkinsjenkins// // detail/reactive_null_buffers_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_NULL_BUFFERS_OP_HPP #define ASIO_DETAIL_REACTIVE_NULL_BUFFERS_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class reactive_null_buffers_op : public reactor_op { public: ASIO_DEFINE_HANDLER_PTR(reactive_null_buffers_op); reactive_null_buffers_op(Handler& handler) : reactor_op(&reactive_null_buffers_op::do_perform, &reactive_null_buffers_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static bool do_perform(reactor_op*) { return true; } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. reactive_null_buffers_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. 
detail::binder2 handler(o->handler_, o->ec_, o->bytes_transferred_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_REACTIVE_NULL_BUFFERS_OP_HPP galera-3-25.3.20/asio/asio/detail/std_mutex.hpp0000644000015300001660000000242213042054732021013 0ustar jenkinsjenkins// // detail/std_mutex.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_STD_MUTEX_HPP #define ASIO_DETAIL_STD_MUTEX_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/scoped_lock.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class std_event; class std_mutex : private noncopyable { public: typedef asio::detail::scoped_lock scoped_lock; // Constructor. std_mutex() { } // Destructor. ~std_mutex() { } // Lock the mutex. void lock() { mutex_.lock(); } // Unlock the mutex. void unlock() { mutex_.unlock(); } private: friend class std_event; std::mutex mutex_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) #endif // ASIO_DETAIL_STD_MUTEX_HPP galera-3-25.3.20/asio/asio/detail/handler_tracking.hpp0000644000015300001660000001111413042054732022274 0ustar jenkinsjenkins// // detail/handler_tracking.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_HANDLER_TRACKING_HPP #define ASIO_DETAIL_HANDLER_TRACKING_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_ENABLE_HANDLER_TRACKING) # include "asio/error_code.hpp" # include "asio/detail/cstdint.hpp" # include "asio/detail/static_mutex.hpp" # include "asio/detail/tss_ptr.hpp" #endif // defined(ASIO_ENABLE_HANDLER_TRACKING) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { #if defined(ASIO_ENABLE_HANDLER_TRACKING) class handler_tracking { public: class completion; // Base class for objects containing tracked handlers. class tracked_handler { private: // Only the handler_tracking class will have access to the id. friend class handler_tracking; friend class completion; uint64_t id_; protected: // Constructor initialises with no id. tracked_handler() : id_(0) {} // Prevent deletion through this type. ~tracked_handler() {} }; // Initialise the tracking system. ASIO_DECL static void init(); // Record the creation of a tracked handler. 
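// Handler tracking is a purely compile-time facility: unless the library is
// built with ASIO_ENABLE_HANDLER_TRACKING defined, the ASIO_HANDLER_*
// macros at the end of this header expand to (void)0 and no tracking code is
// generated.  When it is enabled, every tracked handler receives an id and
// one diagnostic line per creation/invocation event is written to standard
// error, which helps locate completion handlers that are never invoked.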
ASIO_DECL static void creation(tracked_handler* h, const char* object_type, void* object, const char* op_name); class completion { public: // Constructor records that handler is to be invoked with no arguments. ASIO_DECL explicit completion(tracked_handler* h); // Destructor records only when an exception is thrown from the handler, or // if the memory is being freed without the handler having been invoked. ASIO_DECL ~completion(); // Records that handler is to be invoked with no arguments. ASIO_DECL void invocation_begin(); // Records that handler is to be invoked with one arguments. ASIO_DECL void invocation_begin(const asio::error_code& ec); // Constructor records that handler is to be invoked with two arguments. ASIO_DECL void invocation_begin( const asio::error_code& ec, std::size_t bytes_transferred); // Constructor records that handler is to be invoked with two arguments. ASIO_DECL void invocation_begin( const asio::error_code& ec, int signal_number); // Constructor records that handler is to be invoked with two arguments. ASIO_DECL void invocation_begin( const asio::error_code& ec, const char* arg); // Record that handler invocation has ended. ASIO_DECL void invocation_end(); private: friend class handler_tracking; uint64_t id_; bool invoked_; completion* next_; }; // Record an operation that affects pending handlers. ASIO_DECL static void operation(const char* object_type, void* object, const char* op_name); // Write a line of output. ASIO_DECL static void write_line(const char* format, ...); private: struct tracking_state; ASIO_DECL static tracking_state* get_state(); }; # define ASIO_INHERIT_TRACKED_HANDLER \ : public asio::detail::handler_tracking::tracked_handler # define ASIO_ALSO_INHERIT_TRACKED_HANDLER \ , public asio::detail::handler_tracking::tracked_handler # define ASIO_HANDLER_TRACKING_INIT \ asio::detail::handler_tracking::init() # define ASIO_HANDLER_CREATION(args) \ asio::detail::handler_tracking::creation args # define ASIO_HANDLER_COMPLETION(args) \ asio::detail::handler_tracking::completion tracked_completion args # define ASIO_HANDLER_INVOCATION_BEGIN(args) \ tracked_completion.invocation_begin args # define ASIO_HANDLER_INVOCATION_END \ tracked_completion.invocation_end() # define ASIO_HANDLER_OPERATION(args) \ asio::detail::handler_tracking::operation args #else // defined(ASIO_ENABLE_HANDLER_TRACKING) # define ASIO_INHERIT_TRACKED_HANDLER # define ASIO_ALSO_INHERIT_TRACKED_HANDLER # define ASIO_HANDLER_TRACKING_INIT (void)0 # define ASIO_HANDLER_CREATION(args) (void)0 # define ASIO_HANDLER_COMPLETION(args) (void)0 # define ASIO_HANDLER_INVOCATION_BEGIN(args) (void)0 # define ASIO_HANDLER_INVOCATION_END (void)0 # define ASIO_HANDLER_OPERATION(args) (void)0 #endif // defined(ASIO_ENABLE_HANDLER_TRACKING) } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/handler_tracking.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_DETAIL_HANDLER_TRACKING_HPP galera-3-25.3.20/asio/asio/detail/call_stack.hpp0000644000015300001660000000554413042054732021107 0ustar jenkinsjenkins// // detail/call_stack.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_CALL_STACK_HPP #define ASIO_DETAIL_CALL_STACK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/tss_ptr.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Helper class to determine whether or not the current thread is inside an // invocation of io_service::run() for a specified io_service object. template class call_stack { public: // Context class automatically pushes the key/value pair on to the stack. class context : private noncopyable { public: // Push the key on to the stack. explicit context(Key* k) : key_(k), next_(call_stack::top_) { value_ = reinterpret_cast(this); call_stack::top_ = this; } // Push the key/value pair on to the stack. context(Key* k, Value& v) : key_(k), value_(&v), next_(call_stack::top_) { call_stack::top_ = this; } // Pop the key/value pair from the stack. ~context() { call_stack::top_ = next_; } // Find the next context with the same key. Value* next_by_key() const { context* elem = next_; while (elem) { if (elem->key_ == key_) return elem->value_; elem = elem->next_; } return 0; } private: friend class call_stack; // The key associated with the context. Key* key_; // The value associated with the context. Value* value_; // The next element in the stack. context* next_; }; friend class context; // Determine whether the specified owner is on the stack. Returns address of // key if present, 0 otherwise. static Value* contains(Key* k) { context* elem = top_; while (elem) { if (elem->key_ == k) return elem->value_; elem = elem->next_; } return 0; } // Obtain the value at the top of the stack. static Value* top() { context* elem = top_; return elem ? elem->value_ : 0; } private: // The top of the stack of calls for the current thread. static tss_ptr top_; }; template tss_ptr::context> call_stack::top_; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_CALL_STACK_HPP galera-3-25.3.20/asio/asio/detail/winrt_ssocket_service.hpp0000644000015300001660000001451513042054732023423 0ustar jenkinsjenkins// // detail/winrt_ssocket_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_SSOCKET_SERVICE_HPP #define ASIO_DETAIL_WINRT_SSOCKET_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/winrt_socket_connect_op.hpp" #include "asio/detail/winrt_ssocket_service_base.hpp" #include "asio/detail/winrt_utils.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class winrt_ssocket_service : public winrt_ssocket_service_base { public: // The protocol type. typedef Protocol protocol_type; // The endpoint type. typedef typename Protocol::endpoint endpoint_type; // The native type of a socket. 
typedef Windows::Networking::Sockets::StreamSocket^ native_handle_type; // The implementation type of the socket. struct implementation_type : base_implementation_type { // Default constructor. implementation_type() : base_implementation_type(), protocol_(endpoint_type().protocol()) { } // The protocol associated with the socket. protocol_type protocol_; }; // Constructor. winrt_ssocket_service(asio::io_service& io_service) : winrt_ssocket_service_base(io_service) { } // Move-construct a new socket implementation. void move_construct(implementation_type& impl, implementation_type& other_impl) { this->base_move_construct(impl, other_impl); impl.protocol_ = other_impl.protocol_; other_impl.protocol_ = endpoint_type().protocol(); } // Move-assign from another socket implementation. void move_assign(implementation_type& impl, winrt_ssocket_service& other_service, implementation_type& other_impl) { this->base_move_assign(impl, other_service, other_impl); impl.protocol_ = other_impl.protocol_; other_impl.protocol_ = endpoint_type().protocol(); } // Move-construct a new socket implementation from another protocol type. template void converting_move_construct(implementation_type& impl, typename winrt_ssocket_service< Protocol1>::implementation_type& other_impl) { this->base_move_construct(impl, other_impl); impl.protocol_ = protocol_type(other_impl.protocol_); other_impl.protocol_ = typename Protocol1::endpoint().protocol(); } // Open a new socket implementation. asio::error_code open(implementation_type& impl, const protocol_type& protocol, asio::error_code& ec) { if (is_open(impl)) { ec = asio::error::already_open; return ec; } try { impl.socket_ = ref new Windows::Networking::Sockets::StreamSocket; impl.protocol_ = protocol; ec = asio::error_code(); } catch (Platform::Exception^ e) { ec = asio::error_code(e->HResult, asio::system_category()); } return ec; } // Assign a native socket to a socket implementation. asio::error_code assign(implementation_type& impl, const protocol_type& protocol, const native_handle_type& native_socket, asio::error_code& ec) { if (is_open(impl)) { ec = asio::error::already_open; return ec; } impl.socket_ = native_socket; impl.protocol_ = protocol; ec = asio::error_code(); return ec; } // Bind the socket to the specified local endpoint. asio::error_code bind(implementation_type&, const endpoint_type&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Get the local endpoint. endpoint_type local_endpoint(const implementation_type& impl, asio::error_code& ec) const { endpoint_type endpoint; endpoint.resize(do_get_endpoint(impl, true, endpoint.data(), endpoint.size(), ec)); return endpoint; } // Get the remote endpoint. endpoint_type remote_endpoint(const implementation_type& impl, asio::error_code& ec) const { endpoint_type endpoint; endpoint.resize(do_get_endpoint(impl, false, endpoint.data(), endpoint.size(), ec)); return endpoint; } // Set a socket option. template asio::error_code set_option(implementation_type& impl, const Option& option, asio::error_code& ec) { return do_set_option(impl, option.level(impl.protocol_), option.name(impl.protocol_), option.data(impl.protocol_), option.size(impl.protocol_), ec); } // Get a socket option. 
template asio::error_code get_option(const implementation_type& impl, Option& option, asio::error_code& ec) const { std::size_t size = option.size(impl.protocol_); do_get_option(impl, option.level(impl.protocol_), option.name(impl.protocol_), option.data(impl.protocol_), &size, ec); if (!ec) option.resize(impl.protocol_, size); return ec; } // Connect the socket to the specified endpoint. asio::error_code connect(implementation_type& impl, const endpoint_type& peer_endpoint, asio::error_code& ec) { return do_connect(impl, peer_endpoint.data(), ec); } // Start an asynchronous connect. template void async_connect(implementation_type& impl, const endpoint_type& peer_endpoint, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef winrt_socket_connect_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_connect")); start_connect_op(impl, peer_endpoint.data(), p.p, is_continuation); p.v = p.p = 0; } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_WINRT_SSOCKET_SERVICE_HPP galera-3-25.3.20/asio/asio/detail/select_reactor.hpp0000644000015300001660000001650413042054732022003 0ustar jenkinsjenkins// // detail/select_reactor.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SELECT_REACTOR_HPP #define ASIO_DETAIL_SELECT_REACTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) \ || (!defined(ASIO_HAS_DEV_POLL) \ && !defined(ASIO_HAS_EPOLL) \ && !defined(ASIO_HAS_KQUEUE) \ && !defined(ASIO_WINDOWS_RUNTIME)) #include #include "asio/detail/fd_set_adapter.hpp" #include "asio/detail/limits.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/reactor_op_queue.hpp" #include "asio/detail/select_interrupter.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/timer_queue_base.hpp" #include "asio/detail/timer_queue_set.hpp" #include "asio/detail/wait_op.hpp" #include "asio/io_service.hpp" #if defined(ASIO_HAS_IOCP) # include "asio/detail/thread.hpp" #endif // defined(ASIO_HAS_IOCP) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class select_reactor : public asio::detail::service_base { public: #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) enum op_types { read_op = 0, write_op = 1, except_op = 2, max_select_ops = 3, connect_op = 3, max_ops = 4 }; #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) enum op_types { read_op = 0, write_op = 1, except_op = 2, max_select_ops = 3, connect_op = 1, max_ops = 3 }; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Per-descriptor data. struct per_descriptor_data { }; // Constructor. ASIO_DECL select_reactor(asio::io_service& io_service); // Destructor. ASIO_DECL ~select_reactor(); // Destroy all user-defined handler objects owned by the service. 
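// shutdown_service() is invoked when the owning io_service is destroyed; any
// operations still queued in the reactor at that point are destroyed without
// their handlers being invoked.  On IOCP builds this reactor runs in a
// background thread of its own (see run_thread() and thread_ below); on
// other platforms it is driven as the io_service task.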
ASIO_DECL void shutdown_service(); // Recreate internal descriptors following a fork. ASIO_DECL void fork_service( asio::io_service::fork_event fork_ev); // Initialise the task, but only if the reactor is not in its own thread. ASIO_DECL void init_task(); // Register a socket with the reactor. Returns 0 on success, system error // code on failure. ASIO_DECL int register_descriptor(socket_type, per_descriptor_data&); // Register a descriptor with an associated single operation. Returns 0 on // success, system error code on failure. ASIO_DECL int register_internal_descriptor( int op_type, socket_type descriptor, per_descriptor_data& descriptor_data, reactor_op* op); // Post a reactor operation for immediate completion. void post_immediate_completion(reactor_op* op, bool is_continuation) { io_service_.post_immediate_completion(op, is_continuation); } // Start a new operation. The reactor operation will be performed when the // given descriptor is flagged as ready, or an error has occurred. ASIO_DECL void start_op(int op_type, socket_type descriptor, per_descriptor_data&, reactor_op* op, bool is_continuation, bool); // Cancel all operations associated with the given descriptor. The // handlers associated with the descriptor will be invoked with the // operation_aborted error. ASIO_DECL void cancel_ops(socket_type descriptor, per_descriptor_data&); // Cancel any operations that are running against the descriptor and remove // its registration from the reactor. ASIO_DECL void deregister_descriptor(socket_type descriptor, per_descriptor_data&, bool closing); // Remote the descriptor's registration from the reactor. ASIO_DECL void deregister_internal_descriptor( socket_type descriptor, per_descriptor_data& descriptor_data); // Move descriptor registration from one descriptor_data object to another. ASIO_DECL void move_descriptor(socket_type descriptor, per_descriptor_data& target_descriptor_data, per_descriptor_data& source_descriptor_data); // Add a new timer queue to the reactor. template void add_timer_queue(timer_queue& queue); // Remove a timer queue from the reactor. template void remove_timer_queue(timer_queue& queue); // Schedule a new operation in the given timer queue to expire at the // specified absolute time. template void schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op); // Cancel the timer operations associated with the given token. Returns the // number of operations that have been posted or dispatched. template std::size_t cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled = (std::numeric_limits::max)()); // Run select once until interrupted or events are ready to be dispatched. ASIO_DECL void run(bool block, op_queue& ops); // Interrupt the select loop. ASIO_DECL void interrupt(); private: #if defined(ASIO_HAS_IOCP) // Run the select loop in the thread. ASIO_DECL void run_thread(); // Entry point for the select loop thread. ASIO_DECL static void call_run_thread(select_reactor* reactor); #endif // defined(ASIO_HAS_IOCP) // Helper function to add a new timer queue. ASIO_DECL void do_add_timer_queue(timer_queue_base& queue); // Helper function to remove a timer queue. ASIO_DECL void do_remove_timer_queue(timer_queue_base& queue); // Get the timeout value for the select call. ASIO_DECL timeval* get_timeout(timeval& tv); // Cancel all operations associated with the given descriptor. This function // does not acquire the select_reactor's mutex. 
ASIO_DECL void cancel_ops_unlocked(socket_type descriptor, const asio::error_code& ec); // The io_service implementation used to post completions. io_service_impl& io_service_; // Mutex to protect access to internal data. asio::detail::mutex mutex_; // The interrupter is used to break a blocking select call. select_interrupter interrupter_; // The queues of read, write and except operations. reactor_op_queue op_queue_[max_ops]; // The file descriptor sets to be passed to the select system call. fd_set_adapter fd_sets_[max_select_ops]; // The timer queues. timer_queue_set timer_queues_; #if defined(ASIO_HAS_IOCP) // Does the reactor loop thread need to stop. bool stop_thread_; // The thread that is running the reactor loop. asio::detail::thread* thread_; #endif // defined(ASIO_HAS_IOCP) // Whether the service has been shut down. bool shutdown_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/detail/impl/select_reactor.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/select_reactor.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_IOCP) // || (!defined(ASIO_HAS_DEV_POLL) // && !defined(ASIO_HAS_EPOLL) // && !defined(ASIO_HAS_KQUEUE) // && !defined(ASIO_WINDOWS_RUNTIME)) #endif // ASIO_DETAIL_SELECT_REACTOR_HPP galera-3-25.3.20/asio/asio/detail/timer_scheduler_fwd.hpp0000644000015300001660000000215413042054732023017 0ustar jenkinsjenkins// // detail/timer_scheduler_fwd.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_TIMER_SCHEDULER_FWD_HPP #define ASIO_DETAIL_TIMER_SCHEDULER_FWD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" namespace asio { namespace detail { #if defined(ASIO_WINDOWS_RUNTIME) typedef class winrt_timer_scheduler timer_scheduler; #elif defined(ASIO_HAS_IOCP) typedef class win_iocp_io_service timer_scheduler; #elif defined(ASIO_HAS_EPOLL) typedef class epoll_reactor timer_scheduler; #elif defined(ASIO_HAS_KQUEUE) typedef class kqueue_reactor timer_scheduler; #elif defined(ASIO_HAS_DEV_POLL) typedef class dev_poll_reactor timer_scheduler; #else typedef class select_reactor timer_scheduler; #endif } // namespace detail } // namespace asio #endif // ASIO_DETAIL_TIMER_SCHEDULER_FWD_HPP galera-3-25.3.20/asio/asio/detail/task_io_service_operation.hpp0000644000015300001660000000347013042054732024234 0ustar jenkinsjenkins// // detail/task_io_service_operation.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_TASK_IO_SERVICE_OPERATION_HPP #define ASIO_DETAIL_TASK_IO_SERVICE_OPERATION_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/error_code.hpp" #include "asio/detail/handler_tracking.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class task_io_service; // Base class for all operations. 
A function pointer is used instead of virtual // functions to avoid the associated overhead. class task_io_service_operation ASIO_INHERIT_TRACKED_HANDLER { public: void complete(task_io_service& owner, const asio::error_code& ec, std::size_t bytes_transferred) { func_(&owner, this, ec, bytes_transferred); } void destroy() { func_(0, this, asio::error_code(), 0); } protected: typedef void (*func_type)(task_io_service*, task_io_service_operation*, const asio::error_code&, std::size_t); task_io_service_operation(func_type func) : next_(0), func_(func), task_result_(0) { } // Prevents deletion through this type. ~task_io_service_operation() { } private: friend class op_queue_access; task_io_service_operation* next_; func_type func_; protected: friend class task_io_service; unsigned int task_result_; // Passed into bytes transferred. }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_TASK_IO_SERVICE_OPERATION_HPP galera-3-25.3.20/asio/asio/detail/win_iocp_socket_accept_op.hpp0000644000015300001660000001177013042054732024201 0ustar jenkinsjenkins// // detail/win_iocp_socket_accept_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_SOCKET_ACCEPT_OP_HPP #define ASIO_DETAIL_WIN_IOCP_SOCKET_ACCEPT_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/win_iocp_socket_service_base.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class win_iocp_socket_accept_op : public operation { public: ASIO_DEFINE_HANDLER_PTR(win_iocp_socket_accept_op); win_iocp_socket_accept_op(win_iocp_socket_service_base& socket_service, socket_type socket, Socket& peer, const Protocol& protocol, typename Protocol::endpoint* peer_endpoint, bool enable_connection_aborted, Handler& handler) : operation(&win_iocp_socket_accept_op::do_complete), socket_service_(socket_service), socket_(socket), peer_(peer), protocol_(protocol), peer_endpoint_(peer_endpoint), enable_connection_aborted_(enable_connection_aborted), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } socket_holder& new_socket() { return new_socket_; } void* output_buffer() { return output_buffer_; } DWORD address_length() { return sizeof(sockaddr_storage_type) + 16; } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& result_ec, std::size_t /*bytes_transferred*/) { asio::error_code ec(result_ec); // Take ownership of the operation object. 
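// The ptr type generated by ASIO_DEFINE_HANDLER_PTR is a small RAII helper:
// unless ownership is released by zeroing its members (as the restart path
// below does with p.v = p.p = 0), it destroys the operation and returns its
// storage through the handler's allocation hooks
// (asio_handler_allocate/asio_handler_deallocate).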
win_iocp_socket_accept_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; if (owner) { typename Protocol::endpoint peer_endpoint; std::size_t addr_len = peer_endpoint.capacity(); socket_ops::complete_iocp_accept(o->socket_, o->output_buffer(), o->address_length(), peer_endpoint.data(), &addr_len, o->new_socket_.get(), ec); // Restart the accept operation if we got the connection_aborted error // and the enable_connection_aborted socket option is not set. if (ec == asio::error::connection_aborted && !o->enable_connection_aborted_) { o->reset(); o->socket_service_.restart_accept_op(o->socket_, o->new_socket_, o->protocol_.family(), o->protocol_.type(), o->protocol_.protocol(), o->output_buffer(), o->address_length(), o); p.v = p.p = 0; return; } // If the socket was successfully accepted, transfer ownership of the // socket to the peer object. if (!ec) { o->peer_.assign(o->protocol_, typename Socket::native_handle_type( o->new_socket_.get(), peer_endpoint), ec); if (!ec) o->new_socket_.release(); } // Pass endpoint back to caller. if (o->peer_endpoint_) *o->peer_endpoint_ = peer_endpoint; } ASIO_HANDLER_COMPLETION((o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder1 handler(o->handler_, ec); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: win_iocp_socket_service_base& socket_service_; socket_type socket_; socket_holder new_socket_; Socket& peer_; Protocol protocol_; typename Protocol::endpoint* peer_endpoint_; unsigned char output_buffer_[(sizeof(sockaddr_storage_type) + 16) * 2]; bool enable_connection_aborted_; Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_SOCKET_ACCEPT_OP_HPP galera-3-25.3.20/asio/asio/detail/task_io_service.hpp0000644000015300001660000001376413042054732022163 0ustar jenkinsjenkins// // detail/task_io_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_TASK_IO_SERVICE_HPP #define ASIO_DETAIL_TASK_IO_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_IOCP) #include "asio/error_code.hpp" #include "asio/io_service.hpp" #include "asio/detail/atomic_count.hpp" #include "asio/detail/call_stack.hpp" #include "asio/detail/event.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/reactor_fwd.hpp" #include "asio/detail/task_io_service_operation.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct task_io_service_thread_info; class task_io_service : public asio::detail::service_base { public: typedef task_io_service_operation operation; // Constructor. Specifies the number of concurrent threads that are likely to // run the io_service. If set to 1 certain optimisation are performed. ASIO_DECL task_io_service(asio::io_service& io_service, std::size_t concurrency_hint = 0); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Initialise the task, if required. ASIO_DECL void init_task(); // Run the event loop until interrupted or no more work. ASIO_DECL std::size_t run(asio::error_code& ec); // Run until interrupted or one operation is performed. ASIO_DECL std::size_t run_one(asio::error_code& ec); // Poll for operations without blocking. ASIO_DECL std::size_t poll(asio::error_code& ec); // Poll for one operation without blocking. ASIO_DECL std::size_t poll_one(asio::error_code& ec); // Interrupt the event processing loop. ASIO_DECL void stop(); // Determine whether the io_service is stopped. ASIO_DECL bool stopped() const; // Reset in preparation for a subsequent run invocation. ASIO_DECL void reset(); // Notify that some work has started. void work_started() { ++outstanding_work_; } // Notify that some work has finished. void work_finished() { if (--outstanding_work_ == 0) stop(); } // Return whether a handler can be dispatched immediately. bool can_dispatch() { return thread_call_stack::contains(this) != 0; } // Request invocation of the given handler. template void dispatch(Handler& handler); // Request invocation of the given handler and return immediately. template void post(Handler& handler); // Request invocation of the given operation and return immediately. Assumes // that work_started() has not yet been called for the operation. ASIO_DECL void post_immediate_completion( operation* op, bool is_continuation); // Request invocation of the given operation and return immediately. Assumes // that work_started() was previously called for the operation. ASIO_DECL void post_deferred_completion(operation* op); // Request invocation of the given operations and return immediately. Assumes // that work_started() was previously called for each operation. ASIO_DECL void post_deferred_completions(op_queue& ops); // Process unfinished operations as part of a shutdown_service operation. // Assumes that work_started() was previously called for the operations. ASIO_DECL void abandon_operations(op_queue& ops); private: // Structure containing thread-specific data. typedef task_io_service_thread_info thread_info; // Enqueue the given operation following a failed attempt to dispatch the // operation for immediate invocation. ASIO_DECL void do_dispatch(operation* op); // Run at most one operation. 
May block. ASIO_DECL std::size_t do_run_one(mutex::scoped_lock& lock, thread_info& this_thread, const asio::error_code& ec); // Poll for at most one operation. ASIO_DECL std::size_t do_poll_one(mutex::scoped_lock& lock, thread_info& this_thread, const asio::error_code& ec); // Stop the task and all idle threads. ASIO_DECL void stop_all_threads(mutex::scoped_lock& lock); // Wake a single idle thread, or the task, and always unlock the mutex. ASIO_DECL void wake_one_thread_and_unlock( mutex::scoped_lock& lock); // Helper class to perform task-related operations on block exit. struct task_cleanup; friend struct task_cleanup; // Helper class to call work-related operations on block exit. struct work_cleanup; friend struct work_cleanup; // Whether to optimise for single-threaded use cases. const bool one_thread_; // Mutex to protect access to internal data. mutable mutex mutex_; // Event to wake up blocked threads. event wakeup_event_; // The task to be run by this service. reactor* task_; // Operation object to represent the position of the task in the queue. struct task_operation : operation { task_operation() : operation(0) {} } task_operation_; // Whether the task has been interrupted. bool task_interrupted_; // The count of unfinished work. atomic_count outstanding_work_; // The queue of handlers that are ready to be delivered. op_queue op_queue_; // Flag to indicate that the dispatcher has been stopped. bool stopped_; // Flag to indicate that the dispatcher has been shut down. bool shutdown_; // Per-thread call stack to track the state of each thread in the io_service. typedef call_stack thread_call_stack; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/detail/impl/task_io_service.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/task_io_service.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // !defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_TASK_IO_SERVICE_HPP galera-3-25.3.20/asio/asio/detail/posix_fd_set_adapter.hpp0000644000015300001660000000546213042054732023174 0ustar jenkinsjenkins// // detail/posix_fd_set_adapter.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_POSIX_FD_SET_ADAPTER_HPP #define ASIO_DETAIL_POSIX_FD_SET_ADAPTER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS) \ && !defined(__CYGWIN__) \ && !defined(ASIO_WINDOWS_RUNTIME) #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/reactor_op_queue.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Adapts the FD_SET type to meet the Descriptor_Set concept's requirements. class posix_fd_set_adapter : noncopyable { public: posix_fd_set_adapter() : max_descriptor_(invalid_socket) { using namespace std; // Needed for memset on Solaris. FD_ZERO(&fd_set_); } void reset() { using namespace std; // Needed for memset on Solaris. 
FD_ZERO(&fd_set_); } bool set(socket_type descriptor) { if (descriptor < (socket_type)FD_SETSIZE) { if (max_descriptor_ == invalid_socket || descriptor > max_descriptor_) max_descriptor_ = descriptor; FD_SET(descriptor, &fd_set_); return true; } return false; } void set(reactor_op_queue& operations, op_queue& ops) { reactor_op_queue::iterator i = operations.begin(); while (i != operations.end()) { reactor_op_queue::iterator op_iter = i++; if (!set(op_iter->first)) { asio::error_code ec(error::fd_set_failure); operations.cancel_operations(op_iter, ops, ec); } } } bool is_set(socket_type descriptor) const { return FD_ISSET(descriptor, &fd_set_) != 0; } operator fd_set*() { return &fd_set_; } socket_type max_descriptor() const { return max_descriptor_; } void perform(reactor_op_queue& operations, op_queue& ops) const { reactor_op_queue::iterator i = operations.begin(); while (i != operations.end()) { reactor_op_queue::iterator op_iter = i++; if (is_set(op_iter->first)) operations.perform_operations(op_iter, ops); } } private: mutable fd_set fd_set_; socket_type max_descriptor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_WINDOWS) // && !defined(__CYGWIN__) // && !defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_POSIX_FD_SET_ADAPTER_HPP galera-3-25.3.20/asio/asio/detail/timer_queue_base.hpp0000644000015300001660000000317513042054732022323 0ustar jenkinsjenkins// // detail/timer_queue_base.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_TIMER_QUEUE_BASE_HPP #define ASIO_DETAIL_TIMER_QUEUE_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class timer_queue_base : private noncopyable { public: // Constructor. timer_queue_base() : next_(0) {} // Destructor. virtual ~timer_queue_base() {} // Whether there are no timers in the queue. virtual bool empty() const = 0; // Get the time to wait until the next timer. virtual long wait_duration_msec(long max_duration) const = 0; // Get the time to wait until the next timer. virtual long wait_duration_usec(long max_duration) const = 0; // Dequeue all ready timers. virtual void get_ready_timers(op_queue& ops) = 0; // Dequeue all timers. virtual void get_all_timers(op_queue& ops) = 0; private: friend class timer_queue_set; // Next timer queue in the set. timer_queue_base* next_; }; template class timer_queue; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_TIMER_QUEUE_BASE_HPP galera-3-25.3.20/asio/asio/detail/scoped_lock.hpp0000644000015300001660000000351313042054732021266 0ustar jenkinsjenkins// // detail/scoped_lock.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SCOPED_LOCK_HPP #define ASIO_DETAIL_SCOPED_LOCK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Helper class to lock and unlock a mutex automatically. template class scoped_lock : private noncopyable { public: // Tag type used to distinguish constructors. enum adopt_lock_t { adopt_lock }; // Constructor adopts a lock that is already held. scoped_lock(Mutex& m, adopt_lock_t) : mutex_(m), locked_(true) { } // Constructor acquires the lock. explicit scoped_lock(Mutex& m) : mutex_(m) { mutex_.lock(); locked_ = true; } // Destructor releases the lock. ~scoped_lock() { if (locked_) mutex_.unlock(); } // Explicitly acquire the lock. void lock() { if (!locked_) { mutex_.lock(); locked_ = true; } } // Explicitly release the lock. void unlock() { if (locked_) { mutex_.unlock(); locked_ = false; } } // Test whether the lock is held. bool locked() const { return locked_; } // Get the underlying mutex. Mutex& mutex() { return mutex_; } private: // The underlying mutex. Mutex& mutex_; // Whether the mutex is currently locked or unlocked. bool locked_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_SCOPED_LOCK_HPP galera-3-25.3.20/asio/asio/detail/reactive_socket_recvfrom_op.hpp0000644000015300001660000001042013042054732024547 0ustar jenkinsjenkins// // detail/reactive_socket_recvfrom_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_SOCKET_RECVFROM_OP_HPP #define ASIO_DETAIL_REACTIVE_SOCKET_RECVFROM_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class reactive_socket_recvfrom_op_base : public reactor_op { public: reactive_socket_recvfrom_op_base(socket_type socket, int protocol_type, const MutableBufferSequence& buffers, Endpoint& endpoint, socket_base::message_flags flags, func_type complete_func) : reactor_op(&reactive_socket_recvfrom_op_base::do_perform, complete_func), socket_(socket), protocol_type_(protocol_type), buffers_(buffers), sender_endpoint_(endpoint), flags_(flags) { } static bool do_perform(reactor_op* base) { reactive_socket_recvfrom_op_base* o( static_cast(base)); buffer_sequence_adapter bufs(o->buffers_); std::size_t addr_len = o->sender_endpoint_.capacity(); bool result = socket_ops::non_blocking_recvfrom(o->socket_, bufs.buffers(), bufs.count(), o->flags_, o->sender_endpoint_.data(), &addr_len, o->ec_, o->bytes_transferred_); if (result && !o->ec_) o->sender_endpoint_.resize(addr_len); return result; } private: socket_type socket_; int protocol_type_; MutableBufferSequence buffers_; Endpoint& sender_endpoint_; socket_base::message_flags flags_; }; template class reactive_socket_recvfrom_op : public reactive_socket_recvfrom_op_base { public: ASIO_DEFINE_HANDLER_PTR(reactive_socket_recvfrom_op); reactive_socket_recvfrom_op(socket_type socket, int protocol_type, const MutableBufferSequence& buffers, Endpoint& endpoint, socket_base::message_flags flags, Handler& handler) : reactive_socket_recvfrom_op_base( socket, protocol_type, buffers, endpoint, flags, &reactive_socket_recvfrom_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. reactive_socket_recvfrom_op* o( static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, o->bytes_transferred_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. 
if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_REACTIVE_SOCKET_RECVFROM_OP_HPP galera-3-25.3.20/asio/asio/detail/reactive_socket_service.hpp0000644000015300001660000003554213042054732023702 0ustar jenkinsjenkins// // detail/reactive_socket_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_SOCKET_SERVICE_HPP #define ASIO_DETAIL_REACTIVE_SOCKET_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_IOCP) #include "asio/buffer.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/socket_base.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/reactive_null_buffers_op.hpp" #include "asio/detail/reactive_socket_accept_op.hpp" #include "asio/detail/reactive_socket_connect_op.hpp" #include "asio/detail/reactive_socket_recvfrom_op.hpp" #include "asio/detail/reactive_socket_sendto_op.hpp" #include "asio/detail/reactive_socket_service_base.hpp" #include "asio/detail/reactor.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_holder.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class reactive_socket_service : public reactive_socket_service_base { public: // The protocol type. typedef Protocol protocol_type; // The endpoint type. typedef typename Protocol::endpoint endpoint_type; // The native type of a socket. typedef socket_type native_handle_type; // The implementation type of the socket. struct implementation_type : reactive_socket_service_base::base_implementation_type { // Default constructor. implementation_type() : protocol_(endpoint_type().protocol()) { } // The protocol associated with the socket. protocol_type protocol_; }; // Constructor. reactive_socket_service(asio::io_service& io_service) : reactive_socket_service_base(io_service) { } // Move-construct a new socket implementation. void move_construct(implementation_type& impl, implementation_type& other_impl) { this->base_move_construct(impl, other_impl); impl.protocol_ = other_impl.protocol_; other_impl.protocol_ = endpoint_type().protocol(); } // Move-assign from another socket implementation. void move_assign(implementation_type& impl, reactive_socket_service_base& other_service, implementation_type& other_impl) { this->base_move_assign(impl, other_service, other_impl); impl.protocol_ = other_impl.protocol_; other_impl.protocol_ = endpoint_type().protocol(); } // Move-construct a new socket implementation from another protocol type. 
template void converting_move_construct(implementation_type& impl, typename reactive_socket_service< Protocol1>::implementation_type& other_impl) { this->base_move_construct(impl, other_impl); impl.protocol_ = protocol_type(other_impl.protocol_); other_impl.protocol_ = typename Protocol1::endpoint().protocol(); } // Open a new socket implementation. asio::error_code open(implementation_type& impl, const protocol_type& protocol, asio::error_code& ec) { if (!do_open(impl, protocol.family(), protocol.type(), protocol.protocol(), ec)) impl.protocol_ = protocol; return ec; } // Assign a native socket to a socket implementation. asio::error_code assign(implementation_type& impl, const protocol_type& protocol, const native_handle_type& native_socket, asio::error_code& ec) { if (!do_assign(impl, protocol.type(), native_socket, ec)) impl.protocol_ = protocol; return ec; } // Get the native socket representation. native_handle_type native_handle(implementation_type& impl) { return impl.socket_; } // Bind the socket to the specified local endpoint. asio::error_code bind(implementation_type& impl, const endpoint_type& endpoint, asio::error_code& ec) { socket_ops::bind(impl.socket_, endpoint.data(), endpoint.size(), ec); return ec; } // Set a socket option. template asio::error_code set_option(implementation_type& impl, const Option& option, asio::error_code& ec) { socket_ops::setsockopt(impl.socket_, impl.state_, option.level(impl.protocol_), option.name(impl.protocol_), option.data(impl.protocol_), option.size(impl.protocol_), ec); return ec; } // Set a socket option. template asio::error_code get_option(const implementation_type& impl, Option& option, asio::error_code& ec) const { std::size_t size = option.size(impl.protocol_); socket_ops::getsockopt(impl.socket_, impl.state_, option.level(impl.protocol_), option.name(impl.protocol_), option.data(impl.protocol_), &size, ec); if (!ec) option.resize(impl.protocol_, size); return ec; } // Get the local endpoint. endpoint_type local_endpoint(const implementation_type& impl, asio::error_code& ec) const { endpoint_type endpoint; std::size_t addr_len = endpoint.capacity(); if (socket_ops::getsockname(impl.socket_, endpoint.data(), &addr_len, ec)) return endpoint_type(); endpoint.resize(addr_len); return endpoint; } // Get the remote endpoint. endpoint_type remote_endpoint(const implementation_type& impl, asio::error_code& ec) const { endpoint_type endpoint; std::size_t addr_len = endpoint.capacity(); if (socket_ops::getpeername(impl.socket_, endpoint.data(), &addr_len, false, ec)) return endpoint_type(); endpoint.resize(addr_len); return endpoint; } // Send a datagram to the specified endpoint. Returns the number of bytes // sent. template size_t send_to(implementation_type& impl, const ConstBufferSequence& buffers, const endpoint_type& destination, socket_base::message_flags flags, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); return socket_ops::sync_sendto(impl.socket_, impl.state_, bufs.buffers(), bufs.count(), flags, destination.data(), destination.size(), ec); } // Wait until data can be sent without blocking. size_t send_to(implementation_type& impl, const null_buffers&, const endpoint_type&, socket_base::message_flags, asio::error_code& ec) { // Wait for socket to become ready. socket_ops::poll_write(impl.socket_, impl.state_, ec); return 0; } // Start an asynchronous send. The data being sent must be valid for the // lifetime of the asynchronous operation. 
template void async_send_to(implementation_type& impl, const ConstBufferSequence& buffers, const endpoint_type& destination, socket_base::message_flags flags, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_socket_sendto_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.socket_, buffers, destination, flags, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_send_to")); start_op(impl, reactor::write_op, p.p, is_continuation, true, false); p.v = p.p = 0; } // Start an asynchronous wait until data can be sent without blocking. template void async_send_to(implementation_type& impl, const null_buffers&, const endpoint_type&, socket_base::message_flags, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_send_to(null_buffers)")); start_op(impl, reactor::write_op, p.p, is_continuation, false, false); p.v = p.p = 0; } // Receive a datagram with the endpoint of the sender. Returns the number of // bytes received. template size_t receive_from(implementation_type& impl, const MutableBufferSequence& buffers, endpoint_type& sender_endpoint, socket_base::message_flags flags, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); std::size_t addr_len = sender_endpoint.capacity(); std::size_t bytes_recvd = socket_ops::sync_recvfrom( impl.socket_, impl.state_, bufs.buffers(), bufs.count(), flags, sender_endpoint.data(), &addr_len, ec); if (!ec) sender_endpoint.resize(addr_len); return bytes_recvd; } // Wait until data can be received without blocking. size_t receive_from(implementation_type& impl, const null_buffers&, endpoint_type& sender_endpoint, socket_base::message_flags, asio::error_code& ec) { // Wait for socket to become ready. socket_ops::poll_read(impl.socket_, impl.state_, ec); // Reset endpoint since it can be given no sensible value at this time. sender_endpoint = endpoint_type(); return 0; } // Start an asynchronous receive. The buffer for the data being received and // the sender_endpoint object must both be valid for the lifetime of the // asynchronous operation. template void async_receive_from(implementation_type& impl, const MutableBufferSequence& buffers, endpoint_type& sender_endpoint, socket_base::message_flags flags, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_socket_recvfrom_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; int protocol = impl.protocol_.type(); p.p = new (p.v) op(impl.socket_, protocol, buffers, sender_endpoint, flags, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_receive_from")); start_op(impl, (flags & socket_base::message_out_of_band) ? reactor::except_op : reactor::read_op, p.p, is_continuation, true, false); p.v = p.p = 0; } // Wait until data can be received without blocking. 
template void async_receive_from(implementation_type& impl, const null_buffers&, endpoint_type& sender_endpoint, socket_base::message_flags flags, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_receive_from(null_buffers)")); // Reset endpoint since it can be given no sensible value at this time. sender_endpoint = endpoint_type(); start_op(impl, (flags & socket_base::message_out_of_band) ? reactor::except_op : reactor::read_op, p.p, is_continuation, false, false); p.v = p.p = 0; } // Accept a new connection. template asio::error_code accept(implementation_type& impl, Socket& peer, endpoint_type* peer_endpoint, asio::error_code& ec) { // We cannot accept a socket that is already open. if (peer.is_open()) { ec = asio::error::already_open; return ec; } std::size_t addr_len = peer_endpoint ? peer_endpoint->capacity() : 0; socket_holder new_socket(socket_ops::sync_accept(impl.socket_, impl.state_, peer_endpoint ? peer_endpoint->data() : 0, peer_endpoint ? &addr_len : 0, ec)); // On success, assign new connection to peer socket object. if (new_socket.get() != invalid_socket) { if (peer_endpoint) peer_endpoint->resize(addr_len); if (!peer.assign(impl.protocol_, new_socket.get(), ec)) new_socket.release(); } return ec; } // Start an asynchronous accept. The peer and peer_endpoint objects // must be valid until the accept's handler is invoked. template void async_accept(implementation_type& impl, Socket& peer, endpoint_type* peer_endpoint, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_socket_accept_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.socket_, impl.state_, peer, impl.protocol_, peer_endpoint, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_accept")); start_accept_op(impl, p.p, is_continuation, peer.is_open()); p.v = p.p = 0; } // Connect the socket to the specified endpoint. asio::error_code connect(implementation_type& impl, const endpoint_type& peer_endpoint, asio::error_code& ec) { socket_ops::sync_connect(impl.socket_, peer_endpoint.data(), peer_endpoint.size(), ec); return ec; } // Start an asynchronous connect. template void async_connect(implementation_type& impl, const endpoint_type& peer_endpoint, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. 
typedef reactive_socket_connect_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.socket_, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_connect")); start_connect_op(impl, p.p, is_continuation, peer_endpoint.data(), peer_endpoint.size()); p.v = p.p = 0; } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_REACTIVE_SOCKET_SERVICE_HPP galera-3-25.3.20/asio/asio/detail/winsock_init.hpp0000644000015300001660000000602213042054732021477 0ustar jenkinsjenkins// // detail/winsock_init.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINSOCK_INIT_HPP #define ASIO_DETAIL_WINSOCK_INIT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class winsock_init_base { protected: // Structure to track result of initialisation and number of uses. POD is used // to ensure that the values are zero-initialised prior to any code being run. struct data { long init_count_; long result_; }; ASIO_DECL static void startup(data& d, unsigned char major, unsigned char minor); ASIO_DECL static void manual_startup(data& d); ASIO_DECL static void cleanup(data& d); ASIO_DECL static void manual_cleanup(data& d); ASIO_DECL static void throw_on_error(data& d); }; template class winsock_init : private winsock_init_base { public: winsock_init(bool allow_throw = true) { startup(data_, Major, Minor); if (allow_throw) throw_on_error(data_); } winsock_init(const winsock_init&) { startup(data_, Major, Minor); throw_on_error(data_); } ~winsock_init() { cleanup(data_); } // This class may be used to indicate that user code will manage Winsock // initialisation and cleanup. This may be required in the case of a DLL, for // example, where it is not safe to initialise Winsock from global object // constructors. // // To prevent asio from initialising Winsock, the object must be constructed // before any Asio's own global objects. With MSVC, this may be accomplished // by adding the following code to the DLL: // // #pragma warning(push) // #pragma warning(disable:4073) // #pragma init_seg(lib) // asio::detail::winsock_init<>::manual manual_winsock_init; // #pragma warning(pop) class manual { public: manual() { manual_startup(data_); } manual(const manual&) { manual_startup(data_); } ~manual() { manual_cleanup(data_); } }; private: friend class manual; static data data_; }; template winsock_init_base::data winsock_init::data_; // Static variable to ensure that winsock is initialised before main, and // therefore before any other threads can get started. 
static const winsock_init<>& winsock_init_instance = winsock_init<>(false); } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/winsock_init.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) #endif // ASIO_DETAIL_WINSOCK_INIT_HPP galera-3-25.3.20/asio/asio/detail/scoped_ptr.hpp0000644000015300001660000000236113042054732021143 0ustar jenkinsjenkins// // detail/scoped_ptr.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SCOPED_PTR_HPP #define ASIO_DETAIL_SCOPED_PTR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class scoped_ptr { public: // Constructor. explicit scoped_ptr(T* p = 0) : p_(p) { } // Destructor. ~scoped_ptr() { delete p_; } // Access. T* get() { return p_; } // Access. T* operator->() { return p_; } // Dereference. T& operator*() { return *p_; } // Reset pointer. void reset(T* p = 0) { delete p_; p_ = p; } private: // Disallow copying and assignment. scoped_ptr(const scoped_ptr&); scoped_ptr& operator=(const scoped_ptr&); T* p_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_SCOPED_PTR_HPP galera-3-25.3.20/asio/asio/detail/winrt_utils.hpp0000644000015300001660000000510213042054732021360 0ustar jenkinsjenkins// // detail/winrt_utils.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_UTILS_HPP #define ASIO_DETAIL_WINRT_UTILS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include #include #include #include #include #include #include #include #include "asio/buffer.hpp" #include "asio/error_code.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { namespace winrt_utils { inline Platform::String^ string(const char* from) { std::wstring tmp(from, from + std::strlen(from)); return ref new Platform::String(tmp.c_str()); } inline Platform::String^ string(const std::string& from) { std::wstring tmp(from.begin(), from.end()); return ref new Platform::String(tmp.c_str()); } inline std::string string(Platform::String^ from) { std::wstring_convert> converter; return converter.to_bytes(from->Data()); } inline Platform::String^ string(unsigned short from) { return string(std::to_string(from)); } template inline Platform::String^ string(const T& from) { return string(from.to_string()); } inline int integer(Platform::String^ from) { return _wtoi(from->Data()); } template inline Windows::Networking::HostName^ host_name(const T& from) { return ref new Windows::Networking::HostName((string)(from)); } template inline Windows::Storage::Streams::IBuffer^ buffer_dup( const ConstBufferSequence& buffers) { using Microsoft::WRL::ComPtr; std::size_t size = asio::buffer_size(buffers); auto b = ref new Windows::Storage::Streams::Buffer(size); ComPtr insp = reinterpret_cast(b); ComPtr bacc; insp.As(&bacc); byte* bytes = nullptr; bacc->Buffer(&bytes); asio::buffer_copy(asio::buffer(bytes, size), buffers); b->Length = size; return b; } } // namespace winrt_utils } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_WINRT_UTILS_HPP galera-3-25.3.20/asio/asio/detail/win_event.hpp0000644000015300001660000000507113042054732021000 0ustar jenkinsjenkins// // detail/win_event.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_EVENT_HPP #define ASIO_DETAIL_WIN_EVENT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) #include "asio/detail/assert.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class win_event : private noncopyable { public: // Constructor. ASIO_DECL win_event(); // Destructor. ASIO_DECL ~win_event(); // Signal the event. (Retained for backward compatibility.) template void signal(Lock& lock) { this->signal_all(lock); } // Signal all waiters. template void signal_all(Lock& lock) { ASIO_ASSERT(lock.locked()); (void)lock; state_ |= 1; ::SetEvent(events_[0]); } // Unlock the mutex and signal one waiter. 
template void unlock_and_signal_one(Lock& lock) { ASIO_ASSERT(lock.locked()); state_ |= 1; bool have_waiters = (state_ > 1); lock.unlock(); if (have_waiters) ::SetEvent(events_[1]); } // If there's a waiter, unlock the mutex and signal it. template bool maybe_unlock_and_signal_one(Lock& lock) { ASIO_ASSERT(lock.locked()); state_ |= 1; if (state_ > 1) { lock.unlock(); ::SetEvent(events_[1]); return true; } return false; } // Reset the event. template void clear(Lock& lock) { ASIO_ASSERT(lock.locked()); (void)lock; ::ResetEvent(events_[0]); state_ &= ~std::size_t(1); } // Wait for the event to become signalled. template void wait(Lock& lock) { ASIO_ASSERT(lock.locked()); while ((state_ & 1) == 0) { state_ += 2; lock.unlock(); ::WaitForMultipleObjects(2, events_, false, INFINITE); lock.lock(); state_ -= 2; } } private: HANDLE events_[2]; std::size_t state_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/win_event.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_WINDOWS) #endif // ASIO_DETAIL_WIN_EVENT_HPP galera-3-25.3.20/asio/asio/detail/win_iocp_handle_read_op.hpp0000644000015300001660000000652113042054732023616 0ustar jenkinsjenkins// // detail/win_iocp_handle_read_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_HANDLE_READ_OP_HPP #define ASIO_DETAIL_WIN_IOCP_HANDLE_READ_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/error.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class win_iocp_handle_read_op : public operation { public: ASIO_DEFINE_HANDLER_PTR(win_iocp_handle_read_op); win_iocp_handle_read_op( const MutableBufferSequence& buffers, Handler& handler) : operation(&win_iocp_handle_read_op::do_complete), buffers_(buffers), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& result_ec, std::size_t bytes_transferred) { asio::error_code ec(result_ec); // Take ownership of the operation object. win_iocp_handle_read_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) if (owner) { // Check whether buffers are still valid. buffer_sequence_adapter::validate(o->buffers_); } #endif // defined(ASIO_ENABLE_BUFFER_DEBUGGING) // Map non-portable errors to their portable counterparts. if (ec.value() == ERROR_HANDLE_EOF) ec = asio::error::eof; // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. 
Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, ec, bytes_transferred); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: MutableBufferSequence buffers_; Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_HANDLE_READ_OP_HPP galera-3-25.3.20/asio/asio/detail/null_mutex.hpp0000644000015300001660000000217513042054732021200 0ustar jenkinsjenkins// // detail/null_mutex.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_NULL_MUTEX_HPP #define ASIO_DETAIL_NULL_MUTEX_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) #include "asio/detail/noncopyable.hpp" #include "asio/detail/scoped_lock.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class null_mutex : private noncopyable { public: typedef asio::detail::scoped_lock scoped_lock; // Constructor. null_mutex() { } // Destructor. ~null_mutex() { } // Lock the mutex. void lock() { } // Unlock the mutex. void unlock() { } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_HAS_THREADS) #endif // ASIO_DETAIL_NULL_MUTEX_HPP galera-3-25.3.20/asio/asio/detail/reactive_socket_recv_op.hpp0000644000015300001660000000757213042054732023701 0ustar jenkinsjenkins// // detail/reactive_socket_recv_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_SOCKET_RECV_OP_HPP #define ASIO_DETAIL_REACTIVE_SOCKET_RECV_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class reactive_socket_recv_op_base : public reactor_op { public: reactive_socket_recv_op_base(socket_type socket, socket_ops::state_type state, const MutableBufferSequence& buffers, socket_base::message_flags flags, func_type complete_func) : reactor_op(&reactive_socket_recv_op_base::do_perform, complete_func), socket_(socket), state_(state), buffers_(buffers), flags_(flags) { } static bool do_perform(reactor_op* base) { reactive_socket_recv_op_base* o( static_cast(base)); buffer_sequence_adapter bufs(o->buffers_); return socket_ops::non_blocking_recv(o->socket_, bufs.buffers(), bufs.count(), o->flags_, (o->state_ & socket_ops::stream_oriented) != 0, o->ec_, o->bytes_transferred_); } private: socket_type socket_; socket_ops::state_type state_; MutableBufferSequence buffers_; socket_base::message_flags flags_; }; template class reactive_socket_recv_op : public reactive_socket_recv_op_base { public: ASIO_DEFINE_HANDLER_PTR(reactive_socket_recv_op); reactive_socket_recv_op(socket_type socket, socket_ops::state_type state, const MutableBufferSequence& buffers, socket_base::message_flags flags, Handler& handler) : reactive_socket_recv_op_base(socket, state, buffers, flags, &reactive_socket_recv_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. reactive_socket_recv_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, o->bytes_transferred_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_REACTIVE_SOCKET_RECV_OP_HPP galera-3-25.3.20/asio/asio/detail/throw_exception.hpp0000644000015300001660000000257713042054732022233 0ustar jenkinsjenkins// // detail/throw_exception.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_THROW_EXCEPTION_HPP #define ASIO_DETAIL_THROW_EXCEPTION_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_BOOST_THROW_EXCEPTION) # include #endif // defined(ASIO_BOOST_THROW_EXCEPTION) namespace asio { namespace detail { #if defined(ASIO_HAS_BOOST_THROW_EXCEPTION) using boost::throw_exception; #else // defined(ASIO_HAS_BOOST_THROW_EXCEPTION) // Declare the throw_exception function for all targets. template void throw_exception(const Exception& e); // Only define the throw_exception function when exceptions are enabled. // Otherwise, it is up to the application to provide a definition of this // function. # if !defined(ASIO_NO_EXCEPTIONS) template void throw_exception(const Exception& e) { throw e; } # endif // !defined(ASIO_NO_EXCEPTIONS) #endif // defined(ASIO_HAS_BOOST_THROW_EXCEPTION) } // namespace detail } // namespace asio #endif // ASIO_DETAIL_THROW_EXCEPTION_HPP galera-3-25.3.20/asio/asio/detail/winrt_socket_connect_op.hpp0000644000015300001660000000525713042054732023732 0ustar jenkinsjenkins// // detail/winrt_socket_connect_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_SOCKET_CONNECT_OP_HPP #define ASIO_DETAIL_WINRT_SOCKET_CONNECT_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/winrt_async_op.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class winrt_socket_connect_op : public winrt_async_op { public: ASIO_DEFINE_HANDLER_PTR(winrt_socket_connect_op); winrt_socket_connect_op(Handler& handler) : winrt_async_op(&winrt_socket_connect_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code&, std::size_t) { // Take ownership of the operation object. winrt_socket_connect_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder1 handler(o->handler_, o->ec_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. 
if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_WINRT_SOCKET_CONNECT_OP_HPP galera-3-25.3.20/asio/asio/detail/gcc_sync_fenced_block.hpp0000644000015300001660000000302613042054732023246 0ustar jenkinsjenkins// // detail/gcc_sync_fenced_block.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_GCC_SYNC_FENCED_BLOCK_HPP #define ASIO_DETAIL_GCC_SYNC_FENCED_BLOCK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(__GNUC__) \ && ((__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)) \ && !defined(__INTEL_COMPILER) && !defined(__ICL) \ && !defined(__ICC) && !defined(__ECC) && !defined(__PATHSCALE__) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class gcc_sync_fenced_block : private noncopyable { public: enum half_or_full_t { half, full }; // Constructor. explicit gcc_sync_fenced_block(half_or_full_t) : value_(0) { __sync_lock_test_and_set(&value_, 1); } // Destructor. ~gcc_sync_fenced_block() { __sync_lock_release(&value_); } private: int value_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(__GNUC__) // && ((__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)) // && !defined(__INTEL_COMPILER) && !defined(__ICL) // && !defined(__ICC) && !defined(__ECC) && !defined(__PATHSCALE__) #endif // ASIO_DETAIL_GCC_SYNC_FENCED_BLOCK_HPP galera-3-25.3.20/asio/asio/detail/null_socket_service.hpp0000644000015300001660000003567613042054732023062 0ustar jenkinsjenkins// // detail/null_socket_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_NULL_SOCKET_SERVICE_HPP #define ASIO_DETAIL_NULL_SOCKET_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include "asio/buffer.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/socket_base.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class null_socket_service { public: // The protocol type. typedef Protocol protocol_type; // The endpoint type. typedef typename Protocol::endpoint endpoint_type; // The native type of a socket. typedef int native_handle_type; // The implementation type of the socket. struct implementation_type { }; // Constructor. null_socket_service(asio::io_service& io_service) : io_service_(io_service) { } // Destroy all user-defined handler objects owned by the service. void shutdown_service() { } // Construct a new socket implementation. 
void construct(implementation_type&) { } // Move-construct a new socket implementation. void move_construct(implementation_type&, implementation_type&) { } // Move-assign from another socket implementation. void move_assign(implementation_type&, null_socket_service&, implementation_type&) { } // Move-construct a new socket implementation from another protocol type. template void converting_move_construct(implementation_type&, typename null_socket_service::implementation_type&) { } // Destroy a socket implementation. void destroy(implementation_type&) { } // Open a new socket implementation. asio::error_code open(implementation_type&, const protocol_type&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Assign a native socket to a socket implementation. asio::error_code assign(implementation_type&, const protocol_type&, const native_handle_type&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Determine whether the socket is open. bool is_open(const implementation_type&) const { return false; } // Destroy a socket implementation. asio::error_code close(implementation_type&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Get the native socket representation. native_handle_type native_handle(implementation_type&) { return 0; } // Cancel all operations associated with the socket. asio::error_code cancel(implementation_type&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Determine whether the socket is at the out-of-band data mark. bool at_mark(const implementation_type&, asio::error_code& ec) const { ec = asio::error::operation_not_supported; return false; } // Determine the number of bytes available for reading. std::size_t available(const implementation_type&, asio::error_code& ec) const { ec = asio::error::operation_not_supported; return 0; } // Place the socket into the state where it will listen for new connections. asio::error_code listen(implementation_type&, int, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Perform an IO control command on the socket. template asio::error_code io_control(implementation_type&, IO_Control_Command&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Gets the non-blocking mode of the socket. bool non_blocking(const implementation_type&) const { return false; } // Sets the non-blocking mode of the socket. asio::error_code non_blocking(implementation_type&, bool, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Gets the non-blocking mode of the native socket implementation. bool native_non_blocking(const implementation_type&) const { return false; } // Sets the non-blocking mode of the native socket implementation. asio::error_code native_non_blocking(implementation_type&, bool, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Disable sends or receives on the socket. asio::error_code shutdown(implementation_type&, socket_base::shutdown_type, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Bind the socket to the specified local endpoint. asio::error_code bind(implementation_type&, const endpoint_type&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Set a socket option. 
template asio::error_code set_option(implementation_type&, const Option&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Set a socket option. template asio::error_code get_option(const implementation_type&, Option&, asio::error_code& ec) const { ec = asio::error::operation_not_supported; return ec; } // Get the local endpoint. endpoint_type local_endpoint(const implementation_type&, asio::error_code& ec) const { ec = asio::error::operation_not_supported; return endpoint_type(); } // Get the remote endpoint. endpoint_type remote_endpoint(const implementation_type&, asio::error_code& ec) const { ec = asio::error::operation_not_supported; return endpoint_type(); } // Send the given data to the peer. template std::size_t send(implementation_type&, const ConstBufferSequence&, socket_base::message_flags, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Wait until data can be sent without blocking. std::size_t send(implementation_type&, const null_buffers&, socket_base::message_flags, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Start an asynchronous send. The data being sent must be valid for the // lifetime of the asynchronous operation. template void async_send(implementation_type&, const ConstBufferSequence&, socket_base::message_flags, Handler& handler) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; io_service_.post(detail::bind_handler(handler, ec, bytes_transferred)); } // Start an asynchronous wait until data can be sent without blocking. template void async_send(implementation_type&, const null_buffers&, socket_base::message_flags, Handler& handler) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; io_service_.post(detail::bind_handler(handler, ec, bytes_transferred)); } // Receive some data from the peer. Returns the number of bytes received. template std::size_t receive(implementation_type&, const MutableBufferSequence&, socket_base::message_flags, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Wait until data can be received without blocking. std::size_t receive(implementation_type&, const null_buffers&, socket_base::message_flags, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Start an asynchronous receive. The buffer for the data being received // must be valid for the lifetime of the asynchronous operation. template void async_receive(implementation_type&, const MutableBufferSequence&, socket_base::message_flags, Handler& handler) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; io_service_.post(detail::bind_handler(handler, ec, bytes_transferred)); } // Wait until data can be received without blocking. template void async_receive(implementation_type&, const null_buffers&, socket_base::message_flags, Handler& handler) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; io_service_.post(detail::bind_handler(handler, ec, bytes_transferred)); } // Receive some data with associated flags. Returns the number of bytes // received. 
template std::size_t receive_with_flags(implementation_type&, const MutableBufferSequence&, socket_base::message_flags, socket_base::message_flags&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Wait until data can be received without blocking. std::size_t receive_with_flags(implementation_type&, const null_buffers&, socket_base::message_flags, socket_base::message_flags&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Start an asynchronous receive. The buffer for the data being received // must be valid for the lifetime of the asynchronous operation. template void async_receive_with_flags(implementation_type&, const MutableBufferSequence&, socket_base::message_flags, socket_base::message_flags&, Handler& handler) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; io_service_.post(detail::bind_handler(handler, ec, bytes_transferred)); } // Wait until data can be received without blocking. template void async_receive_with_flags(implementation_type&, const null_buffers&, socket_base::message_flags, socket_base::message_flags&, Handler& handler) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; io_service_.post(detail::bind_handler(handler, ec, bytes_transferred)); } // Send a datagram to the specified endpoint. Returns the number of bytes // sent. template std::size_t send_to(implementation_type&, const ConstBufferSequence&, const endpoint_type&, socket_base::message_flags, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Wait until data can be sent without blocking. std::size_t send_to(implementation_type&, const null_buffers&, const endpoint_type&, socket_base::message_flags, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Start an asynchronous send. The data being sent must be valid for the // lifetime of the asynchronous operation. template void async_send_to(implementation_type&, const ConstBufferSequence&, const endpoint_type&, socket_base::message_flags, Handler& handler) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; io_service_.post(detail::bind_handler(handler, ec, bytes_transferred)); } // Start an asynchronous wait until data can be sent without blocking. template void async_send_to(implementation_type&, const null_buffers&, const endpoint_type&, socket_base::message_flags, Handler& handler) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; io_service_.post(detail::bind_handler(handler, ec, bytes_transferred)); } // Receive a datagram with the endpoint of the sender. Returns the number of // bytes received. template std::size_t receive_from(implementation_type&, const MutableBufferSequence&, endpoint_type&, socket_base::message_flags, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Wait until data can be received without blocking. std::size_t receive_from(implementation_type&, const null_buffers&, endpoint_type&, socket_base::message_flags, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Start an asynchronous receive. The buffer for the data being received and // the sender_endpoint object must both be valid for the lifetime of the // asynchronous operation. 
template void async_receive_from(implementation_type&, const MutableBufferSequence&, endpoint_type&, socket_base::message_flags, Handler& handler) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; io_service_.post(detail::bind_handler(handler, ec, bytes_transferred)); } // Wait until data can be received without blocking. template void async_receive_from(implementation_type&, const null_buffers&, endpoint_type&, socket_base::message_flags, Handler& handler) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; io_service_.post(detail::bind_handler(handler, ec, bytes_transferred)); } // Accept a new connection. template asio::error_code accept(implementation_type&, Socket&, endpoint_type*, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Start an asynchronous accept. The peer and peer_endpoint objects // must be valid until the accept's handler is invoked. template void async_accept(implementation_type&, Socket&, endpoint_type*, Handler& handler) { asio::error_code ec = asio::error::operation_not_supported; io_service_.post(detail::bind_handler(handler, ec)); } // Connect the socket to the specified endpoint. asio::error_code connect(implementation_type&, const endpoint_type&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Start an asynchronous connect. template void async_connect(implementation_type&, const endpoint_type&, Handler& handler) { asio::error_code ec = asio::error::operation_not_supported; io_service_.post(detail::bind_handler(handler, ec)); } private: asio::io_service& io_service_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_NULL_SOCKET_SERVICE_HPP galera-3-25.3.20/asio/asio/detail/winrt_socket_send_op.hpp0000644000015300001660000000607713042054732023233 0ustar jenkinsjenkins// // detail/winrt_socket_send_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_SOCKET_SEND_OP_HPP #define ASIO_DETAIL_WINRT_SOCKET_SEND_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/winrt_async_op.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class winrt_socket_send_op : public winrt_async_op { public: ASIO_DEFINE_HANDLER_PTR(winrt_socket_send_op); winrt_socket_send_op(const ConstBufferSequence& buffers, Handler& handler) : winrt_async_op(&winrt_socket_send_op::do_complete), buffers_(buffers), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code&, std::size_t) { // Take ownership of the operation object. 
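// The cast below recovers the concrete operation type, and the ptr aggregate
// pairs it with the handler so that the operation's memory can be returned
// through the handler's allocation hooks (p.reset() further down) before any
// upcall into user code is made: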
winrt_socket_send_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) // Check whether buffers are still valid. if (owner) { buffer_sequence_adapter::validate(o->buffers_); } #endif // defined(ASIO_ENABLE_BUFFER_DEBUGGING) // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, o->result_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: ConstBufferSequence buffers_; Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_WINRT_SOCKET_SEND_OP_HPP galera-3-25.3.20/asio/asio/detail/resolve_op.hpp0000644000015300001660000001006013042054732021151 0ustar jenkinsjenkins// // detail/resolve_op.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_RESOLVE_OP_HPP #define ASIO_DETAIL_RESOLVE_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/ip/basic_resolver_iterator.hpp" #include "asio/ip/basic_resolver_query.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class resolve_op : public operation { public: ASIO_DEFINE_HANDLER_PTR(resolve_op); typedef asio::ip::basic_resolver_query query_type; typedef asio::ip::basic_resolver_iterator iterator_type; resolve_op(socket_ops::weak_cancel_token_type cancel_token, const query_type& query, io_service_impl& ios, Handler& handler) : operation(&resolve_op::do_complete), cancel_token_(cancel_token), query_(query), io_service_impl_(ios), handler_(ASIO_MOVE_CAST(Handler)(handler)), addrinfo_(0) { } ~resolve_op() { if (addrinfo_) socket_ops::freeaddrinfo(addrinfo_); } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the operation object. resolve_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; if (owner && owner != &o->io_service_impl_) { // The operation is being run on the worker io_service. Time to perform // the resolver operation. // Perform the blocking host resolution operation. 
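// At the public API level this operation is created by async_resolve().
// A minimal usage sketch, with illustrative names only:
//
//   asio::ip::tcp::resolver resolver(io_service);
//   asio::ip::tcp::resolver::query query("example.com", "http");
//   resolver.async_resolve(query,
//       [](const asio::error_code& ec,
//          asio::ip::tcp::resolver::iterator it) { /* use results */ });
//
// background_getaddrinfo() below is the blocking step that produces the
// addrinfo list later wrapped by that iterator: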
socket_ops::background_getaddrinfo(o->cancel_token_, o->query_.host_name().c_str(), o->query_.service_name().c_str(), o->query_.hints(), &o->addrinfo_, o->ec_); // Pass operation back to main io_service for completion. o->io_service_impl_.post_deferred_completion(o); p.v = p.p = 0; } else { // The operation has been returned to the main io_service. The completion // handler is ready to be delivered. ASIO_HANDLER_COMPLETION((o)); // Make a copy of the handler so that the memory can be deallocated // before the upcall is made. Even if we're not about to make an upcall, // a sub-object of the handler may be the true owner of the memory // associated with the handler. Consequently, a local copy of the handler // is required to ensure that any owning sub-object remains valid until // after we have deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, iterator_type()); p.h = asio::detail::addressof(handler.handler_); if (o->addrinfo_) { handler.arg2_ = iterator_type::create(o->addrinfo_, o->query_.host_name(), o->query_.service_name()); } p.reset(); if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, "...")); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } } private: socket_ops::weak_cancel_token_type cancel_token_; query_type query_; io_service_impl& io_service_impl_; Handler handler_; asio::error_code ec_; asio::detail::addrinfo_type* addrinfo_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_RESOLVE_OP_HPP galera-3-25.3.20/asio/asio/detail/handler_type_requirements.hpp0000644000015300001660000003751013042054732024266 0ustar jenkinsjenkins// // detail/handler_type_requirements.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_HANDLER_TYPE_REQUIREMENTS_HPP #define ASIO_DETAIL_HANDLER_TYPE_REQUIREMENTS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" // Older versions of gcc have difficulty compiling the sizeof expressions where // we test the handler type requirements. We'll disable checking of handler type // requirements for those compilers, but otherwise enable it by default. #if !defined(ASIO_DISABLE_HANDLER_TYPE_REQUIREMENTS) # if !defined(__GNUC__) || (__GNUC__ >= 4) # define ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS 1 # endif // !defined(__GNUC__) || (__GNUC__ >= 4) #endif // !defined(ASIO_DISABLE_HANDLER_TYPE_REQUIREMENTS) // With C++0x we can use a combination of enhanced SFINAE and static_assert to // generate better template error messages. As this technique is not yet widely // portable, we'll only enable it for tested compilers. 
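// The core of the technique, in miniature (illustrative only; the real checks
// defined below also verify that the handler is copy constructible): an
// overload selected via decltype-SFINAE returns char when the handler is
// callable with the expected arguments, and a char(&)[2] fallback is chosen
// otherwise, so the requirement collapses to a sizeof comparison:
//
//   template <typename Handler, typename Arg1, typename Arg2>
//   auto two_arg_test(Handler h, Arg1* a1, Arg2* a2)
//     -> decltype(h(*a1, *a2), char(0));
//
//   template <typename Handler>
//   char (&two_arg_test(Handler, ...))[2];
//
//   // static_assert(sizeof(two_arg_test(h, &ec, &n)) == 1, "...");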
#if !defined(ASIO_DISABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1600) # define ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT 1 # endif // (_MSC_VER >= 1600) # endif // defined(ASIO_MSVC) # if defined(__clang__) # if __has_feature(__cxx_static_assert__) # define ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT 1 # endif // __has_feature(cxx_static_assert) # endif // defined(__clang__) #endif // !defined(ASIO_DISABLE_HANDLER_TYPE_REQUIREMENTS) #if defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS) # include "asio/handler_type.hpp" #endif // defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS) // Newer gcc needs special treatment to suppress unused typedef warnings. #if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 8)) || (__GNUC__ > 4) # define ASIO_UNUSED_TYPEDEF __attribute__((__unused__)) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 8)) || (__GNUC__ > 4) #endif // defined(__GNUC__) #if !defined(ASIO_UNUSED_TYPEDEF) # define ASIO_UNUSED_TYPEDEF #endif // !defined(ASIO_UNUSED_TYPEDEF) namespace asio { namespace detail { #if defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS) # if defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT) template auto zero_arg_handler_test(Handler h, void*) -> decltype( sizeof(Handler(static_cast(h))), ((h)()), char(0)); template char (&zero_arg_handler_test(Handler, ...))[2]; template auto one_arg_handler_test(Handler h, Arg1* a1) -> decltype( sizeof(Handler(static_cast(h))), ((h)(*a1)), char(0)); template char (&one_arg_handler_test(Handler h, ...))[2]; template auto two_arg_handler_test(Handler h, Arg1* a1, Arg2* a2) -> decltype( sizeof(Handler(static_cast(h))), ((h)(*a1, *a2)), char(0)); template char (&two_arg_handler_test(Handler, ...))[2]; # define ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT(expr, msg) \ static_assert(expr, msg); # else // defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT) # define ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT(expr, msg) # endif // defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT) template T& lvref(); template T& lvref(T); template const T& clvref(); template const T& clvref(T); template char argbyv(T); template struct handler_type_requirements { }; #define ASIO_COMPLETION_HANDLER_CHECK( \ handler_type, handler) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void()) asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::zero_arg_handler_test( \ asio::detail::clvref< \ asio_true_handler_type>(), 0)) == 1, \ "CompletionHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::clvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()(), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_READ_HANDLER_CHECK( \ handler_type, handler) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void(asio::error_code, std::size_t)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::two_arg_handler_test( \ asio::detail::clvref< \ asio_true_handler_type>(), \ static_cast(0), \ static_cast(0))) == 1, \ "ReadHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ 
sizeof( \ asio::detail::argbyv( \ asio::detail::clvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref(), \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_WRITE_HANDLER_CHECK( \ handler_type, handler) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void(asio::error_code, std::size_t)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::two_arg_handler_test( \ asio::detail::clvref< \ asio_true_handler_type>(), \ static_cast(0), \ static_cast(0))) == 1, \ "WriteHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::clvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref(), \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_ACCEPT_HANDLER_CHECK( \ handler_type, handler) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void(asio::error_code)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::one_arg_handler_test( \ asio::detail::clvref< \ asio_true_handler_type>(), \ static_cast(0))) == 1, \ "AcceptHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::clvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_CONNECT_HANDLER_CHECK( \ handler_type, handler) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void(asio::error_code)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::one_arg_handler_test( \ asio::detail::clvref< \ asio_true_handler_type>(), \ static_cast(0))) == 1, \ "ConnectHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::clvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_COMPOSED_CONNECT_HANDLER_CHECK( \ handler_type, handler, iter_type) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void(asio::error_code, iter_type)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::two_arg_handler_test( \ asio::detail::clvref< \ asio_true_handler_type>(), \ static_cast(0), \ static_cast(0))) == 1, \ "ComposedConnectHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::clvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref(), \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_RESOLVE_HANDLER_CHECK( \ handler_type, handler, iter_type) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void(asio::error_code, iter_type)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::two_arg_handler_test( \ asio::detail::clvref< \ asio_true_handler_type>(), \ static_cast(0), \ static_cast(0))) == 1, \ "ResolveHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::clvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref(), \ 
asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_WAIT_HANDLER_CHECK( \ handler_type, handler) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void(asio::error_code)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::one_arg_handler_test( \ asio::detail::clvref< \ asio_true_handler_type>(), \ static_cast(0))) == 1, \ "WaitHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::clvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_SIGNAL_HANDLER_CHECK( \ handler_type, handler) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void(asio::error_code, int)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::two_arg_handler_test( \ asio::detail::clvref< \ asio_true_handler_type>(), \ static_cast(0), \ static_cast(0))) == 1, \ "SignalHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::clvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref(), \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_HANDSHAKE_HANDLER_CHECK( \ handler_type, handler) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void(asio::error_code)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::one_arg_handler_test( \ asio::detail::clvref< \ asio_true_handler_type>(), \ static_cast(0))) == 1, \ "HandshakeHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::clvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_BUFFERED_HANDSHAKE_HANDLER_CHECK( \ handler_type, handler) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void(asio::error_code, std::size_t)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::two_arg_handler_test( \ asio::detail::clvref< \ asio_true_handler_type>(), \ static_cast(0), \ static_cast(0))) == 1, \ "BufferedHandshakeHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::clvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref(), \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_SHUTDOWN_HANDLER_CHECK( \ handler_type, handler) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void(asio::error_code)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::one_arg_handler_test( \ asio::detail::clvref< \ asio_true_handler_type>(), \ static_cast(0))) == 1, \ "ShutdownHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::clvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #else // !defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS) #define ASIO_COMPLETION_HANDLER_CHECK( \ handler_type, handler) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_READ_HANDLER_CHECK( \ handler_type, handler) \ 
typedef int ASIO_UNUSED_TYPEDEF #define ASIO_WRITE_HANDLER_CHECK( \ handler_type, handler) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_ACCEPT_HANDLER_CHECK( \ handler_type, handler) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_CONNECT_HANDLER_CHECK( \ handler_type, handler) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_COMPOSED_CONNECT_HANDLER_CHECK( \ handler_type, handler, iter_type) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_RESOLVE_HANDLER_CHECK( \ handler_type, handler, iter_type) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_WAIT_HANDLER_CHECK( \ handler_type, handler) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_SIGNAL_HANDLER_CHECK( \ handler_type, handler) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_HANDSHAKE_HANDLER_CHECK( \ handler_type, handler) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_BUFFERED_HANDSHAKE_HANDLER_CHECK( \ handler_type, handler) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_SHUTDOWN_HANDLER_CHECK( \ handler_type, handler) \ typedef int ASIO_UNUSED_TYPEDEF #endif // !defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS) } // namespace detail } // namespace asio #endif // ASIO_DETAIL_HANDLER_TYPE_REQUIREMENTS_HPP galera-3-25.3.20/asio/asio/detail/deadline_timer_service.hpp0000644000015300001660000001465313042054732023475 0ustar jenkinsjenkins// // detail/deadline_timer_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_DEADLINE_TIMER_SERVICE_HPP #define ASIO_DETAIL_DEADLINE_TIMER_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/timer_queue.hpp" #include "asio/detail/timer_scheduler.hpp" #include "asio/detail/wait_handler.hpp" #include "asio/detail/wait_op.hpp" #if defined(ASIO_WINDOWS_RUNTIME) # include # include #endif // defined(ASIO_WINDOWS_RUNTIME) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class deadline_timer_service { public: // The time type. typedef typename Time_Traits::time_type time_type; // The duration type. typedef typename Time_Traits::duration_type duration_type; // The implementation type of the timer. This type is dependent on the // underlying implementation of the timer service. struct implementation_type : private asio::detail::noncopyable { time_type expiry; bool might_have_pending_waits; typename timer_queue::per_timer_data timer_data; }; // Constructor. deadline_timer_service(asio::io_service& io_service) : scheduler_(asio::use_service(io_service)) { scheduler_.init_task(); scheduler_.add_timer_queue(timer_queue_); } // Destructor. ~deadline_timer_service() { scheduler_.remove_timer_queue(timer_queue_); } // Destroy all user-defined handler objects owned by the service. void shutdown_service() { } // Construct a new timer implementation. void construct(implementation_type& impl) { impl.expiry = time_type(); impl.might_have_pending_waits = false; } // Destroy a timer implementation. 
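// This service backs the public deadline timer types. A typical user-level
// sequence looks like the following (names and duration illustrative):
//
//   asio::io_service ios;
//   asio::deadline_timer timer(ios);
//   timer.expires_from_now(boost::posix_time::seconds(5));
//   timer.async_wait([](const asio::error_code& ec)
//       { if (!ec) { /* timer expired */ } });
//   ios.run();
//
// destroy(), below, simply cancels any such outstanding waits: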
void destroy(implementation_type& impl) { asio::error_code ec; cancel(impl, ec); } // Cancel any asynchronous wait operations associated with the timer. std::size_t cancel(implementation_type& impl, asio::error_code& ec) { if (!impl.might_have_pending_waits) { ec = asio::error_code(); return 0; } ASIO_HANDLER_OPERATION(("deadline_timer", &impl, "cancel")); std::size_t count = scheduler_.cancel_timer(timer_queue_, impl.timer_data); impl.might_have_pending_waits = false; ec = asio::error_code(); return count; } // Cancels one asynchronous wait operation associated with the timer. std::size_t cancel_one(implementation_type& impl, asio::error_code& ec) { if (!impl.might_have_pending_waits) { ec = asio::error_code(); return 0; } ASIO_HANDLER_OPERATION(("deadline_timer", &impl, "cancel_one")); std::size_t count = scheduler_.cancel_timer( timer_queue_, impl.timer_data, 1); if (count == 0) impl.might_have_pending_waits = false; ec = asio::error_code(); return count; } // Get the expiry time for the timer as an absolute time. time_type expires_at(const implementation_type& impl) const { return impl.expiry; } // Set the expiry time for the timer as an absolute time. std::size_t expires_at(implementation_type& impl, const time_type& expiry_time, asio::error_code& ec) { std::size_t count = cancel(impl, ec); impl.expiry = expiry_time; ec = asio::error_code(); return count; } // Get the expiry time for the timer relative to now. duration_type expires_from_now(const implementation_type& impl) const { return Time_Traits::subtract(expires_at(impl), Time_Traits::now()); } // Set the expiry time for the timer relative to now. std::size_t expires_from_now(implementation_type& impl, const duration_type& expiry_time, asio::error_code& ec) { return expires_at(impl, Time_Traits::add(Time_Traits::now(), expiry_time), ec); } // Perform a blocking wait on the timer. void wait(implementation_type& impl, asio::error_code& ec) { time_type now = Time_Traits::now(); ec = asio::error_code(); while (Time_Traits::less_than(now, impl.expiry) && !ec) { this->do_wait(Time_Traits::to_posix_duration( Time_Traits::subtract(impl.expiry, now)), ec); now = Time_Traits::now(); } } // Start an asynchronous wait on the timer. template void async_wait(implementation_type& impl, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef wait_handler op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); impl.might_have_pending_waits = true; ASIO_HANDLER_CREATION((p.p, "deadline_timer", &impl, "async_wait")); scheduler_.schedule_timer(timer_queue_, impl.expiry, impl.timer_data, p.p); p.v = p.p = 0; } private: // Helper function to wait given a duration type. The duration type should // either be of type boost::posix_time::time_duration, or implement the // required subset of its interface. template void do_wait(const Duration& timeout, asio::error_code& ec) { #if defined(ASIO_WINDOWS_RUNTIME) std::this_thread::sleep_for( std::chrono::seconds(timeout.total_seconds()) + std::chrono::microseconds(timeout.total_microseconds())); ec = asio::error_code(); #else // defined(ASIO_WINDOWS_RUNTIME) ::timeval tv; tv.tv_sec = timeout.total_seconds(); tv.tv_usec = timeout.total_microseconds() % 1000000; socket_ops::select(0, 0, 0, 0, &tv, ec); #endif // defined(ASIO_WINDOWS_RUNTIME) } // The queue of timers. timer_queue timer_queue_; // The object that schedules and executes timers. Usually a reactor. 
timer_scheduler& scheduler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_DEADLINE_TIMER_SERVICE_HPP galera-3-25.3.20/asio/asio/detail/push_options.hpp0000644000015300001660000000746313042054732021543 0ustar jenkinsjenkins// // detail/push_options.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // // No header guard #if defined(__COMO__) // Comeau C++ #elif defined(__DMC__) // Digital Mars C++ #elif defined(__INTEL_COMPILER) || defined(__ICL) \ || defined(__ICC) || defined(__ECC) // Intel C++ # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4) # pragma GCC visibility push (default) # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4) #elif defined(__clang__) // Clang # if defined(__OBJC__) # if !defined(__APPLE_CC__) || (__APPLE_CC__ <= 1) # if !defined(ASIO_DISABLE_OBJC_WORKAROUND) # if !defined(Protocol) && !defined(id) # define Protocol cpp_Protocol # define id cpp_id # define ASIO_OBJC_WORKAROUND # endif # endif # endif # endif # if !defined(_WIN32) && !defined(__WIN32__) && !defined(WIN32) # pragma GCC visibility push (default) # endif // !defined(_WIN32) && !defined(__WIN32__) && !defined(WIN32) #elif defined(__GNUC__) // GNU C++ # if defined(__MINGW32__) || defined(__CYGWIN__) # pragma pack (push, 8) # endif # if defined(__OBJC__) # if !defined(__APPLE_CC__) || (__APPLE_CC__ <= 1) # if !defined(ASIO_DISABLE_OBJC_WORKAROUND) # if !defined(Protocol) && !defined(id) # define Protocol cpp_Protocol # define id cpp_id # define ASIO_OBJC_WORKAROUND # endif # endif # endif # endif # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4) # pragma GCC visibility push (default) # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4) #elif defined(__KCC) // Kai C++ #elif defined(__sgi) // SGI MIPSpro C++ #elif defined(__DECCXX) // Compaq Tru64 Unix cxx #elif defined(__ghs) // Greenhills C++ #elif defined(__BORLANDC__) // Borland C++ # pragma option push -a8 -b -Ve- -Vx- -w-inl -vi- # pragma nopushoptwarn # pragma nopackwarning # if !defined(__MT__) # error Multithreaded RTL must be selected. # endif // !defined(__MT__) #elif defined(__MWERKS__) // Metrowerks CodeWarrior #elif defined(__SUNPRO_CC) // Sun Workshop Compiler C++ #elif defined(__HP_aCC) // HP aCC #elif defined(__MRC__) || defined(__SC__) // MPW MrCpp or SCpp #elif defined(__IBMCPP__) // IBM Visual Age #elif defined(_MSC_VER) // Microsoft Visual C++ // // Must remain the last #elif since some other vendors (Metrowerks, for example) // also #define _MSC_VER # pragma warning (disable:4103) # pragma warning (push) # pragma warning (disable:4127) # pragma warning (disable:4180) # pragma warning (disable:4244) # pragma warning (disable:4355) # pragma warning (disable:4510) # pragma warning (disable:4512) # pragma warning (disable:4610) # pragma warning (disable:4675) # if defined(_M_IX86) && defined(_Wp64) // The /Wp64 option is broken. If you want to check 64 bit portability, use a // 64 bit compiler! 
# pragma warning (disable:4311) # pragma warning (disable:4312) # endif // defined(_M_IX86) && defined(_Wp64) # pragma pack (push, 8) // Note that if the /Og optimisation flag is enabled with MSVC6, the compiler // has a tendency to incorrectly optimise away some calls to member template // functions, even though those functions contain code that should not be // optimised away! Therefore we will always disable this optimisation option // for the MSVC6 compiler. # if (_MSC_VER < 1300) # pragma optimize ("g", off) # endif # if !defined(_MT) # error Multithreaded RTL must be selected. # endif // !defined(_MT) # if defined(__cplusplus_cli) || defined(__cplusplus_winrt) # if !defined(ASIO_DISABLE_CLR_WORKAROUND) # if !defined(generic) # define generic cpp_generic # define ASIO_CLR_WORKAROUND # endif # endif # endif #endif galera-3-25.3.20/asio/asio/detail/win_iocp_overlapped_ptr.hpp0000644000015300001660000000653113042054732023721 0ustar jenkinsjenkins// // detail/win_iocp_overlapped_ptr.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_OVERLAPPED_PTR_HPP #define ASIO_DETAIL_WIN_IOCP_OVERLAPPED_PTR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/io_service.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/win_iocp_overlapped_op.hpp" #include "asio/detail/win_iocp_io_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Wraps a handler to create an OVERLAPPED object for use with overlapped I/O. class win_iocp_overlapped_ptr : private noncopyable { public: // Construct an empty win_iocp_overlapped_ptr. win_iocp_overlapped_ptr() : ptr_(0), iocp_service_(0) { } // Construct an win_iocp_overlapped_ptr to contain the specified handler. template explicit win_iocp_overlapped_ptr( asio::io_service& io_service, ASIO_MOVE_ARG(Handler) handler) : ptr_(0), iocp_service_(0) { this->reset(io_service, ASIO_MOVE_CAST(Handler)(handler)); } // Destructor automatically frees the OVERLAPPED object unless released. ~win_iocp_overlapped_ptr() { reset(); } // Reset to empty. void reset() { if (ptr_) { ptr_->destroy(); ptr_ = 0; iocp_service_->work_finished(); iocp_service_ = 0; } } // Reset to contain the specified handler, freeing any current OVERLAPPED // object. template void reset(asio::io_service& io_service, Handler handler) { typedef win_iocp_overlapped_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "io_service", &io_service.impl_, "overlapped")); io_service.impl_.work_started(); reset(); ptr_ = p.p; p.v = p.p = 0; iocp_service_ = &io_service.impl_; } // Get the contained OVERLAPPED object. OVERLAPPED* get() { return ptr_; } // Get the contained OVERLAPPED object. const OVERLAPPED* get() const { return ptr_; } // Release ownership of the OVERLAPPED object. 
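// The intended calling sequence for this wrapper: construct it (or reset())
// with a completion handler, hand get() to an overlapped Win32 call, then
// call release() if the call succeeded or reported ERROR_IO_PENDING, or
// complete(ec, 0) if it failed immediately, so the handler runs exactly once
// either way. A sketch with an illustrative handle h and buffer:
//
//   BOOL ok = ::WriteFile(h, buf, n, 0, overlapped.get());
//   DWORD last_error = ::GetLastError();
//   if (!ok && last_error != ERROR_IO_PENDING)
//     overlapped.complete(asio::error_code(last_error,
//         asio::error::get_system_category()), 0);
//   else
//     overlapped.release();
//
// release(), below, hands ownership of the OVERLAPPED to that pending
// operation: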
OVERLAPPED* release() { if (ptr_) iocp_service_->on_pending(ptr_); OVERLAPPED* tmp = ptr_; ptr_ = 0; iocp_service_ = 0; return tmp; } // Post completion notification for overlapped operation. Releases ownership. void complete(const asio::error_code& ec, std::size_t bytes_transferred) { if (ptr_) { iocp_service_->on_completion(ptr_, ec, static_cast(bytes_transferred)); ptr_ = 0; iocp_service_ = 0; } } private: win_iocp_operation* ptr_; win_iocp_io_service* iocp_service_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_OVERLAPPED_PTR_HPP galera-3-25.3.20/asio/asio/detail/date_time_fwd.hpp0000644000015300001660000000140013042054732021565 0ustar jenkinsjenkins// // detail/date_time_fwd.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_DATE_TIME_FWD_HPP #define ASIO_DETAIL_DATE_TIME_FWD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" namespace boost { namespace date_time { template class base_time; } // namespace date_time namespace posix_time { class ptime; } // namespace posix_time } // namespace boost #endif // ASIO_DETAIL_DATE_TIME_FWD_HPP galera-3-25.3.20/asio/asio/detail/std_event.hpp0000644000015300001660000000701113042054732020771 0ustar jenkinsjenkins// // detail/std_event.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_STD_EVENT_HPP #define ASIO_DETAIL_STD_EVENT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) #include #include #include "asio/detail/assert.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class std_event : private noncopyable { public: // Constructor. std_event() : state_(0) { } // Destructor. ~std_event() { } // Signal the event. (Retained for backward compatibility.) template void signal(Lock& lock) { this->signal_all(lock); } // Signal all waiters. template void signal_all(Lock& lock) { ASIO_ASSERT(lock.locked()); (void)lock; state_ |= 1; cond_.notify_all(); } // Unlock the mutex and signal one waiter. template void unlock_and_signal_one(Lock& lock) { ASIO_ASSERT(lock.locked()); state_ |= 1; bool have_waiters = (state_ > 1); lock.unlock(); if (have_waiters) cond_.notify_one(); } // If there's a waiter, unlock the mutex and signal it. template bool maybe_unlock_and_signal_one(Lock& lock) { ASIO_ASSERT(lock.locked()); state_ |= 1; if (state_ > 1) { lock.unlock(); cond_.notify_one(); return true; } return false; } // Reset the event. template void clear(Lock& lock) { ASIO_ASSERT(lock.locked()); (void)lock; state_ &= ~std::size_t(1); } // Wait for the event to become signalled. 
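// state_ packs two things into a single word: bit 0 is the signalled flag,
// and the remaining bits count outstanding waiters in steps of two (see the
// waiter helper further down), which is why the signalling members above test
// state_ > 1 to decide whether anybody needs waking. wait() below loops on
// that flag, counting itself as a waiter while blocked on the condition
// variable: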
template void wait(Lock& lock) { ASIO_ASSERT(lock.locked()); unique_lock_adapter u_lock(lock); while ((state_ & 1) == 0) { waiter w(state_); cond_.wait(u_lock.unique_lock_); } } // Timed wait for the event to become signalled. template bool wait_for_usec(Lock& lock, long usec) { ASIO_ASSERT(lock.locked()); unique_lock_adapter u_lock(lock); if ((state_ & 1) == 0) { waiter w(state_); cond_.wait_for(u_lock.unique_lock_, std::chrono::microseconds(usec)); } return (state_ & 1) != 0; } private: // Helper class to temporarily adapt a scoped_lock into a unique_lock so that // it can be passed to std::condition_variable::wait(). struct unique_lock_adapter { template explicit unique_lock_adapter(Lock& lock) : unique_lock_(lock.mutex().mutex_, std::adopt_lock) { } ~unique_lock_adapter() { unique_lock_.release(); } std::unique_lock unique_lock_; }; // Helper to increment and decrement the state to track outstanding waiters. class waiter { public: explicit waiter(std::size_t& state) : state_(state) { state_ += 2; } ~waiter() { state_ -= 2; } private: std::size_t& state_; }; std::condition_variable cond_; std::size_t state_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) #endif // ASIO_DETAIL_STD_EVENT_HPP galera-3-25.3.20/asio/asio/detail/static_mutex.hpp0000644000015300001660000000303213042054732021506 0ustar jenkinsjenkins// // detail/static_mutex.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_STATIC_MUTEX_HPP #define ASIO_DETAIL_STATIC_MUTEX_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) # include "asio/detail/null_static_mutex.hpp" #elif defined(ASIO_WINDOWS) # include "asio/detail/win_static_mutex.hpp" #elif defined(ASIO_HAS_PTHREADS) # include "asio/detail/posix_static_mutex.hpp" #elif defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) # include "asio/detail/std_static_mutex.hpp" #else # error Only Windows and POSIX are supported! #endif namespace asio { namespace detail { #if !defined(ASIO_HAS_THREADS) typedef null_static_mutex static_mutex; # define ASIO_STATIC_MUTEX_INIT ASIO_NULL_STATIC_MUTEX_INIT #elif defined(ASIO_WINDOWS) typedef win_static_mutex static_mutex; # define ASIO_STATIC_MUTEX_INIT ASIO_WIN_STATIC_MUTEX_INIT #elif defined(ASIO_HAS_PTHREADS) typedef posix_static_mutex static_mutex; # define ASIO_STATIC_MUTEX_INIT ASIO_POSIX_STATIC_MUTEX_INIT #elif defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) typedef std_static_mutex static_mutex; # define ASIO_STATIC_MUTEX_INIT ASIO_STD_STATIC_MUTEX_INIT #endif } // namespace detail } // namespace asio #endif // ASIO_DETAIL_STATIC_MUTEX_HPP galera-3-25.3.20/asio/asio/detail/win_mutex.hpp0000644000015300001660000000323213042054732021016 0ustar jenkinsjenkins// // detail/win_mutex.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_MUTEX_HPP #define ASIO_DETAIL_WIN_MUTEX_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) #include "asio/detail/noncopyable.hpp" #include "asio/detail/scoped_lock.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class win_mutex : private noncopyable { public: typedef asio::detail::scoped_lock scoped_lock; // Constructor. ASIO_DECL win_mutex(); // Destructor. ~win_mutex() { ::DeleteCriticalSection(&crit_section_); } // Lock the mutex. void lock() { ::EnterCriticalSection(&crit_section_); } // Unlock the mutex. void unlock() { ::LeaveCriticalSection(&crit_section_); } private: // Initialisation must be performed in a separate function to the constructor // since the compiler does not support the use of structured exceptions and // C++ exceptions in the same function. ASIO_DECL int do_init(); ::CRITICAL_SECTION crit_section_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/win_mutex.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_WINDOWS) #endif // ASIO_DETAIL_WIN_MUTEX_HPP galera-3-25.3.20/asio/asio/detail/win_iocp_thread_info.hpp0000644000015300001660000000147413042054732023156 0ustar jenkinsjenkins// // detail/win_iocp_thread_info.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_THREAD_INFO_HPP #define ASIO_DETAIL_WIN_IOCP_THREAD_INFO_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/thread_info_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct win_iocp_thread_info : public thread_info_base { }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_WIN_IOCP_THREAD_INFO_HPP galera-3-25.3.20/asio/asio/detail/posix_thread.hpp0000644000015300001660000000361613042054732021476 0ustar jenkinsjenkins// // detail/posix_thread.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_POSIX_THREAD_HPP #define ASIO_DETAIL_POSIX_THREAD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_PTHREADS) #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { extern "C" { ASIO_DECL void* asio_detail_posix_thread_function(void* arg); } class posix_thread : private noncopyable { public: // Constructor. template posix_thread(Function f, unsigned int = 0) : joined_(false) { start_thread(new func(f)); } // Destructor. ASIO_DECL ~posix_thread(); // Wait for the thread to exit. 
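// The constructor above copies the callable into a heap-allocated
// func<Function> object (a small type-erasure wrapper) and starts the thread
// immediately; there is no separate start step. A minimal internal usage
// sketch, with an illustrative worker_function:
//
//   asio::detail::posix_thread t(worker_function);
//   t.join();   // the join() declared just below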
ASIO_DECL void join(); private: friend void* asio_detail_posix_thread_function(void* arg); class func_base { public: virtual ~func_base() {} virtual void run() = 0; }; struct auto_func_base_ptr { func_base* ptr; ~auto_func_base_ptr() { delete ptr; } }; template class func : public func_base { public: func(Function f) : f_(f) { } virtual void run() { f_(); } private: Function f_; }; ASIO_DECL void start_thread(func_base* arg); ::pthread_t thread_; bool joined_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/posix_thread.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_PTHREADS) #endif // ASIO_DETAIL_POSIX_THREAD_HPP galera-3-25.3.20/asio/asio/detail/win_iocp_serial_port_service.hpp0000644000015300001660000001630413042054732024735 0ustar jenkinsjenkins// // detail/win_iocp_serial_port_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_SERIAL_PORT_SERVICE_HPP #define ASIO_DETAIL_WIN_IOCP_SERIAL_PORT_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) && defined(ASIO_HAS_SERIAL_PORT) #include #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/detail/win_iocp_handle_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Extend win_iocp_handle_service to provide serial port support. class win_iocp_serial_port_service { public: // The native type of a serial port. typedef win_iocp_handle_service::native_handle_type native_handle_type; // The implementation type of the serial port. typedef win_iocp_handle_service::implementation_type implementation_type; // Constructor. ASIO_DECL win_iocp_serial_port_service( asio::io_service& io_service); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Construct a new serial port implementation. void construct(implementation_type& impl) { handle_service_.construct(impl); } // Move-construct a new serial port implementation. void move_construct(implementation_type& impl, implementation_type& other_impl) { handle_service_.move_construct(impl, other_impl); } // Move-assign from another serial port implementation. void move_assign(implementation_type& impl, win_iocp_serial_port_service& other_service, implementation_type& other_impl) { handle_service_.move_assign(impl, other_service.handle_service_, other_impl); } // Destroy a serial port implementation. void destroy(implementation_type& impl) { handle_service_.destroy(impl); } // Open the serial port using the specified device name. ASIO_DECL asio::error_code open(implementation_type& impl, const std::string& device, asio::error_code& ec); // Assign a native handle to a serial port implementation. asio::error_code assign(implementation_type& impl, const native_handle_type& handle, asio::error_code& ec) { return handle_service_.assign(impl, handle, ec); } // Determine whether the serial port is open. 
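// At the public API level this service sits behind asio::serial_port on
// Windows. A typical open-and-configure sequence (device name illustrative):
//
//   asio::io_service ios;
//   asio::serial_port port(ios);
//   asio::error_code ec;
//   port.open("COM3", ec);
//   if (!ec)
//     port.set_option(asio::serial_port_base::baud_rate(115200));
//
// is_open() and most of the members below simply forward to the underlying
// IOCP handle service: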
bool is_open(const implementation_type& impl) const { return handle_service_.is_open(impl); } // Destroy a serial port implementation. asio::error_code close(implementation_type& impl, asio::error_code& ec) { return handle_service_.close(impl, ec); } // Get the native serial port representation. native_handle_type native_handle(implementation_type& impl) { return handle_service_.native_handle(impl); } // Cancel all operations associated with the handle. asio::error_code cancel(implementation_type& impl, asio::error_code& ec) { return handle_service_.cancel(impl, ec); } // Set an option on the serial port. template asio::error_code set_option(implementation_type& impl, const SettableSerialPortOption& option, asio::error_code& ec) { return do_set_option(impl, &win_iocp_serial_port_service::store_option, &option, ec); } // Get an option from the serial port. template asio::error_code get_option(const implementation_type& impl, GettableSerialPortOption& option, asio::error_code& ec) const { return do_get_option(impl, &win_iocp_serial_port_service::load_option, &option, ec); } // Send a break sequence to the serial port. asio::error_code send_break(implementation_type&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Write the given data. Returns the number of bytes sent. template size_t write_some(implementation_type& impl, const ConstBufferSequence& buffers, asio::error_code& ec) { return handle_service_.write_some(impl, buffers, ec); } // Start an asynchronous write. The data being written must be valid for the // lifetime of the asynchronous operation. template void async_write_some(implementation_type& impl, const ConstBufferSequence& buffers, Handler& handler) { handle_service_.async_write_some(impl, buffers, handler); } // Read some data. Returns the number of bytes received. template size_t read_some(implementation_type& impl, const MutableBufferSequence& buffers, asio::error_code& ec) { return handle_service_.read_some(impl, buffers, ec); } // Start an asynchronous read. The buffer for the data being received must be // valid for the lifetime of the asynchronous operation. template void async_read_some(implementation_type& impl, const MutableBufferSequence& buffers, Handler& handler) { handle_service_.async_read_some(impl, buffers, handler); } private: // Function pointer type for storing a serial port option. typedef asio::error_code (*store_function_type)( const void*, ::DCB&, asio::error_code&); // Helper function template to store a serial port option. template static asio::error_code store_option(const void* option, ::DCB& storage, asio::error_code& ec) { return static_cast(option)->store( storage, ec); } // Helper function to set a serial port option. ASIO_DECL asio::error_code do_set_option( implementation_type& impl, store_function_type store, const void* option, asio::error_code& ec); // Function pointer type for loading a serial port option. typedef asio::error_code (*load_function_type)( void*, const ::DCB&, asio::error_code&); // Helper function template to load a serial port option. template static asio::error_code load_option(void* option, const ::DCB& storage, asio::error_code& ec) { return static_cast(option)->load(storage, ec); } // Helper function to get a serial port option. ASIO_DECL asio::error_code do_get_option( const implementation_type& impl, load_function_type load, void* option, asio::error_code& ec) const; // The implementation used for initiating asynchronous operations. 
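// Note the pattern used by set_option()/get_option() above: the typed option
// is reduced to a void* plus a store_option<Option>/load_option<Option>
// function pointer, so the out-of-line do_set_option()/do_get_option()
// helpers can manipulate the ::DCB without themselves being templates. The
// member declared next is the handle service that performs the actual I/O: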
win_iocp_handle_service handle_service_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/win_iocp_serial_port_service.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_IOCP) && defined(ASIO_HAS_SERIAL_PORT) #endif // ASIO_DETAIL_WIN_IOCP_SERIAL_PORT_SERVICE_HPP galera-3-25.3.20/asio/asio/detail/reactor.hpp0000644000015300001660000000157513042054732020446 0ustar jenkinsjenkins// // detail/reactor.hpp // ~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTOR_HPP #define ASIO_DETAIL_REACTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/reactor_fwd.hpp" #if defined(ASIO_HAS_EPOLL) # include "asio/detail/epoll_reactor.hpp" #elif defined(ASIO_HAS_KQUEUE) # include "asio/detail/kqueue_reactor.hpp" #elif defined(ASIO_HAS_DEV_POLL) # include "asio/detail/dev_poll_reactor.hpp" #elif defined(ASIO_WINDOWS_RUNTIME) # include "asio/detail/null_reactor.hpp" #else # include "asio/detail/select_reactor.hpp" #endif #endif // ASIO_DETAIL_REACTOR_HPP galera-3-25.3.20/asio/asio/detail/null_tss_ptr.hpp0000644000015300001660000000223313042054732021527 0ustar jenkinsjenkins// // detail/null_tss_ptr.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_NULL_TSS_PTR_HPP #define ASIO_DETAIL_NULL_TSS_PTR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class null_tss_ptr : private noncopyable { public: // Constructor. null_tss_ptr() : value_(0) { } // Destructor. ~null_tss_ptr() { } // Get the value. operator T*() const { return value_; } // Set the value. void operator=(T* value) { value_ = value; } private: T* value_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_HAS_THREADS) #endif // ASIO_DETAIL_NULL_TSS_PTR_HPP galera-3-25.3.20/asio/asio/detail/thread.hpp0000644000015300001660000000252013042054732020245 0ustar jenkinsjenkins// // detail/thread.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_THREAD_HPP #define ASIO_DETAIL_THREAD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) # include "asio/detail/null_thread.hpp" #elif defined(ASIO_WINDOWS) # if defined(UNDER_CE) # include "asio/detail/wince_thread.hpp" # else # include "asio/detail/win_thread.hpp" # endif #elif defined(ASIO_HAS_PTHREADS) # include "asio/detail/posix_thread.hpp" #elif defined(ASIO_HAS_STD_THREAD) # include "asio/detail/std_thread.hpp" #else # error Only Windows, POSIX and std::thread are supported! #endif namespace asio { namespace detail { #if !defined(ASIO_HAS_THREADS) typedef null_thread thread; #elif defined(ASIO_WINDOWS) # if defined(UNDER_CE) typedef wince_thread thread; # else typedef win_thread thread; # endif #elif defined(ASIO_HAS_PTHREADS) typedef posix_thread thread; #elif defined(ASIO_HAS_STD_THREAD) typedef std_thread thread; #endif } // namespace detail } // namespace asio #endif // ASIO_DETAIL_THREAD_HPP galera-3-25.3.20/asio/asio/detail/dependent_type.hpp0000644000015300001660000000146013042054732022007 0ustar jenkinsjenkins// // detail/dependent_type.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_DEPENDENT_TYPE_HPP #define ASIO_DETAIL_DEPENDENT_TYPE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template struct dependent_type { typedef T type; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_DEPENDENT_TYPE_HPP galera-3-25.3.20/asio/asio/detail/shared_ptr.hpp0000644000015300001660000000171513042054732021136 0ustar jenkinsjenkins// // detail/shared_ptr.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SHARED_PTR_HPP #define ASIO_DETAIL_SHARED_PTR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_SHARED_PTR) # include #else // defined(ASIO_HAS_STD_SHARED_PTR) # include #endif // defined(ASIO_HAS_STD_SHARED_PTR) namespace asio { namespace detail { #if defined(ASIO_HAS_STD_SHARED_PTR) using std::shared_ptr; #else // defined(ASIO_HAS_STD_SHARED_PTR) using boost::shared_ptr; #endif // defined(ASIO_HAS_STD_SHARED_PTR) } // namespace detail } // namespace asio #endif // ASIO_DETAIL_SHARED_PTR_HPP galera-3-25.3.20/asio/asio/detail/win_iocp_overlapped_op.hpp0000644000015300001660000000516713042054732023536 0ustar jenkinsjenkins// // detail/win_iocp_overlapped_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_OVERLAPPED_OP_HPP #define ASIO_DETAIL_WIN_IOCP_OVERLAPPED_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/error.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class win_iocp_overlapped_op : public operation { public: ASIO_DEFINE_HANDLER_PTR(win_iocp_overlapped_op); win_iocp_overlapped_op(Handler& handler) : operation(&win_iocp_overlapped_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& ec, std::size_t bytes_transferred) { // Take ownership of the operation object. win_iocp_overlapped_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, ec, bytes_transferred); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_OVERLAPPED_OP_HPP galera-3-25.3.20/asio/asio/detail/dev_poll_reactor.hpp0000644000015300001660000001565513042054732022336 0ustar jenkinsjenkins// // detail/dev_poll_reactor.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_DEV_POLL_REACTOR_HPP #define ASIO_DETAIL_DEV_POLL_REACTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_DEV_POLL) #include #include #include #include "asio/detail/hash_map.hpp" #include "asio/detail/limits.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/reactor_op_queue.hpp" #include "asio/detail/select_interrupter.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/timer_queue_base.hpp" #include "asio/detail/timer_queue_set.hpp" #include "asio/detail/wait_op.hpp" #include "asio/io_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class dev_poll_reactor : public asio::detail::service_base { public: enum op_types { read_op = 0, write_op = 1, connect_op = 1, except_op = 2, max_ops = 3 }; // Per-descriptor data. struct per_descriptor_data { }; // Constructor. ASIO_DECL dev_poll_reactor(asio::io_service& io_service); // Destructor. ASIO_DECL ~dev_poll_reactor(); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Recreate internal descriptors following a fork. ASIO_DECL void fork_service( asio::io_service::fork_event fork_ev); // Initialise the task. ASIO_DECL void init_task(); // Register a socket with the reactor. Returns 0 on success, system error // code on failure. ASIO_DECL int register_descriptor(socket_type, per_descriptor_data&); // Register a descriptor with an associated single operation. Returns 0 on // success, system error code on failure. ASIO_DECL int register_internal_descriptor( int op_type, socket_type descriptor, per_descriptor_data& descriptor_data, reactor_op* op); // Move descriptor registration from one descriptor_data object to another. ASIO_DECL void move_descriptor(socket_type descriptor, per_descriptor_data& target_descriptor_data, per_descriptor_data& source_descriptor_data); // Post a reactor operation for immediate completion. void post_immediate_completion(reactor_op* op, bool is_continuation) { io_service_.post_immediate_completion(op, is_continuation); } // Start a new operation. The reactor operation will be performed when the // given descriptor is flagged as ready, or an error has occurred. ASIO_DECL void start_op(int op_type, socket_type descriptor, per_descriptor_data&, reactor_op* op, bool is_continuation, bool allow_speculative); // Cancel all operations associated with the given descriptor. The // handlers associated with the descriptor will be invoked with the // operation_aborted error. ASIO_DECL void cancel_ops(socket_type descriptor, per_descriptor_data&); // Cancel any operations that are running against the descriptor and remove // its registration from the reactor. ASIO_DECL void deregister_descriptor(socket_type descriptor, per_descriptor_data&, bool closing); // Cancel any operations that are running against the descriptor and remove // its registration from the reactor. ASIO_DECL void deregister_internal_descriptor( socket_type descriptor, per_descriptor_data&); // Add a new timer queue to the reactor. template void add_timer_queue(timer_queue& queue); // Remove a timer queue from the reactor. 
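// ---------------------------------------------------------------------------
// [Editorial sketch] Stripped of the /dev/poll specifics, the job of a reactor
// like the one declared here is: remember which descriptors have pending
// operations, block until one becomes ready or the timeout expires, then run
// the ready operations. A toy version using portable poll() -- hypothetical
// mini_* names, not the asio reactor:
#include <poll.h>
#include <cstddef>
#include <map>
#include <vector>

class mini_reactor
{
public:
  typedef void (*ready_fn)(int descriptor);

  // Register interest in readability of a descriptor.
  void start_read_op(int descriptor, ready_fn on_ready)
  {
    read_ops_[descriptor] = on_ready;
  }

  // Block for at most timeout_msec, then run handlers for ready descriptors.
  void run(int timeout_msec)
  {
    std::vector<pollfd> fds;
    for (std::map<int, ready_fn>::iterator i = read_ops_.begin();
         i != read_ops_.end(); ++i)
    {
      pollfd p = { i->first, POLLIN, 0 };
      fds.push_back(p);
    }

    int n = ::poll(fds.empty() ? 0 : &fds[0],
                   static_cast<nfds_t>(fds.size()), timeout_msec);
    if (n <= 0)
      return; // timeout or error: nothing to dispatch

    for (std::size_t i = 0; i < fds.size(); ++i)
    {
      if (fds[i].revents & (POLLIN | POLLERR | POLLHUP))
      {
        std::map<int, ready_fn>::iterator it = read_ops_.find(fds[i].fd);
        if (it != read_ops_.end())
        {
          ready_fn fn = it->second;
          read_ops_.erase(it);   // one-shot, like a queued reactor_op
          fn(fds[i].fd);
        }
      }
    }
  }

private:
  std::map<int, ready_fn> read_ops_;
};
// --- end sketch ---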
template void remove_timer_queue(timer_queue& queue); // Schedule a new operation in the given timer queue to expire at the // specified absolute time. template void schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op); // Cancel the timer operations associated with the given token. Returns the // number of operations that have been posted or dispatched. template std::size_t cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled = (std::numeric_limits::max)()); // Run /dev/poll once until interrupted or events are ready to be dispatched. ASIO_DECL void run(bool block, op_queue& ops); // Interrupt the select loop. ASIO_DECL void interrupt(); private: // Create the /dev/poll file descriptor. Throws an exception if the descriptor // cannot be created. ASIO_DECL static int do_dev_poll_create(); // Helper function to add a new timer queue. ASIO_DECL void do_add_timer_queue(timer_queue_base& queue); // Helper function to remove a timer queue. ASIO_DECL void do_remove_timer_queue(timer_queue_base& queue); // Get the timeout value for the /dev/poll DP_POLL operation. The timeout // value is returned as a number of milliseconds. A return value of -1 // indicates that the poll should block indefinitely. ASIO_DECL int get_timeout(); // Cancel all operations associated with the given descriptor. The do_cancel // function of the handler objects will be invoked. This function does not // acquire the dev_poll_reactor's mutex. ASIO_DECL void cancel_ops_unlocked(socket_type descriptor, const asio::error_code& ec); // Add a pending event entry for the given descriptor. ASIO_DECL ::pollfd& add_pending_event_change(int descriptor); // The io_service implementation used to post completions. io_service_impl& io_service_; // Mutex to protect access to internal data. asio::detail::mutex mutex_; // The /dev/poll file descriptor. int dev_poll_fd_; // Vector of /dev/poll events waiting to be written to the descriptor. std::vector< ::pollfd> pending_event_changes_; // Hash map to associate a descriptor with a pending event change index. hash_map pending_event_change_index_; // The interrupter is used to break a blocking DP_POLL operation. select_interrupter interrupter_; // The queues of read, write and except operations. reactor_op_queue op_queue_[max_ops]; // The timer queues. timer_queue_set timer_queues_; // Whether the service has been shut down. bool shutdown_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/detail/impl/dev_poll_reactor.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/dev_poll_reactor.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_DEV_POLL) #endif // ASIO_DETAIL_DEV_POLL_REACTOR_HPP galera-3-25.3.20/asio/asio/detail/tss_ptr.hpp0000644000015300001660000000326013042054732020476 0ustar jenkinsjenkins// // detail/tss_ptr.hpp // ~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_TSS_PTR_HPP #define ASIO_DETAIL_TSS_PTR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) # include "asio/detail/null_tss_ptr.hpp" #elif defined(ASIO_HAS_THREAD_KEYWORD_EXTENSION) # include "asio/detail/keyword_tss_ptr.hpp" #elif defined(ASIO_WINDOWS) # include "asio/detail/win_tss_ptr.hpp" #elif defined(ASIO_HAS_PTHREADS) # include "asio/detail/posix_tss_ptr.hpp" #else # error Only Windows and POSIX are supported! #endif #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class tss_ptr #if !defined(ASIO_HAS_THREADS) : public null_tss_ptr #elif defined(ASIO_HAS_THREAD_KEYWORD_EXTENSION) : public keyword_tss_ptr #elif defined(ASIO_WINDOWS) : public win_tss_ptr #elif defined(ASIO_HAS_PTHREADS) : public posix_tss_ptr #endif { public: void operator=(T* value) { #if !defined(ASIO_HAS_THREADS) null_tss_ptr::operator=(value); #elif defined(ASIO_HAS_THREAD_KEYWORD_EXTENSION) keyword_tss_ptr::operator=(value); #elif defined(ASIO_WINDOWS) win_tss_ptr::operator=(value); #elif defined(ASIO_HAS_PTHREADS) posix_tss_ptr::operator=(value); #endif } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_TSS_PTR_HPP galera-3-25.3.20/asio/asio/detail/pipe_select_interrupter.hpp0000644000015300001660000000457613042054732023752 0ustar jenkinsjenkins// // detail/pipe_select_interrupter.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_PIPE_SELECT_INTERRUPTER_HPP #define ASIO_DETAIL_PIPE_SELECT_INTERRUPTER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS) #if !defined(ASIO_WINDOWS_RUNTIME) #if !defined(__CYGWIN__) #if !defined(__SYMBIAN32__) #if !defined(ASIO_HAS_EVENTFD) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class pipe_select_interrupter { public: // Constructor. ASIO_DECL pipe_select_interrupter(); // Destructor. ASIO_DECL ~pipe_select_interrupter(); // Recreate the interrupter's descriptors. Used after a fork. ASIO_DECL void recreate(); // Interrupt the select call. ASIO_DECL void interrupt(); // Reset the select interrupt. Returns true if the call was interrupted. ASIO_DECL bool reset(); // Get the read descriptor to be passed to select. int read_descriptor() const { return read_descriptor_; } private: // Open the descriptors. Throws on error. ASIO_DECL void open_descriptors(); // Close the descriptors. ASIO_DECL void close_descriptors(); // The read end of a connection used to interrupt the select call. This file // descriptor is passed to select such that when it is time to stop, a single // byte will be written on the other end of the connection and this // descriptor will become readable. int read_descriptor_; // The write end of a connection used to interrupt the select call. A single // byte may be written to this to wake up the select which is waiting for the // other end to become readable. 
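// ---------------------------------------------------------------------------
// [Editorial sketch] The interrupter described above is the classic
// "self-pipe trick": the read end sits in the select()/poll() set, and writing
// one byte to the write end makes the blocked call return. A stand-alone
// version of the core idea -- hypothetical names, only essential error
// handling:
#include <fcntl.h>
#include <unistd.h>

class mini_pipe_interrupter
{
public:
  mini_pipe_interrupter()
  {
    int fds[2];
    if (::pipe(fds) == 0)
    {
      read_descriptor_ = fds[0];
      write_descriptor_ = fds[1];
      ::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK);
      ::fcntl(write_descriptor_, F_SETFL, O_NONBLOCK);
    }
    else
    {
      read_descriptor_ = write_descriptor_ = -1;
    }
  }

  ~mini_pipe_interrupter()
  {
    if (read_descriptor_ != -1) ::close(read_descriptor_);
    if (write_descriptor_ != -1) ::close(write_descriptor_);
  }

  // Wake up whoever is blocked watching read_descriptor().
  void interrupt()
  {
    char byte = 0;
    (void)::write(write_descriptor_, &byte, 1);
  }

  // Drain the pipe; returns true if an interrupt had been signalled.
  bool reset()
  {
    char data[64];
    bool was_interrupted = false;
    while (::read(read_descriptor_, data, sizeof(data)) > 0)
      was_interrupted = true;
    return was_interrupted;
  }

  // Add this descriptor to the select()/poll() read set.
  int read_descriptor() const { return read_descriptor_; }

private:
  int read_descriptor_;
  int write_descriptor_;
};
// --- end sketch ---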
int write_descriptor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/pipe_select_interrupter.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // !defined(ASIO_HAS_EVENTFD) #endif // !defined(__SYMBIAN32__) #endif // !defined(__CYGWIN__) #endif // !defined(ASIO_WINDOWS_RUNTIME) #endif // !defined(ASIO_WINDOWS) #endif // ASIO_DETAIL_PIPE_SELECT_INTERRUPTER_HPP galera-3-25.3.20/asio/asio/detail/win_iocp_socket_send_op.hpp0000644000015300001660000000653613042054732023677 0ustar jenkinsjenkins// // detail/win_iocp_socket_send_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_SOCKET_SEND_OP_HPP #define ASIO_DETAIL_WIN_IOCP_SOCKET_SEND_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class win_iocp_socket_send_op : public operation { public: ASIO_DEFINE_HANDLER_PTR(win_iocp_socket_send_op); win_iocp_socket_send_op(socket_ops::weak_cancel_token_type cancel_token, const ConstBufferSequence& buffers, Handler& handler) : operation(&win_iocp_socket_send_op::do_complete), cancel_token_(cancel_token), buffers_(buffers), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& result_ec, std::size_t bytes_transferred) { asio::error_code ec(result_ec); // Take ownership of the operation object. win_iocp_socket_send_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) // Check whether buffers are still valid. if (owner) { buffer_sequence_adapter::validate(o->buffers_); } #endif // defined(ASIO_ENABLE_BUFFER_DEBUGGING) socket_ops::complete_iocp_send(o->cancel_token_, ec); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, ec, bytes_transferred); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. 
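// ---------------------------------------------------------------------------
// [Editorial sketch] The "copy the handler, free the operation, then invoke"
// dance explained in do_complete() above exists because the operation's memory
// may be owned, via the allocation hooks, by the handler itself. The sequence
// without the IOCP and hook machinery -- hypothetical mini_* names:
#include <cstddef>

template <typename Handler>
struct mini_op
{
  explicit mini_op(Handler h) : handler_(h) {}

  static void do_complete(mini_op* op, int result)
  {
    // 1. Copy the handler onto the stack while the op is still alive.
    Handler handler(op->handler_);

    // 2. Destroy and free the operation *before* the upcall, so that any
    //    memory owned by (a sub-object of) the handler is released first.
    delete op;

    // 3. Only now make the upcall; whatever the handler allocates or starts
    //    can no longer collide with the operation's freed memory.
    handler(result);
  }

  Handler handler_;
};

// Usage: allocate an op when starting work, complete it later.
template <typename Handler>
void start_work(Handler h)
{
  mini_op<Handler>* op = new mini_op<Handler>(h);
  // ... hand 'op' to the OS or reactor; eventually completion runs:
  mini_op<Handler>::do_complete(op, 0);
}
// --- end sketch ---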
if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: socket_ops::weak_cancel_token_type cancel_token_; ConstBufferSequence buffers_; Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_SOCKET_SEND_OP_HPP galera-3-25.3.20/asio/asio/detail/solaris_fenced_block.hpp0000644000015300001660000000230213042054732023126 0ustar jenkinsjenkins// // detail/solaris_fenced_block.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SOLARIS_FENCED_BLOCK_HPP #define ASIO_DETAIL_SOLARIS_FENCED_BLOCK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(__sun) #include #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class solaris_fenced_block : private noncopyable { public: enum half_t { half }; enum full_t { full }; // Constructor for a half fenced block. explicit solaris_fenced_block(half_t) { } // Constructor for a full fenced block. explicit solaris_fenced_block(full_t) { membar_consumer(); } // Destructor. ~solaris_fenced_block() { membar_producer(); } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(__sun) #endif // ASIO_DETAIL_SOLARIS_FENCED_BLOCK_HPP galera-3-25.3.20/asio/asio/detail/timer_queue_ptime.hpp0000644000015300001660000000532713042054732022530 0ustar jenkinsjenkins// // detail/timer_queue_ptime.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_TIMER_QUEUE_PTIME_HPP #define ASIO_DETAIL_TIMER_QUEUE_PTIME_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/time_traits.hpp" #include "asio/detail/timer_queue.hpp" #include "asio/detail/push_options.hpp" #if defined(ASIO_HAS_BOOST_DATE_TIME) namespace asio { namespace detail { struct forwarding_posix_time_traits : time_traits {}; // Template specialisation for the commonly used instantation. template <> class timer_queue > : public timer_queue_base { public: // The time type. typedef boost::posix_time::ptime time_type; // The duration type. typedef boost::posix_time::time_duration duration_type; // Per-timer data. typedef timer_queue::per_timer_data per_timer_data; // Constructor. ASIO_DECL timer_queue(); // Destructor. ASIO_DECL virtual ~timer_queue(); // Add a new timer to the queue. Returns true if this is the timer that is // earliest in the queue, in which case the reactor's event demultiplexing // function call may need to be interrupted and restarted. ASIO_DECL bool enqueue_timer(const time_type& time, per_timer_data& timer, wait_op* op); // Whether there are no timers in the queue. ASIO_DECL virtual bool empty() const; // Get the time for the timer that is earliest in the queue. 
ASIO_DECL virtual long wait_duration_msec(long max_duration) const; // Get the time for the timer that is earliest in the queue. ASIO_DECL virtual long wait_duration_usec(long max_duration) const; // Dequeue all timers not later than the current time. ASIO_DECL virtual void get_ready_timers(op_queue& ops); // Dequeue all timers. ASIO_DECL virtual void get_all_timers(op_queue& ops); // Cancel and dequeue operations for the given timer. ASIO_DECL std::size_t cancel_timer( per_timer_data& timer, op_queue& ops, std::size_t max_cancelled = (std::numeric_limits::max)()); private: timer_queue impl_; }; } // namespace detail } // namespace asio #endif // defined(ASIO_HAS_BOOST_DATE_TIME) #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/timer_queue_ptime.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_DETAIL_TIMER_QUEUE_PTIME_HPP galera-3-25.3.20/asio/asio/detail/wait_handler.hpp0000644000015300001660000000456613042054732021453 0ustar jenkinsjenkins// // detail/wait_handler.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WAIT_HANDLER_HPP #define ASIO_DETAIL_WAIT_HANDLER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/wait_op.hpp" #include "asio/io_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class wait_handler : public wait_op { public: ASIO_DEFINE_HANDLER_PTR(wait_handler); wait_handler(Handler& h) : wait_op(&wait_handler::do_complete), handler_(ASIO_MOVE_CAST(Handler)(h)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. wait_handler* h(static_cast(base)); ptr p = { asio::detail::addressof(h->handler_), h, h }; ASIO_HANDLER_COMPLETION((h)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder1 handler(h->handler_, h->ec_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_WAIT_HANDLER_HPP galera-3-25.3.20/asio/asio/detail/reactor_fwd.hpp0000644000015300001660000000200613042054732021274 0ustar jenkinsjenkins// // detail/reactor_fwd.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTOR_FWD_HPP #define ASIO_DETAIL_REACTOR_FWD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" namespace asio { namespace detail { #if defined(ASIO_WINDOWS_RUNTIME) typedef class null_reactor reactor; #elif defined(ASIO_HAS_IOCP) typedef class select_reactor reactor; #elif defined(ASIO_HAS_EPOLL) typedef class epoll_reactor reactor; #elif defined(ASIO_HAS_KQUEUE) typedef class kqueue_reactor reactor; #elif defined(ASIO_HAS_DEV_POLL) typedef class dev_poll_reactor reactor; #else typedef class select_reactor reactor; #endif } // namespace detail } // namespace asio #endif // ASIO_DETAIL_REACTOR_FWD_HPP galera-3-25.3.20/asio/asio/detail/atomic_count.hpp0000644000015300001660000000244713042054732021472 0ustar jenkinsjenkins// // detail/atomic_count.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_ATOMIC_COUNT_HPP #define ASIO_DETAIL_ATOMIC_COUNT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) // Nothing to include. #elif defined(ASIO_HAS_STD_ATOMIC) # include #else // defined(ASIO_HAS_STD_ATOMIC) # include #endif // defined(ASIO_HAS_STD_ATOMIC) namespace asio { namespace detail { #if !defined(ASIO_HAS_THREADS) typedef long atomic_count; inline void increment(atomic_count& a, long b) { a += b; } #elif defined(ASIO_HAS_STD_ATOMIC) typedef std::atomic atomic_count; inline void increment(atomic_count& a, long b) { a += b; } #else // defined(ASIO_HAS_STD_ATOMIC) typedef boost::detail::atomic_count atomic_count; inline void increment(atomic_count& a, long b) { while (b > 0) ++a, --b; } #endif // defined(ASIO_HAS_STD_ATOMIC) } // namespace detail } // namespace asio #endif // ASIO_DETAIL_ATOMIC_COUNT_HPP galera-3-25.3.20/asio/asio/detail/handler_alloc_helpers.hpp0000644000015300001660000000375513042054732023322 0ustar jenkinsjenkins// // detail/handler_alloc_helpers.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_HANDLER_ALLOC_HELPERS_HPP #define ASIO_DETAIL_HANDLER_ALLOC_HELPERS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/handler_alloc_hook.hpp" #include "asio/detail/push_options.hpp" // Calls to asio_handler_allocate and asio_handler_deallocate must be made from // a namespace that does not contain any overloads of these functions. The // asio_handler_alloc_helpers namespace is defined here for that purpose. 
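// ---------------------------------------------------------------------------
// [Editorial sketch] The comment above describes an argument-dependent-lookup
// customization point: a default hook exists in the library namespace, a user
// may overload the hook in the namespace of their own handler type, and the
// library calls the hook from a neutral namespace so the user's overload can
// win. The shape of that idiom with hypothetical my_lib/my_app names (not the
// asio hook itself):
#include <cstddef>
#include <new>

namespace my_lib {

// Default hooks: plain operator new/delete.
inline void* hook_allocate(std::size_t s, ...) { return ::operator new(s); }
inline void hook_deallocate(void* p, std::size_t, ...) { ::operator delete(p); }

} // namespace my_lib

namespace my_app {

struct pooled_handler
{
  void operator()() {}
};

// Found by ADL because pooled_handler lives in my_app; a real overload would
// carve the memory out of a per-handler pool.
inline void* hook_allocate(std::size_t s, pooled_handler*)
{
  return ::operator new(s);
}

inline void hook_deallocate(void* p, std::size_t, pooled_handler*)
{
  ::operator delete(p);
}

} // namespace my_app

// The library-side helpers live in a namespace with *no* hook overloads, so
// the unqualified call considers the default and any ADL-found overload.
namespace my_lib_hook_helpers {

template <typename Handler>
inline void* allocate(std::size_t s, Handler& h)
{
  using my_lib::hook_allocate;   // make the default visible...
  return hook_allocate(s, &h);   // ...but let ADL pick a better match
}

template <typename Handler>
inline void deallocate(void* p, std::size_t s, Handler& h)
{
  using my_lib::hook_deallocate;
  hook_deallocate(p, s, &h);
}

} // namespace my_lib_hook_helpers
// --- end sketch ---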
namespace asio_handler_alloc_helpers { template inline void* allocate(std::size_t s, Handler& h) { #if !defined(ASIO_HAS_HANDLER_HOOKS) return ::operator new(s); #else using asio::asio_handler_allocate; return asio_handler_allocate(s, asio::detail::addressof(h)); #endif } template inline void deallocate(void* p, std::size_t s, Handler& h) { #if !defined(ASIO_HAS_HANDLER_HOOKS) ::operator delete(p); #else using asio::asio_handler_deallocate; asio_handler_deallocate(p, s, asio::detail::addressof(h)); #endif } } // namespace asio_handler_alloc_helpers #define ASIO_DEFINE_HANDLER_PTR(op) \ struct ptr \ { \ Handler* h; \ void* v; \ op* p; \ ~ptr() \ { \ reset(); \ } \ void reset() \ { \ if (p) \ { \ p->~op(); \ p = 0; \ } \ if (v) \ { \ asio_handler_alloc_helpers::deallocate(v, sizeof(op), *h); \ v = 0; \ } \ } \ } \ /**/ #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_HANDLER_ALLOC_HELPERS_HPP galera-3-25.3.20/asio/asio/detail/win_iocp_socket_service_base.hpp0000644000015300001660000004646013042054732024702 0ustar jenkinsjenkins// // detail/win_iocp_socket_service_base.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_SOCKET_SERVICE_BASE_HPP #define ASIO_DETAIL_WIN_IOCP_SOCKET_SERVICE_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/socket_base.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/reactor.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_holder.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/win_iocp_io_service.hpp" #include "asio/detail/win_iocp_null_buffers_op.hpp" #include "asio/detail/win_iocp_socket_connect_op.hpp" #include "asio/detail/win_iocp_socket_send_op.hpp" #include "asio/detail/win_iocp_socket_recv_op.hpp" #include "asio/detail/win_iocp_socket_recvmsg_op.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class win_iocp_socket_service_base { public: // The implementation type of the socket. struct base_implementation_type { // The native socket representation. socket_type socket_; // The current state of the socket. socket_ops::state_type state_; // We use a shared pointer as a cancellation token here to work around the // broken Windows support for cancellation. MSDN says that when you call // closesocket any outstanding WSARecv or WSASend operations will complete // with the error ERROR_OPERATION_ABORTED. In practice they complete with // ERROR_NETNAME_DELETED, which means you can't tell the difference between // a local cancellation and the socket being hard-closed by the peer. socket_ops::shared_cancel_token_type cancel_token_; // Per-descriptor data used by the reactor. 
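// ---------------------------------------------------------------------------
// [Editorial sketch] The shared_ptr "cancel token" described above is a small
// trick: the socket owns a shared_ptr, every outstanding operation keeps only
// a weak_ptr, and close()/cancel() resets the shared_ptr. When an operation
// later completes with an ambiguous error, an expired weak_ptr tells us the
// cancellation was local. In miniature -- hypothetical names, not the asio
// types:
#include <memory>

enum completion_status { op_ok, op_aborted_locally, op_failed_remotely };

struct mini_socket_impl
{
  mini_socket_impl() : cancel_token_(std::make_shared<int>(0)) {}

  // Called by close()/cancel(): invalidates every outstanding weak token.
  void cancel_outstanding() { cancel_token_.reset(); }

  std::shared_ptr<int> cancel_token_;
};

struct mini_operation
{
  explicit mini_operation(const mini_socket_impl& impl)
    : token_(impl.cancel_token_) {}

  // Map an ambiguous OS error to the right user-visible result.
  completion_status complete(bool os_reported_connection_gone) const
  {
    if (!os_reported_connection_gone)
      return op_ok;
    // Token gone => we closed/cancelled locally; otherwise the peer did it.
    return token_.expired() ? op_aborted_locally : op_failed_remotely;
  }

  std::weak_ptr<int> token_;
};
// --- end sketch ---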
reactor::per_descriptor_data reactor_data_; #if defined(ASIO_ENABLE_CANCELIO) // The ID of the thread from which it is safe to cancel asynchronous // operations. 0 means no asynchronous operations have been started yet. // ~0 means asynchronous operations have been started from more than one // thread, and cancellation is not supported for the socket. DWORD safe_cancellation_thread_id_; #endif // defined(ASIO_ENABLE_CANCELIO) // Pointers to adjacent socket implementations in linked list. base_implementation_type* next_; base_implementation_type* prev_; }; // Constructor. ASIO_DECL win_iocp_socket_service_base( asio::io_service& io_service); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Construct a new socket implementation. ASIO_DECL void construct(base_implementation_type& impl); // Move-construct a new socket implementation. ASIO_DECL void base_move_construct(base_implementation_type& impl, base_implementation_type& other_impl); // Move-assign from another socket implementation. ASIO_DECL void base_move_assign(base_implementation_type& impl, win_iocp_socket_service_base& other_service, base_implementation_type& other_impl); // Destroy a socket implementation. ASIO_DECL void destroy(base_implementation_type& impl); // Determine whether the socket is open. bool is_open(const base_implementation_type& impl) const { return impl.socket_ != invalid_socket; } // Destroy a socket implementation. ASIO_DECL asio::error_code close( base_implementation_type& impl, asio::error_code& ec); // Cancel all operations associated with the socket. ASIO_DECL asio::error_code cancel( base_implementation_type& impl, asio::error_code& ec); // Determine whether the socket is at the out-of-band data mark. bool at_mark(const base_implementation_type& impl, asio::error_code& ec) const { return socket_ops::sockatmark(impl.socket_, ec); } // Determine the number of bytes available for reading. std::size_t available(const base_implementation_type& impl, asio::error_code& ec) const { return socket_ops::available(impl.socket_, ec); } // Place the socket into the state where it will listen for new connections. asio::error_code listen(base_implementation_type& impl, int backlog, asio::error_code& ec) { socket_ops::listen(impl.socket_, backlog, ec); return ec; } // Perform an IO control command on the socket. template asio::error_code io_control(base_implementation_type& impl, IO_Control_Command& command, asio::error_code& ec) { socket_ops::ioctl(impl.socket_, impl.state_, command.name(), static_cast(command.data()), ec); return ec; } // Gets the non-blocking mode of the socket. bool non_blocking(const base_implementation_type& impl) const { return (impl.state_ & socket_ops::user_set_non_blocking) != 0; } // Sets the non-blocking mode of the socket. asio::error_code non_blocking(base_implementation_type& impl, bool mode, asio::error_code& ec) { socket_ops::set_user_non_blocking(impl.socket_, impl.state_, mode, ec); return ec; } // Gets the non-blocking mode of the native socket implementation. bool native_non_blocking(const base_implementation_type& impl) const { return (impl.state_ & socket_ops::internal_non_blocking) != 0; } // Sets the non-blocking mode of the native socket implementation. asio::error_code native_non_blocking(base_implementation_type& impl, bool mode, asio::error_code& ec) { socket_ops::set_internal_non_blocking(impl.socket_, impl.state_, mode, ec); return ec; } // Disable sends or receives on the socket. 
asio::error_code shutdown(base_implementation_type& impl, socket_base::shutdown_type what, asio::error_code& ec) { socket_ops::shutdown(impl.socket_, what, ec); return ec; } // Send the given data to the peer. Returns the number of bytes sent. template size_t send(base_implementation_type& impl, const ConstBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); return socket_ops::sync_send(impl.socket_, impl.state_, bufs.buffers(), bufs.count(), flags, bufs.all_empty(), ec); } // Wait until data can be sent without blocking. size_t send(base_implementation_type& impl, const null_buffers&, socket_base::message_flags, asio::error_code& ec) { // Wait for socket to become ready. socket_ops::poll_write(impl.socket_, impl.state_, ec); return 0; } // Start an asynchronous send. The data being sent must be valid for the // lifetime of the asynchronous operation. template void async_send(base_implementation_type& impl, const ConstBufferSequence& buffers, socket_base::message_flags flags, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_socket_send_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.cancel_token_, buffers, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_send")); buffer_sequence_adapter bufs(buffers); start_send_op(impl, bufs.buffers(), bufs.count(), flags, (impl.state_ & socket_ops::stream_oriented) != 0 && bufs.all_empty(), p.p); p.v = p.p = 0; } // Start an asynchronous wait until data can be sent without blocking. template void async_send(base_implementation_type& impl, const null_buffers&, socket_base::message_flags, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.cancel_token_, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_send(null_buffers)")); start_reactor_op(impl, reactor::write_op, p.p); p.v = p.p = 0; } // Receive some data from the peer. Returns the number of bytes received. template size_t receive(base_implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); return socket_ops::sync_recv(impl.socket_, impl.state_, bufs.buffers(), bufs.count(), flags, bufs.all_empty(), ec); } // Wait until data can be received without blocking. size_t receive(base_implementation_type& impl, const null_buffers&, socket_base::message_flags, asio::error_code& ec) { // Wait for socket to become ready. socket_ops::poll_read(impl.socket_, impl.state_, ec); return 0; } // Start an asynchronous receive. The buffer for the data being received // must be valid for the lifetime of the asynchronous operation. template void async_receive(base_implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags flags, Handler& handler) { // Allocate and construct an operation to wrap the handler. 
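// ---------------------------------------------------------------------------
// [Editorial sketch] Every async_xxx() member below follows the same
// allocation ritual: grab raw memory, placement-new the operation into it, and
// keep both pointers inside a small guard whose destructor cleans up -- unless
// the start succeeded, in which case the pointers are zeroed and ownership
// passes to the I/O service. The guard on its own, with hypothetical names and
// plain operator new/delete in place of the allocation hooks:
#include <cstddef>
#include <new>

template <typename Op>
struct op_ptr_guard
{
  void* v;   // raw memory
  Op* p;     // constructed operation, if any

  ~op_ptr_guard() { reset(); }

  void reset()
  {
    if (p) { p->~Op(); p = 0; }               // destroy if constructed
    if (v) { ::operator delete(v); v = 0; }   // free if still owned
  }
};

struct mini_recv_op
{
  explicit mini_recv_op(int fd) : fd_(fd) {}
  int fd_;
};

inline mini_recv_op* start_async_receive(int fd)
{
  op_ptr_guard<mini_recv_op> p = { ::operator new(sizeof(mini_recv_op)), 0 };
  p.p = new (p.v) mini_recv_op(fd);   // construct in place

  // ... if handing the op to the OS failed or threw here, ~op_ptr_guard()
  // would destroy and free it automatically ...

  mini_recv_op* started = p.p;
  p.v = p.p = 0;                      // success: release ownership
  return started;
}
// --- end sketch ---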
typedef win_iocp_socket_recv_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.state_, impl.cancel_token_, buffers, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_receive")); buffer_sequence_adapter bufs(buffers); start_receive_op(impl, bufs.buffers(), bufs.count(), flags, (impl.state_ & socket_ops::stream_oriented) != 0 && bufs.all_empty(), p.p); p.v = p.p = 0; } // Wait until data can be received without blocking. template void async_receive(base_implementation_type& impl, const null_buffers&, socket_base::message_flags flags, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.cancel_token_, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_receive(null_buffers)")); start_null_buffers_receive_op(impl, flags, p.p); p.v = p.p = 0; } // Receive some data with associated flags. Returns the number of bytes // received. template size_t receive_with_flags(base_implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags in_flags, socket_base::message_flags& out_flags, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); return socket_ops::sync_recvmsg(impl.socket_, impl.state_, bufs.buffers(), bufs.count(), in_flags, out_flags, ec); } // Wait until data can be received without blocking. size_t receive_with_flags(base_implementation_type& impl, const null_buffers&, socket_base::message_flags, socket_base::message_flags& out_flags, asio::error_code& ec) { // Wait for socket to become ready. socket_ops::poll_read(impl.socket_, impl.state_, ec); // Clear out_flags, since we cannot give it any other sensible value when // performing a null_buffers operation. out_flags = 0; return 0; } // Start an asynchronous receive. The buffer for the data being received // must be valid for the lifetime of the asynchronous operation. template void async_receive_with_flags(base_implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags in_flags, socket_base::message_flags& out_flags, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_socket_recvmsg_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.cancel_token_, buffers, out_flags, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_receive_with_flags")); buffer_sequence_adapter bufs(buffers); start_receive_op(impl, bufs.buffers(), bufs.count(), in_flags, false, p.p); p.v = p.p = 0; } // Wait until data can be received without blocking. template void async_receive_with_flags(base_implementation_type& impl, const null_buffers&, socket_base::message_flags in_flags, socket_base::message_flags& out_flags, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.cancel_token_, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_receive_with_flags(null_buffers)")); // Reset out_flags since it can be given no sensible value at this time. 
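// ---------------------------------------------------------------------------
// [Editorial sketch] The buffer_sequence_adapter used in the send/receive
// members above does one job: walk a caller-supplied sequence of
// (pointer, length) buffers and lay them out in the flat array the OS
// scatter/gather call wants (WSABUF here, iovec on POSIX). A portable
// miniature using iovec and writev -- hypothetical mini_* names:
#include <sys/uio.h>
#include <unistd.h>
#include <cstddef>
#include <utility>
#include <vector>

typedef std::pair<const void*, std::size_t> const_buffer; // (data, size)

class mini_buffer_adapter
{
public:
  explicit mini_buffer_adapter(const std::vector<const_buffer>& buffers)
  {
    for (std::size_t i = 0; i < buffers.size(); ++i)
    {
      iovec v;
      v.iov_base = const_cast<void*>(buffers[i].first);
      v.iov_len = buffers[i].second;
      iovecs_.push_back(v);
    }
  }

  iovec* buffers() { return iovecs_.empty() ? 0 : &iovecs_[0]; }
  std::size_t count() const { return iovecs_.size(); }

  bool all_empty() const
  {
    for (std::size_t i = 0; i < iovecs_.size(); ++i)
      if (iovecs_[i].iov_len != 0)
        return false;
    return true;
  }

private:
  std::vector<iovec> iovecs_;
};

// Usage: gather-write a whole sequence in one system call.
inline long mini_sync_send(int fd, const std::vector<const_buffer>& buffers)
{
  mini_buffer_adapter bufs(buffers);
  return ::writev(fd, bufs.buffers(), static_cast<int>(bufs.count()));
}
// --- end sketch ---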
out_flags = 0; start_null_buffers_receive_op(impl, in_flags, p.p); p.v = p.p = 0; } // Helper function to restart an asynchronous accept operation. ASIO_DECL void restart_accept_op(socket_type s, socket_holder& new_socket, int family, int type, int protocol, void* output_buffer, DWORD address_length, operation* op); protected: // Open a new socket implementation. ASIO_DECL asio::error_code do_open( base_implementation_type& impl, int family, int type, int protocol, asio::error_code& ec); // Assign a native socket to a socket implementation. ASIO_DECL asio::error_code do_assign( base_implementation_type& impl, int type, socket_type native_socket, asio::error_code& ec); // Helper function to start an asynchronous send operation. ASIO_DECL void start_send_op(base_implementation_type& impl, WSABUF* buffers, std::size_t buffer_count, socket_base::message_flags flags, bool noop, operation* op); // Helper function to start an asynchronous send_to operation. ASIO_DECL void start_send_to_op(base_implementation_type& impl, WSABUF* buffers, std::size_t buffer_count, const socket_addr_type* addr, int addrlen, socket_base::message_flags flags, operation* op); // Helper function to start an asynchronous receive operation. ASIO_DECL void start_receive_op(base_implementation_type& impl, WSABUF* buffers, std::size_t buffer_count, socket_base::message_flags flags, bool noop, operation* op); // Helper function to start an asynchronous null_buffers receive operation. ASIO_DECL void start_null_buffers_receive_op( base_implementation_type& impl, socket_base::message_flags flags, reactor_op* op); // Helper function to start an asynchronous receive_from operation. ASIO_DECL void start_receive_from_op(base_implementation_type& impl, WSABUF* buffers, std::size_t buffer_count, socket_addr_type* addr, socket_base::message_flags flags, int* addrlen, operation* op); // Helper function to start an asynchronous accept operation. ASIO_DECL void start_accept_op(base_implementation_type& impl, bool peer_is_open, socket_holder& new_socket, int family, int type, int protocol, void* output_buffer, DWORD address_length, operation* op); // Start an asynchronous read or write operation using the reactor. ASIO_DECL void start_reactor_op(base_implementation_type& impl, int op_type, reactor_op* op); // Start the asynchronous connect operation using the reactor. ASIO_DECL void start_connect_op(base_implementation_type& impl, int family, int type, const socket_addr_type* remote_addr, std::size_t remote_addrlen, win_iocp_socket_connect_op_base* op); // Helper function to close a socket when the associated object is being // destroyed. ASIO_DECL void close_for_destruction(base_implementation_type& impl); // Update the ID of the thread from which cancellation is safe. ASIO_DECL void update_cancellation_thread_id( base_implementation_type& impl); // Helper function to get the reactor. If no reactor has been created yet, a // new one is obtained from the io_service and a pointer to it is cached in // this service. ASIO_DECL reactor& get_reactor(); // The type of a ConnectEx function pointer, as old SDKs may not provide it. typedef BOOL (PASCAL *connect_ex_fn)(SOCKET, const socket_addr_type*, int, void*, DWORD, DWORD*, OVERLAPPED*); // Helper function to get the ConnectEx pointer. If no ConnectEx pointer has // been obtained yet, one is obtained using WSAIoctl and the pointer is // cached. Returns a null pointer if ConnectEx is not available. 
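// ---------------------------------------------------------------------------
// [Editorial sketch] get_connect_ex() described above illustrates a common
// pattern for optional OS entry points: look the function up once, publish the
// result atomically, and fall back gracefully when the lookup returns null. A
// portable miniature of the caching part, with std::atomic standing in for the
// interlocked helpers and a stand-in lookup function (all names hypothetical):
#include <atomic>

typedef int (*optional_api_fn)(int);

// Stand-in lookup: in real code this would be WSAIoctl or GetProcAddress;
// here it simply reports "not available".
inline optional_api_fn lookup_optional_api() { return 0; }

inline optional_api_fn get_optional_api()
{
  static std::atomic<bool> looked_up(false);
  static std::atomic<optional_api_fn> cached(nullptr);

  if (!looked_up.load(std::memory_order_acquire))
  {
    // Several threads may race here; they all store the same answer, so the
    // worst case is a redundant lookup.
    cached.store(lookup_optional_api(), std::memory_order_relaxed);
    looked_up.store(true, std::memory_order_release);
  }
  return cached.load(std::memory_order_relaxed);
}

// Callers simply test for null and take the fallback path if needed.
inline int call_or_fallback(int arg)
{
  if (optional_api_fn fn = get_optional_api())
    return fn(arg);
  return -1; // extension not available on this platform
}
// --- end sketch ---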
ASIO_DECL connect_ex_fn get_connect_ex( base_implementation_type& impl, int type); // Helper function to emulate InterlockedCompareExchangePointer functionality // for: // - very old Platform SDKs; and // - platform SDKs where MSVC's /Wp64 option causes spurious warnings. ASIO_DECL void* interlocked_compare_exchange_pointer( void** dest, void* exch, void* cmp); // Helper function to emulate InterlockedExchangePointer functionality for: // - very old Platform SDKs; and // - platform SDKs where MSVC's /Wp64 option causes spurious warnings. ASIO_DECL void* interlocked_exchange_pointer(void** dest, void* val); // The io_service used to obtain the reactor, if required. asio::io_service& io_service_; // The IOCP service used for running asynchronous operations and dispatching // handlers. win_iocp_io_service& iocp_service_; // The reactor used for performing connect operations. This object is created // only if needed. reactor* reactor_; // Pointer to ConnectEx implementation. void* connect_ex_; // Mutex to protect access to the linked list of implementations. asio::detail::mutex mutex_; // The head of a linked list of all implementations. base_implementation_type* impl_list_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/win_iocp_socket_service_base.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_SOCKET_SERVICE_BASE_HPP galera-3-25.3.20/asio/asio/detail/keyword_tss_ptr.hpp0000644000015300001660000000245013042054732022242 0ustar jenkinsjenkins// // detail/keyword_tss_ptr.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_KEYWORD_TSS_PTR_HPP #define ASIO_DETAIL_KEYWORD_TSS_PTR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_THREAD_KEYWORD_EXTENSION) #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class keyword_tss_ptr : private noncopyable { public: // Constructor. keyword_tss_ptr() { } // Destructor. ~keyword_tss_ptr() { } // Get the value. operator T*() const { return value_; } // Set the value. void operator=(T* value) { value_ = value; } private: static ASIO_THREAD_KEYWORD T* value_; }; template ASIO_THREAD_KEYWORD T* keyword_tss_ptr::value_; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_THREAD_KEYWORD_EXTENSION) #endif // ASIO_DETAIL_KEYWORD_TSS_PTR_HPP galera-3-25.3.20/asio/asio/detail/cstdint.hpp0000644000015300001660000000207313042054732020451 0ustar jenkinsjenkins// // detail/cstdint.hpp // ~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_CSTDINT_HPP #define ASIO_DETAIL_CSTDINT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_CSTDINT) # include #else // defined(ASIO_HAS_CSTDINT) # include #endif // defined(ASIO_HAS_CSTDINT) namespace asio { #if defined(ASIO_HAS_CSTDINT) using std::int16_t; using std::uint16_t; using std::int32_t; using std::uint32_t; using std::int64_t; using std::uint64_t; #else // defined(ASIO_HAS_CSTDINT) using boost::int16_t; using boost::uint16_t; using boost::int32_t; using boost::uint32_t; using boost::int64_t; using boost::uint64_t; #endif // defined(ASIO_HAS_CSTDINT) } // namespace asio #endif // ASIO_DETAIL_CSTDINT_HPP galera-3-25.3.20/asio/asio/detail/winrt_timer_scheduler.hpp0000644000015300001660000000757613042054732023417 0ustar jenkinsjenkins// // detail/winrt_timer_scheduler.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_TIMER_SCHEDULER_HPP #define ASIO_DETAIL_WINRT_TIMER_SCHEDULER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include #include "asio/detail/event.hpp" #include "asio/detail/limits.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/thread.hpp" #include "asio/detail/timer_queue_base.hpp" #include "asio/detail/timer_queue_set.hpp" #include "asio/detail/wait_op.hpp" #include "asio/io_service.hpp" #if defined(ASIO_HAS_IOCP) # include "asio/detail/thread.hpp" #endif // defined(ASIO_HAS_IOCP) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class winrt_timer_scheduler : public asio::detail::service_base { public: // Constructor. ASIO_DECL winrt_timer_scheduler(asio::io_service& io_service); // Destructor. ASIO_DECL ~winrt_timer_scheduler(); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Recreate internal descriptors following a fork. ASIO_DECL void fork_service( asio::io_service::fork_event fork_ev); // Initialise the task. No effect as this class uses its own thread. ASIO_DECL void init_task(); // Add a new timer queue to the reactor. template void add_timer_queue(timer_queue& queue); // Remove a timer queue from the reactor. template void remove_timer_queue(timer_queue& queue); // Schedule a new operation in the given timer queue to expire at the // specified absolute time. template void schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op); // Cancel the timer operations associated with the given token. Returns the // number of operations that have been posted or dispatched. template std::size_t cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled = (std::numeric_limits::max)()); private: // Run the select loop in the thread. ASIO_DECL void run_thread(); // Entry point for the select loop thread. 
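// ---------------------------------------------------------------------------
// [Editorial sketch] A timer scheduler of this shape owns one background
// thread that sleeps until either the earliest deadline arrives or it is woken
// up (new timer added, or shutdown requested). With std::thread and a
// condition variable standing in for the private event/thread types -- all
// mini_* names are hypothetical:
#include <chrono>
#include <condition_variable>
#include <functional>
#include <map>
#include <mutex>
#include <thread>

class mini_timer_scheduler
{
public:
  typedef std::chrono::steady_clock clock;

  mini_timer_scheduler()
    : stop_(false), thread_(&mini_timer_scheduler::run_thread, this) {}

  ~mini_timer_scheduler()
  {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      stop_ = true;
    }
    event_.notify_one();   // wake the background thread so it can exit
    thread_.join();
  }

  void schedule_timer(clock::time_point when, std::function<void()> op)
  {
    std::lock_guard<std::mutex> lock(mutex_);
    timers_.insert(std::make_pair(when, op));
    event_.notify_one();   // the new timer may now be the earliest one
  }

private:
  void run_thread()
  {
    std::unique_lock<std::mutex> lock(mutex_);
    while (!stop_)
    {
      if (timers_.empty())
      {
        event_.wait(lock);                          // nothing to wait for
      }
      else if (clock::now() < timers_.begin()->first)
      {
        event_.wait_until(lock, timers_.begin()->first);
      }
      else
      {
        std::function<void()> op = timers_.begin()->second;
        timers_.erase(timers_.begin());
        lock.unlock();                              // never run user code locked
        op();
        lock.lock();
      }
    }
  }

  std::mutex mutex_;
  std::condition_variable event_;
  bool stop_;
  std::multimap<clock::time_point, std::function<void()> > timers_;
  std::thread thread_;
};
// --- end sketch ---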
ASIO_DECL static void call_run_thread(winrt_timer_scheduler* reactor); // Helper function to add a new timer queue. ASIO_DECL void do_add_timer_queue(timer_queue_base& queue); // Helper function to remove a timer queue. ASIO_DECL void do_remove_timer_queue(timer_queue_base& queue); // The io_service implementation used to post completions. io_service_impl& io_service_; // Mutex used to protect internal variables. asio::detail::mutex mutex_; // Event used to wake up background thread. asio::detail::event event_; // The timer queues. timer_queue_set timer_queues_; // The background thread that is waiting for timers to expire. asio::detail::thread* thread_; // Does the background thread need to stop. bool stop_thread_; // Whether the service has been shut down. bool shutdown_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/detail/impl/winrt_timer_scheduler.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/winrt_timer_scheduler.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_WINRT_TIMER_SCHEDULER_HPP galera-3-25.3.20/asio/asio/detail/signal_set_service.hpp0000644000015300001660000001402313042054732022647 0ustar jenkinsjenkins// // detail/signal_set_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SIGNAL_SET_SERVICE_HPP #define ASIO_DETAIL_SIGNAL_SET_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/signal_handler.hpp" #include "asio/detail/signal_op.hpp" #include "asio/detail/socket_types.hpp" #if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) # include "asio/detail/reactor.hpp" #endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { #if defined(NSIG) && (NSIG > 0) enum { max_signal_number = NSIG }; #else enum { max_signal_number = 128 }; #endif extern ASIO_DECL struct signal_state* get_signal_state(); extern "C" ASIO_DECL void asio_signal_handler(int signal_number); class signal_set_service { public: // Type used for tracking an individual signal registration. class registration { public: // Default constructor. registration() : signal_number_(0), queue_(0), undelivered_(0), next_in_table_(0), prev_in_table_(0), next_in_set_(0) { } private: // Only this service will have access to the internal values. friend class signal_set_service; // The signal number that is registered. int signal_number_; // The waiting signal handlers. op_queue* queue_; // The number of undelivered signals. std::size_t undelivered_; // Pointers to adjacent registrations in the registrations_ table. registration* next_in_table_; registration* prev_in_table_; // Link to next registration in the signal set. registration* next_in_set_; }; // The implementation type of the signal_set. class implementation_type { public: // Default constructor. implementation_type() : signals_(0) { } private: // Only this service will have access to the internal values. 
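// ---------------------------------------------------------------------------
// [Editorial sketch] On POSIX a signal service of this kind forwards signals
// into the event loop through a pipe: the installed handler only performs an
// async-signal-safe write() of the signal number, and the loop side reads the
// numbers back when the pipe becomes readable and runs the queued waiters. The
// core of that scheme -- hypothetical mini_* names, single signal set, minimal
// error handling:
#include <cerrno>
#include <cstring>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

namespace mini_signals {

// [0] = read end watched by the event loop, [1] = write end for the handler.
int signal_pipe[2] = { -1, -1 };

// The installed handler does async-signal-safe work only: a single write().
extern "C" void mini_on_signal(int signal_number)
{
  int saved_errno = errno;
  (void)::write(signal_pipe[1], &signal_number, sizeof(signal_number));
  errno = saved_errno;
}

// Register interest in a signal and hand back the descriptor to watch.
inline int add_signal(int signal_number)
{
  if (signal_pipe[0] == -1)
  {
    if (::pipe(signal_pipe) != 0)
      return -1;
    ::fcntl(signal_pipe[0], F_SETFL, O_NONBLOCK); // loop-side reads never block
  }
  struct sigaction sa;
  std::memset(&sa, 0, sizeof(sa));
  sa.sa_handler = mini_on_signal;
  sigemptyset(&sa.sa_mask);
  if (::sigaction(signal_number, &sa, 0) != 0)
    return -1;
  return signal_pipe[0];
}

// Called when the read end becomes readable: drain it and deliver.
template <typename Handler>
void deliver_pending(Handler handler)
{
  int signal_number;
  while (::read(signal_pipe[0], &signal_number, sizeof(signal_number)) ==
         static_cast<ssize_t>(sizeof(signal_number)))
  {
    handler(signal_number);
  }
}

} // namespace mini_signals
// --- end sketch ---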
friend class signal_set_service; // The pending signal handlers. op_queue queue_; // Linked list of registered signals. registration* signals_; }; // Constructor. ASIO_DECL signal_set_service(asio::io_service& io_service); // Destructor. ASIO_DECL ~signal_set_service(); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Perform fork-related housekeeping. ASIO_DECL void fork_service( asio::io_service::fork_event fork_ev); // Construct a new signal_set implementation. ASIO_DECL void construct(implementation_type& impl); // Destroy a signal_set implementation. ASIO_DECL void destroy(implementation_type& impl); // Add a signal to a signal_set. ASIO_DECL asio::error_code add(implementation_type& impl, int signal_number, asio::error_code& ec); // Remove a signal to a signal_set. ASIO_DECL asio::error_code remove(implementation_type& impl, int signal_number, asio::error_code& ec); // Remove all signals from a signal_set. ASIO_DECL asio::error_code clear(implementation_type& impl, asio::error_code& ec); // Cancel all operations associated with the signal set. ASIO_DECL asio::error_code cancel(implementation_type& impl, asio::error_code& ec); // Start an asynchronous operation to wait for a signal to be delivered. template void async_wait(implementation_type& impl, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef signal_handler op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "signal_set", &impl, "async_wait")); start_wait_op(impl, p.p); p.v = p.p = 0; } // Deliver notification that a particular signal occurred. ASIO_DECL static void deliver_signal(int signal_number); private: // Helper function to add a service to the global signal state. ASIO_DECL static void add_service(signal_set_service* service); // Helper function to remove a service from the global signal state. ASIO_DECL static void remove_service(signal_set_service* service); // Helper function to create the pipe descriptors. ASIO_DECL static void open_descriptors(); // Helper function to close the pipe descriptors. ASIO_DECL static void close_descriptors(); // Helper function to start a wait operation. ASIO_DECL void start_wait_op(implementation_type& impl, signal_op* op); // The io_service instance used for dispatching handlers. io_service_impl& io_service_; #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) // The type used for registering for pipe reactor notifications. class pipe_read_op; // The reactor used for waiting for pipe readiness. reactor& reactor_; // The per-descriptor reactor data used for the pipe. reactor::per_descriptor_data reactor_data_; #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) // A mapping from signal number to the registered signal sets. registration* registrations_[max_signal_number]; // Pointers to adjacent services in linked list. 
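// Each signal_set_service instance is also linked into a global, intrusive,
// doubly-linked list of services so that the process-wide signal handler
// can deliver a caught signal to every io_service that registered interest
// in it. The intrusive-list idea in isolation (node, head and push_front
// are illustrative names, not asio's):
//
//   struct node
//   {
//     node* next_;
//     node* prev_;
//   };
//
//   static node* head = 0;
//
//   void push_front(node* n)
//   {
//     n->next_ = head;
//     n->prev_ = 0;
//     if (head)
//       head->prev_ = n;
//     head = n;
//   }
//
// The two members declared next are those links: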
signal_set_service* next_; signal_set_service* prev_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/signal_set_service.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_DETAIL_SIGNAL_SET_SERVICE_HPP galera-3-25.3.20/asio/asio/detail/reactive_descriptor_service.hpp0000644000015300001660000002462413042054732024567 0ustar jenkinsjenkins// // detail/reactive_descriptor_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_DESCRIPTOR_SERVICE_HPP #define ASIO_DETAIL_REACTIVE_DESCRIPTOR_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) #include "asio/buffer.hpp" #include "asio/io_service.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/descriptor_ops.hpp" #include "asio/detail/descriptor_read_op.hpp" #include "asio/detail/descriptor_write_op.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/reactive_null_buffers_op.hpp" #include "asio/detail/reactor.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class reactive_descriptor_service { public: // The native type of a descriptor. typedef int native_handle_type; // The implementation type of the descriptor. class implementation_type : private asio::detail::noncopyable { public: // Default constructor. implementation_type() : descriptor_(-1), state_(0) { } private: // Only this service will have access to the internal values. friend class reactive_descriptor_service; // The native descriptor representation. int descriptor_; // The current state of the descriptor. descriptor_ops::state_type state_; // Per-descriptor data used by the reactor. reactor::per_descriptor_data reactor_data_; }; // Constructor. ASIO_DECL reactive_descriptor_service( asio::io_service& io_service); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Construct a new descriptor implementation. ASIO_DECL void construct(implementation_type& impl); // Move-construct a new descriptor implementation. ASIO_DECL void move_construct(implementation_type& impl, implementation_type& other_impl); // Move-assign from another descriptor implementation. ASIO_DECL void move_assign(implementation_type& impl, reactive_descriptor_service& other_service, implementation_type& other_impl); // Destroy a descriptor implementation. ASIO_DECL void destroy(implementation_type& impl); // Assign a native descriptor to a descriptor implementation. ASIO_DECL asio::error_code assign(implementation_type& impl, const native_handle_type& native_descriptor, asio::error_code& ec); // Determine whether the descriptor is open. bool is_open(const implementation_type& impl) const { return impl.descriptor_ != -1; } // Destroy a descriptor implementation. ASIO_DECL asio::error_code close(implementation_type& impl, asio::error_code& ec); // Get the native descriptor representation. 
native_handle_type native_handle(const implementation_type& impl) const { return impl.descriptor_; } // Release ownership of the native descriptor representation. ASIO_DECL native_handle_type release(implementation_type& impl); // Cancel all operations associated with the descriptor. ASIO_DECL asio::error_code cancel(implementation_type& impl, asio::error_code& ec); // Perform an IO control command on the descriptor. template asio::error_code io_control(implementation_type& impl, IO_Control_Command& command, asio::error_code& ec) { descriptor_ops::ioctl(impl.descriptor_, impl.state_, command.name(), static_cast(command.data()), ec); return ec; } // Gets the non-blocking mode of the descriptor. bool non_blocking(const implementation_type& impl) const { return (impl.state_ & descriptor_ops::user_set_non_blocking) != 0; } // Sets the non-blocking mode of the descriptor. asio::error_code non_blocking(implementation_type& impl, bool mode, asio::error_code& ec) { descriptor_ops::set_user_non_blocking( impl.descriptor_, impl.state_, mode, ec); return ec; } // Gets the non-blocking mode of the native descriptor implementation. bool native_non_blocking(const implementation_type& impl) const { return (impl.state_ & descriptor_ops::internal_non_blocking) != 0; } // Sets the non-blocking mode of the native descriptor implementation. asio::error_code native_non_blocking(implementation_type& impl, bool mode, asio::error_code& ec) { descriptor_ops::set_internal_non_blocking( impl.descriptor_, impl.state_, mode, ec); return ec; } // Write some data to the descriptor. template size_t write_some(implementation_type& impl, const ConstBufferSequence& buffers, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); return descriptor_ops::sync_write(impl.descriptor_, impl.state_, bufs.buffers(), bufs.count(), bufs.all_empty(), ec); } // Wait until data can be written without blocking. size_t write_some(implementation_type& impl, const null_buffers&, asio::error_code& ec) { // Wait for descriptor to become ready. descriptor_ops::poll_write(impl.descriptor_, impl.state_, ec); return 0; } // Start an asynchronous write. The data being sent must be valid for the // lifetime of the asynchronous operation. template void async_write_some(implementation_type& impl, const ConstBufferSequence& buffers, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef descriptor_write_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.descriptor_, buffers, handler); ASIO_HANDLER_CREATION((p.p, "descriptor", &impl, "async_write_some")); start_op(impl, reactor::write_op, p.p, is_continuation, true, buffer_sequence_adapter::all_empty(buffers)); p.v = p.p = 0; } // Start an asynchronous wait until data can be written without blocking. template void async_write_some(implementation_type& impl, const null_buffers&, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. 
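// The allocation idiom used throughout these services is: obtain raw
// storage through the handler's (possibly user-supplied) allocation hooks,
// construct the operation object in that storage with placement new, and
// destroy and deallocate in reverse order once the operation completes.
// Stripped of the asio helpers, the underlying C++ mechanism is roughly
// this (widget and demo are illustrative names):
//
//   #include <new>
//
//   struct widget { explicit widget(int n) : n_(n) {} int n_; };
//
//   void demo()
//   {
//     void* raw = ::operator new(sizeof(widget)); // obtain raw storage
//     widget* w = new (raw) widget(42);           // construct in place
//     // ... use *w ...
//     w->~widget();                               // destroy explicitly
//     ::operator delete(raw);                     // release the storage
//   }
//
// The same pattern follows, with asio_handler_alloc_helpers standing in
// for the plain operator new/delete: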
typedef reactive_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "descriptor", &impl, "async_write_some(null_buffers)")); start_op(impl, reactor::write_op, p.p, is_continuation, false, false); p.v = p.p = 0; } // Read some data from the stream. Returns the number of bytes read. template size_t read_some(implementation_type& impl, const MutableBufferSequence& buffers, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); return descriptor_ops::sync_read(impl.descriptor_, impl.state_, bufs.buffers(), bufs.count(), bufs.all_empty(), ec); } // Wait until data can be read without blocking. size_t read_some(implementation_type& impl, const null_buffers&, asio::error_code& ec) { // Wait for descriptor to become ready. descriptor_ops::poll_read(impl.descriptor_, impl.state_, ec); return 0; } // Start an asynchronous read. The buffer for the data being read must be // valid for the lifetime of the asynchronous operation. template void async_read_some(implementation_type& impl, const MutableBufferSequence& buffers, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef descriptor_read_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.descriptor_, buffers, handler); ASIO_HANDLER_CREATION((p.p, "descriptor", &impl, "async_read_some")); start_op(impl, reactor::read_op, p.p, is_continuation, true, buffer_sequence_adapter::all_empty(buffers)); p.v = p.p = 0; } // Wait until data can be read without blocking. template void async_read_some(implementation_type& impl, const null_buffers&, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "descriptor", &impl, "async_read_some(null_buffers)")); start_op(impl, reactor::read_op, p.p, is_continuation, false, false); p.v = p.p = 0; } private: // Start the asynchronous operation. ASIO_DECL void start_op(implementation_type& impl, int op_type, reactor_op* op, bool is_continuation, bool is_non_blocking, bool noop); // The selector that performs event demultiplexing for the service. reactor& reactor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/reactive_descriptor_service.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) #endif // ASIO_DETAIL_REACTIVE_DESCRIPTOR_SERVICE_HPP galera-3-25.3.20/asio/asio/detail/signal_blocker.hpp0000644000015300001660000000235313042054732021760 0ustar jenkinsjenkins// // detail/signal_blocker.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SIGNAL_BLOCKER_HPP #define ASIO_DETAIL_SIGNAL_BLOCKER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) || defined(ASIO_WINDOWS) \ || defined(ASIO_WINDOWS_RUNTIME) \ || defined(__CYGWIN__) || defined(__SYMBIAN32__) # include "asio/detail/null_signal_blocker.hpp" #elif defined(ASIO_HAS_PTHREADS) # include "asio/detail/posix_signal_blocker.hpp" #else # error Only Windows and POSIX are supported! #endif namespace asio { namespace detail { #if !defined(ASIO_HAS_THREADS) || defined(ASIO_WINDOWS) \ || defined(ASIO_WINDOWS_RUNTIME) \ || defined(__CYGWIN__) || defined(__SYMBIAN32__) typedef null_signal_blocker signal_blocker; #elif defined(ASIO_HAS_PTHREADS) typedef posix_signal_blocker signal_blocker; #endif } // namespace detail } // namespace asio #endif // ASIO_DETAIL_SIGNAL_BLOCKER_HPP galera-3-25.3.20/asio/asio/detail/win_iocp_socket_recvfrom_op.hpp0000644000015300001660000000737513042054732024573 0ustar jenkinsjenkins// // detail/win_iocp_socket_recvfrom_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_SOCKET_RECVFROM_OP_HPP #define ASIO_DETAIL_WIN_IOCP_SOCKET_RECVFROM_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class win_iocp_socket_recvfrom_op : public operation { public: ASIO_DEFINE_HANDLER_PTR(win_iocp_socket_recvfrom_op); win_iocp_socket_recvfrom_op(Endpoint& endpoint, socket_ops::weak_cancel_token_type cancel_token, const MutableBufferSequence& buffers, Handler& handler) : operation(&win_iocp_socket_recvfrom_op::do_complete), endpoint_(endpoint), endpoint_size_(static_cast(endpoint.capacity())), cancel_token_(cancel_token), buffers_(buffers), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } int& endpoint_size() { return endpoint_size_; } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& result_ec, std::size_t bytes_transferred) { asio::error_code ec(result_ec); // Take ownership of the operation object. win_iocp_socket_recvfrom_op* o( static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) // Check whether buffers are still valid. if (owner) { buffer_sequence_adapter::validate(o->buffers_); } #endif // defined(ASIO_ENABLE_BUFFER_DEBUGGING) socket_ops::complete_iocp_recvfrom(o->cancel_token_, ec); // Record the size of the endpoint returned by the operation. 
o->endpoint_.resize(o->endpoint_size_); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, ec, bytes_transferred); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Endpoint& endpoint_; int endpoint_size_; socket_ops::weak_cancel_token_type cancel_token_; MutableBufferSequence buffers_; Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_SOCKET_RECVFROM_OP_HPP galera-3-25.3.20/asio/asio/detail/win_fd_set_adapter.hpp0000644000015300001660000000733613042054732022631 0ustar jenkinsjenkins// // detail/win_fd_set_adapter.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_FD_SET_ADAPTER_HPP #define ASIO_DETAIL_WIN_FD_SET_ADAPTER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) #include "asio/detail/noncopyable.hpp" #include "asio/detail/reactor_op_queue.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Adapts the FD_SET type to meet the Descriptor_Set concept's requirements. 
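// The adapter below cannot use FD_SET directly because FD_SETSIZE fixes
// the array length at compile time; instead it allocates a layout-
// compatible structure whose trailing array is sized at runtime (the
// "struct hack"). The allocation trick on its own, with illustrative
// names (dyn_set, make_set):
//
//   #include <new>
//
//   struct dyn_set
//   {
//     unsigned count;
//     int items[1]; // really 'capacity' elements long
//   };
//
//   dyn_set* make_set(unsigned capacity)
//   {
//     void* raw = ::operator new(
//         sizeof(dyn_set) - sizeof(int) + sizeof(int) * capacity);
//     dyn_set* s = static_cast<dyn_set*>(raw);
//     s->count = 0;
//     return s;
//   }
//
// win_fd_set_adapter applies the same trick to the Windows fd_set layout: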
class win_fd_set_adapter : noncopyable { public: enum { default_fd_set_size = 1024 }; win_fd_set_adapter() : capacity_(default_fd_set_size), max_descriptor_(invalid_socket) { fd_set_ = static_cast(::operator new( sizeof(win_fd_set) - sizeof(SOCKET) + sizeof(SOCKET) * (capacity_))); fd_set_->fd_count = 0; } ~win_fd_set_adapter() { ::operator delete(fd_set_); } void reset() { fd_set_->fd_count = 0; max_descriptor_ = invalid_socket; } bool set(socket_type descriptor) { for (u_int i = 0; i < fd_set_->fd_count; ++i) if (fd_set_->fd_array[i] == descriptor) return true; reserve(fd_set_->fd_count + 1); fd_set_->fd_array[fd_set_->fd_count++] = descriptor; return true; } void set(reactor_op_queue& operations, op_queue&) { reactor_op_queue::iterator i = operations.begin(); while (i != operations.end()) { reactor_op_queue::iterator op_iter = i++; reserve(fd_set_->fd_count + 1); fd_set_->fd_array[fd_set_->fd_count++] = op_iter->first; } } bool is_set(socket_type descriptor) const { return !!__WSAFDIsSet(descriptor, const_cast(reinterpret_cast(fd_set_))); } operator fd_set*() { return reinterpret_cast(fd_set_); } socket_type max_descriptor() const { return max_descriptor_; } void perform(reactor_op_queue& operations, op_queue& ops) const { for (u_int i = 0; i < fd_set_->fd_count; ++i) operations.perform_operations(fd_set_->fd_array[i], ops); } private: // This structure is defined to be compatible with the Windows API fd_set // structure, but without being dependent on the value of FD_SETSIZE. We use // the "struct hack" to allow the number of descriptors to be varied at // runtime. struct win_fd_set { u_int fd_count; SOCKET fd_array[1]; }; // Increase the fd_set_ capacity to at least the specified number of elements. void reserve(u_int n) { if (n <= capacity_) return; u_int new_capacity = capacity_ + capacity_ / 2; if (new_capacity < n) new_capacity = n; win_fd_set* new_fd_set = static_cast(::operator new( sizeof(win_fd_set) - sizeof(SOCKET) + sizeof(SOCKET) * (new_capacity))); new_fd_set->fd_count = fd_set_->fd_count; for (u_int i = 0; i < fd_set_->fd_count; ++i) new_fd_set->fd_array[i] = fd_set_->fd_array[i]; ::operator delete(fd_set_); fd_set_ = new_fd_set; capacity_ = new_capacity; } win_fd_set* fd_set_; u_int capacity_; socket_type max_descriptor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) #endif // ASIO_DETAIL_WIN_FD_SET_ADAPTER_HPP galera-3-25.3.20/asio/asio/detail/old_win_sdk_compat.hpp0000644000015300001660000001042513042054732022640 0ustar jenkinsjenkins// // detail/old_win_sdk_compat.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_OLD_WIN_SDK_COMPAT_HPP #define ASIO_DETAIL_OLD_WIN_SDK_COMPAT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Guess whether we are building against on old Platform SDK. #if !defined(IN6ADDR_ANY_INIT) #define ASIO_HAS_OLD_WIN_SDK 1 #endif // !defined(IN6ADDR_ANY_INIT) #if defined(ASIO_HAS_OLD_WIN_SDK) // Emulation of types that are missing from old Platform SDKs. // // N.B. this emulation is also used if building for a Windows 2000 target with // a recent (i.e. 
Vista or later) SDK, as the SDK does not provide IPv6 support // in that case. #include "asio/detail/push_options.hpp" namespace asio { namespace detail { enum { sockaddr_storage_maxsize = 128, // Maximum size. sockaddr_storage_alignsize = (sizeof(__int64)), // Desired alignment. sockaddr_storage_pad1size = (sockaddr_storage_alignsize - sizeof(short)), sockaddr_storage_pad2size = (sockaddr_storage_maxsize - (sizeof(short) + sockaddr_storage_pad1size + sockaddr_storage_alignsize)) }; struct sockaddr_storage_emulation { short ss_family; char __ss_pad1[sockaddr_storage_pad1size]; __int64 __ss_align; char __ss_pad2[sockaddr_storage_pad2size]; }; struct in6_addr_emulation { union { u_char Byte[16]; u_short Word[8]; } u; }; #if !defined(s6_addr) # define _S6_un u # define _S6_u8 Byte # define s6_addr _S6_un._S6_u8 #endif // !defined(s6_addr) struct sockaddr_in6_emulation { short sin6_family; u_short sin6_port; u_long sin6_flowinfo; in6_addr_emulation sin6_addr; u_long sin6_scope_id; }; struct ipv6_mreq_emulation { in6_addr_emulation ipv6mr_multiaddr; unsigned int ipv6mr_interface; }; struct addrinfo_emulation { int ai_flags; int ai_family; int ai_socktype; int ai_protocol; size_t ai_addrlen; char* ai_canonname; sockaddr* ai_addr; addrinfo_emulation* ai_next; }; #if !defined(AI_PASSIVE) # define AI_PASSIVE 0x1 #endif #if !defined(AI_CANONNAME) # define AI_CANONNAME 0x2 #endif #if !defined(AI_NUMERICHOST) # define AI_NUMERICHOST 0x4 #endif #if !defined(EAI_AGAIN) # define EAI_AGAIN WSATRY_AGAIN #endif #if !defined(EAI_BADFLAGS) # define EAI_BADFLAGS WSAEINVAL #endif #if !defined(EAI_FAIL) # define EAI_FAIL WSANO_RECOVERY #endif #if !defined(EAI_FAMILY) # define EAI_FAMILY WSAEAFNOSUPPORT #endif #if !defined(EAI_MEMORY) # define EAI_MEMORY WSA_NOT_ENOUGH_MEMORY #endif #if !defined(EAI_NODATA) # define EAI_NODATA WSANO_DATA #endif #if !defined(EAI_NONAME) # define EAI_NONAME WSAHOST_NOT_FOUND #endif #if !defined(EAI_SERVICE) # define EAI_SERVICE WSATYPE_NOT_FOUND #endif #if !defined(EAI_SOCKTYPE) # define EAI_SOCKTYPE WSAESOCKTNOSUPPORT #endif #if !defined(NI_NOFQDN) # define NI_NOFQDN 0x01 #endif #if !defined(NI_NUMERICHOST) # define NI_NUMERICHOST 0x02 #endif #if !defined(NI_NAMEREQD) # define NI_NAMEREQD 0x04 #endif #if !defined(NI_NUMERICSERV) # define NI_NUMERICSERV 0x08 #endif #if !defined(NI_DGRAM) # define NI_DGRAM 0x10 #endif #if !defined(IPPROTO_IPV6) # define IPPROTO_IPV6 41 #endif #if !defined(IPV6_UNICAST_HOPS) # define IPV6_UNICAST_HOPS 4 #endif #if !defined(IPV6_MULTICAST_IF) # define IPV6_MULTICAST_IF 9 #endif #if !defined(IPV6_MULTICAST_HOPS) # define IPV6_MULTICAST_HOPS 10 #endif #if !defined(IPV6_MULTICAST_LOOP) # define IPV6_MULTICAST_LOOP 11 #endif #if !defined(IPV6_JOIN_GROUP) # define IPV6_JOIN_GROUP 12 #endif #if !defined(IPV6_LEAVE_GROUP) # define IPV6_LEAVE_GROUP 13 #endif } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_OLD_WIN_SDK) // Even newer Platform SDKs that support IPv6 may not define IPV6_V6ONLY. #if !defined(IPV6_V6ONLY) # define IPV6_V6ONLY 27 #endif // Some SDKs (e.g. Windows CE) don't define IPPROTO_ICMPV6. #if !defined(IPPROTO_ICMPV6) # define IPPROTO_ICMPV6 58 #endif #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) #endif // ASIO_DETAIL_OLD_WIN_SDK_COMPAT_HPP galera-3-25.3.20/asio/asio/detail/array.hpp0000644000015300001660000000160613042054732020120 0ustar jenkinsjenkins// // detail/array.hpp // ~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_ARRAY_HPP #define ASIO_DETAIL_ARRAY_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_ARRAY) # include #else // defined(ASIO_HAS_STD_ARRAY) # include #endif // defined(ASIO_HAS_STD_ARRAY) namespace asio { namespace detail { #if defined(ASIO_HAS_STD_ARRAY) using std::array; #else // defined(ASIO_HAS_STD_ARRAY) using boost::array; #endif // defined(ASIO_HAS_STD_ARRAY) } // namespace detail } // namespace asio #endif // ASIO_DETAIL_ARRAY_HPP galera-3-25.3.20/asio/asio/detail/null_thread.hpp0000644000015300001660000000231713042054732021303 0ustar jenkinsjenkins// // detail/null_thread.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_NULL_THREAD_HPP #define ASIO_DETAIL_NULL_THREAD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) #include "asio/detail/noncopyable.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class null_thread : private noncopyable { public: // Constructor. template null_thread(Function, unsigned int = 0) { asio::detail::throw_error( asio::error::operation_not_supported, "thread"); } // Destructor. ~null_thread() { } // Wait for the thread to exit. void join() { } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_HAS_THREADS) #endif // ASIO_DETAIL_NULL_THREAD_HPP galera-3-25.3.20/asio/asio/detail/buffer_resize_guard.hpp0000644000015300001660000000275013042054732023017 0ustar jenkinsjenkins// // detail/buffer_resize_guard.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_BUFFER_RESIZE_GUARD_HPP #define ASIO_DETAIL_BUFFER_RESIZE_GUARD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/limits.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Helper class to manage buffer resizing in an exception safe way. template class buffer_resize_guard { public: // Constructor. buffer_resize_guard(Buffer& buffer) : buffer_(buffer), old_size_(buffer.size()) { } // Destructor rolls back the buffer resize unless commit was called. ~buffer_resize_guard() { if (old_size_ != (std::numeric_limits::max)()) { buffer_.resize(old_size_); } } // Commit the resize transaction. void commit() { old_size_ = (std::numeric_limits::max)(); } private: // The buffer being managed. Buffer& buffer_; // The size of the buffer at the time the guard was constructed. 
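// Typical use of the guard: grow the buffer speculatively, do work that
// may throw, and commit only on success, so an exception rolls the buffer
// back to its original size automatically. A hedged usage sketch
// (fill_buffer is an illustrative function, not an asio API):
//
//   #include <vector>
//
//   void fill_buffer(std::vector<char>& buf); // may throw
//
//   void read_more(std::vector<char>& buf)
//   {
//     asio::detail::buffer_resize_guard<std::vector<char> > guard(buf);
//     buf.resize(buf.size() + 1024); // speculative grow
//     fill_buffer(buf);              // on throw, destructor rolls back
//     guard.commit();                // success: keep the new size
//   }
//
// The rollback target is the member recorded next: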
size_t old_size_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_BUFFER_RESIZE_GUARD_HPP galera-3-25.3.20/asio/asio/detail/posix_signal_blocker.hpp0000644000015300001660000000353213042054732023202 0ustar jenkinsjenkins// // detail/posix_signal_blocker.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_POSIX_SIGNAL_BLOCKER_HPP #define ASIO_DETAIL_POSIX_SIGNAL_BLOCKER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_PTHREADS) #include #include #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class posix_signal_blocker : private noncopyable { public: // Constructor blocks all signals for the calling thread. posix_signal_blocker() : blocked_(false) { sigset_t new_mask; sigfillset(&new_mask); blocked_ = (pthread_sigmask(SIG_BLOCK, &new_mask, &old_mask_) == 0); } // Destructor restores the previous signal mask. ~posix_signal_blocker() { if (blocked_) pthread_sigmask(SIG_SETMASK, &old_mask_, 0); } // Block all signals for the calling thread. void block() { if (!blocked_) { sigset_t new_mask; sigfillset(&new_mask); blocked_ = (pthread_sigmask(SIG_BLOCK, &new_mask, &old_mask_) == 0); } } // Restore the previous signal mask. void unblock() { if (blocked_) blocked_ = (pthread_sigmask(SIG_SETMASK, &old_mask_, 0) != 0); } private: // Have signals been blocked. bool blocked_; // The previous signal mask. sigset_t old_mask_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_PTHREADS) #endif // ASIO_DETAIL_POSIX_SIGNAL_BLOCKER_HPP galera-3-25.3.20/asio/asio/detail/macos_fenced_block.hpp0000644000015300001660000000235413042054732022563 0ustar jenkinsjenkins// // detail/macos_fenced_block.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_MACOS_FENCED_BLOCK_HPP #define ASIO_DETAIL_MACOS_FENCED_BLOCK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(__MACH__) && defined(__APPLE__) #include #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class macos_fenced_block : private noncopyable { public: enum half_t { half }; enum full_t { full }; // Constructor for a half fenced block. explicit macos_fenced_block(half_t) { } // Constructor for a full fenced block. explicit macos_fenced_block(full_t) { OSMemoryBarrier(); } // Destructor. ~macos_fenced_block() { OSMemoryBarrier(); } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(__MACH__) && defined(__APPLE__) #endif // ASIO_DETAIL_MACOS_FENCED_BLOCK_HPP galera-3-25.3.20/asio/asio/detail/fenced_block.hpp0000644000015300001660000000526513042054732021405 0ustar jenkinsjenkins// // detail/fenced_block.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_FENCED_BLOCK_HPP #define ASIO_DETAIL_FENCED_BLOCK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) \ || defined(ASIO_DISABLE_FENCED_BLOCK) # include "asio/detail/null_fenced_block.hpp" #elif defined(__MACH__) && defined(__APPLE__) # include "asio/detail/macos_fenced_block.hpp" #elif defined(__sun) # include "asio/detail/solaris_fenced_block.hpp" #elif defined(__GNUC__) && defined(__arm__) \ && !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) # include "asio/detail/gcc_arm_fenced_block.hpp" #elif defined(__GNUC__) && (defined(__hppa) || defined(__hppa__)) # include "asio/detail/gcc_hppa_fenced_block.hpp" #elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) # include "asio/detail/gcc_x86_fenced_block.hpp" #elif defined(__GNUC__) \ && ((__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)) \ && !defined(__INTEL_COMPILER) && !defined(__ICL) \ && !defined(__ICC) && !defined(__ECC) && !defined(__PATHSCALE__) # include "asio/detail/gcc_sync_fenced_block.hpp" #elif defined(ASIO_WINDOWS) && !defined(UNDER_CE) # include "asio/detail/win_fenced_block.hpp" #else # include "asio/detail/null_fenced_block.hpp" #endif namespace asio { namespace detail { #if !defined(ASIO_HAS_THREADS) \ || defined(ASIO_DISABLE_FENCED_BLOCK) typedef null_fenced_block fenced_block; #elif defined(__MACH__) && defined(__APPLE__) typedef macos_fenced_block fenced_block; #elif defined(__sun) typedef solaris_fenced_block fenced_block; #elif defined(__GNUC__) && defined(__arm__) \ && !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) typedef gcc_arm_fenced_block fenced_block; #elif defined(__GNUC__) && (defined(__hppa) || defined(__hppa__)) typedef gcc_hppa_fenced_block fenced_block; #elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) typedef gcc_x86_fenced_block fenced_block; #elif defined(__GNUC__) \ && ((__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)) \ && !defined(__INTEL_COMPILER) && !defined(__ICL) \ && !defined(__ICC) && !defined(__ECC) && !defined(__PATHSCALE__) typedef gcc_sync_fenced_block fenced_block; #elif defined(ASIO_WINDOWS) && !defined(UNDER_CE) typedef win_fenced_block fenced_block; #else typedef null_fenced_block fenced_block; #endif } // namespace detail } // namespace asio #endif // ASIO_DETAIL_FENCED_BLOCK_HPP galera-3-25.3.20/asio/asio/detail/consuming_buffers.hpp0000644000015300001660000001626413042054732022526 0ustar jenkinsjenkins// // detail/consuming_buffers.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_CONSUMING_BUFFERS_HPP #define ASIO_DETAIL_CONSUMING_BUFFERS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/buffer.hpp" #include "asio/detail/limits.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // A proxy iterator for a sub-range in a list of buffers. 
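// A scatter/gather operation may transfer only part of a buffer sequence,
// so the caller needs a retryable view of "whatever has not gone through
// yet". That is what consuming_buffers below provides: wrap the original
// sequence and, after each partial transfer, consume() the bytes that were
// handled. A hedged usage sketch (write_some_fn stands in for whatever
// performs the transfer; it is not an asio name):
//
//   std::vector<asio::const_buffer> bufs;
//   bufs.push_back(asio::buffer("hello, ", 7));
//   bufs.push_back(asio::buffer("world", 5));
//
//   asio::detail::consuming_buffers<asio::const_buffer,
//       std::vector<asio::const_buffer> > pending(bufs);
//
//   std::size_t n = write_some_fn(pending); // may send only part
//   pending.consume(n);                     // drop what was sent
//   // pending.begin()..pending.end() now covers only the remainder
//
// The proxy iterator declared next is what walks that remainder: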
template class consuming_buffers_iterator { public: /// The type used for the distance between two iterators. typedef std::ptrdiff_t difference_type; /// The type of the value pointed to by the iterator. typedef Buffer value_type; /// The type of the result of applying operator->() to the iterator. typedef const Buffer* pointer; /// The type of the result of applying operator*() to the iterator. typedef const Buffer& reference; /// The iterator category. typedef std::forward_iterator_tag iterator_category; // Default constructor creates an end iterator. consuming_buffers_iterator() : at_end_(true) { } // Construct with a buffer for the first entry and an iterator // range for the remaining entries. consuming_buffers_iterator(bool at_end, const Buffer& first, Buffer_Iterator begin_remainder, Buffer_Iterator end_remainder, std::size_t max_size) : at_end_(max_size > 0 ? at_end : true), first_(buffer(first, max_size)), begin_remainder_(begin_remainder), end_remainder_(end_remainder), offset_(0), max_size_(max_size) { } // Dereference an iterator. const Buffer& operator*() const { return dereference(); } // Dereference an iterator. const Buffer* operator->() const { return &dereference(); } // Increment operator (prefix). consuming_buffers_iterator& operator++() { increment(); return *this; } // Increment operator (postfix). consuming_buffers_iterator operator++(int) { consuming_buffers_iterator tmp(*this); ++*this; return tmp; } // Test two iterators for equality. friend bool operator==(const consuming_buffers_iterator& a, const consuming_buffers_iterator& b) { return a.equal(b); } // Test two iterators for inequality. friend bool operator!=(const consuming_buffers_iterator& a, const consuming_buffers_iterator& b) { return !a.equal(b); } private: void increment() { if (!at_end_) { if (begin_remainder_ == end_remainder_ || offset_ + buffer_size(first_) >= max_size_) { at_end_ = true; } else { offset_ += buffer_size(first_); first_ = buffer(*begin_remainder_++, max_size_ - offset_); } } } bool equal(const consuming_buffers_iterator& other) const { if (at_end_ && other.at_end_) return true; return !at_end_ && !other.at_end_ && buffer_cast(first_) == buffer_cast(other.first_) && buffer_size(first_) == buffer_size(other.first_) && begin_remainder_ == other.begin_remainder_ && end_remainder_ == other.end_remainder_; } const Buffer& dereference() const { return first_; } bool at_end_; Buffer first_; Buffer_Iterator begin_remainder_; Buffer_Iterator end_remainder_; std::size_t offset_; std::size_t max_size_; }; // A proxy for a sub-range in a list of buffers. template class consuming_buffers { public: // The type for each element in the list of buffers. typedef Buffer value_type; // A forward-only iterator type that may be used to read elements. typedef consuming_buffers_iterator const_iterator; // Construct to represent the entire list of buffers. consuming_buffers(const Buffers& buffers) : buffers_(buffers), at_end_(buffers_.begin() == buffers_.end()), begin_remainder_(buffers_.begin()), max_size_((std::numeric_limits::max)()) { if (!at_end_) { first_ = *buffers_.begin(); ++begin_remainder_; } } // Copy constructor. 
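// Copying this class needs care: begin_remainder_ is an iterator into the
// member container buffers_, so a copy must not reuse the source object's
// iterator, which still points into the source. It rebases instead: take
// the distance from the source's begin() and advance the copy's own
// begin() by the same amount. The same issue in miniature (demo is an
// illustrative name):
//
//   #include <iterator>
//   #include <vector>
//
//   void demo()
//   {
//     std::vector<int> a(5, 0);
//     std::vector<int>::iterator pos = a.begin() + 2;
//
//     std::vector<int> b(a); // copy the container
//     std::vector<int>::iterator pos_b = b.begin();
//     std::advance(pos_b, std::distance(a.begin(), pos)); // rebase into b
//   }
//
// Hence the constructor that follows recomputes begin_remainder_ rather
// than copying other.begin_remainder_ directly: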
consuming_buffers(const consuming_buffers& other) : buffers_(other.buffers_), at_end_(other.at_end_), first_(other.first_), begin_remainder_(buffers_.begin()), max_size_(other.max_size_) { typename Buffers::const_iterator first = other.buffers_.begin(); typename Buffers::const_iterator second = other.begin_remainder_; std::advance(begin_remainder_, std::distance(first, second)); } // Assignment operator. consuming_buffers& operator=(const consuming_buffers& other) { buffers_ = other.buffers_; at_end_ = other.at_end_; first_ = other.first_; begin_remainder_ = buffers_.begin(); typename Buffers::const_iterator first = other.buffers_.begin(); typename Buffers::const_iterator second = other.begin_remainder_; std::advance(begin_remainder_, std::distance(first, second)); max_size_ = other.max_size_; return *this; } // Get a forward-only iterator to the first element. const_iterator begin() const { return const_iterator(at_end_, first_, begin_remainder_, buffers_.end(), max_size_); } // Get a forward-only iterator for one past the last element. const_iterator end() const { return const_iterator(); } // Set the maximum size for a single transfer. void prepare(std::size_t max_size) { max_size_ = max_size; } // Consume the specified number of bytes from the buffers. void consume(std::size_t size) { // Remove buffers from the start until the specified size is reached. while (size > 0 && !at_end_) { if (buffer_size(first_) <= size) { size -= buffer_size(first_); if (begin_remainder_ == buffers_.end()) at_end_ = true; else first_ = *begin_remainder_++; } else { first_ = first_ + size; size = 0; } } // Remove any more empty buffers at the start. while (!at_end_ && buffer_size(first_) == 0) { if (begin_remainder_ == buffers_.end()) at_end_ = true; else first_ = *begin_remainder_++; } } private: Buffers buffers_; bool at_end_; Buffer first_; typename Buffers::const_iterator begin_remainder_; std::size_t max_size_; }; // Specialisation for null_buffers to ensure that the null_buffers type is // always passed through to the underlying read or write operation. template class consuming_buffers : public asio::null_buffers { public: consuming_buffers(const asio::null_buffers&) { // No-op. } void prepare(std::size_t) { // No-op. } void consume(std::size_t) { // No-op. } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_CONSUMING_BUFFERS_HPP galera-3-25.3.20/asio/asio/detail/wince_thread.hpp0000644000015300001660000000444613042054732021443 0ustar jenkinsjenkins// // detail/wince_thread.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINCE_THREAD_HPP #define ASIO_DETAIL_WINCE_THREAD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) && defined(UNDER_CE) #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { DWORD WINAPI wince_thread_function(LPVOID arg); class wince_thread : private noncopyable { public: // Constructor. 
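// The constructor that follows keeps the heap-allocated function object in
// a std::auto_ptr until CreateThread has succeeded, and only then releases
// ownership to the new thread; on failure the auto_ptr still frees it, so
// nothing leaks. The hand-off idiom in isolation (task, entry and
// throw_creation_error are illustrative names; std::auto_ptr matches the
// C++03 vintage of this code):
//
//   std::auto_ptr<task> arg(new task(f)); // owned here until hand-off
//   DWORD id = 0;
//   HANDLE h = ::CreateThread(0, 0, entry, arg.get(), 0, &id);
//   if (!h)
//     throw_creation_error();             // still owned by auto_ptr: freed
//   arg.release();                        // success: the thread owns it now
//
// The constructor below applies this sequence: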
template wince_thread(Function f, unsigned int = 0) { std::auto_ptr arg(new func(f)); DWORD thread_id = 0; thread_ = ::CreateThread(0, 0, wince_thread_function, arg.get(), 0, &thread_id); if (!thread_) { DWORD last_error = ::GetLastError(); asio::error_code ec(last_error, asio::error::get_system_category()); asio::detail::throw_error(ec, "thread"); } arg.release(); } // Destructor. ~wince_thread() { ::CloseHandle(thread_); } // Wait for the thread to exit. void join() { ::WaitForSingleObject(thread_, INFINITE); } private: friend DWORD WINAPI wince_thread_function(LPVOID arg); class func_base { public: virtual ~func_base() {} virtual void run() = 0; }; template class func : public func_base { public: func(Function f) : f_(f) { } virtual void run() { f_(); } private: Function f_; }; ::HANDLE thread_; }; inline DWORD WINAPI wince_thread_function(LPVOID arg) { std::auto_ptr func( static_cast(arg)); func->run(); return 0; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS) && defined(UNDER_CE) #endif // ASIO_DETAIL_WINCE_THREAD_HPP galera-3-25.3.20/asio/asio/detail/win_iocp_io_service.hpp0000644000015300001660000002430513042054732023021 0ustar jenkinsjenkins// // detail/win_iocp_io_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_IO_SERVICE_HPP #define ASIO_DETAIL_WIN_IOCP_IO_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/io_service.hpp" #include "asio/detail/call_stack.hpp" #include "asio/detail/limits.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/scoped_ptr.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/thread.hpp" #include "asio/detail/timer_queue_base.hpp" #include "asio/detail/timer_queue_set.hpp" #include "asio/detail/wait_op.hpp" #include "asio/detail/win_iocp_operation.hpp" #include "asio/detail/win_iocp_thread_info.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class wait_op; class win_iocp_io_service : public asio::detail::service_base { public: // Constructor. Specifies a concurrency hint that is passed through to the // underlying I/O completion port. ASIO_DECL win_iocp_io_service(asio::io_service& io_service, size_t concurrency_hint = 0); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Initialise the task. Nothing to do here. void init_task() { } // Register a handle with the IO completion port. ASIO_DECL asio::error_code register_handle( HANDLE handle, asio::error_code& ec); // Run the event loop until stopped or no more work. ASIO_DECL size_t run(asio::error_code& ec); // Run until stopped or one operation is performed. ASIO_DECL size_t run_one(asio::error_code& ec); // Poll for operations without blocking. ASIO_DECL size_t poll(asio::error_code& ec); // Poll for one operation without blocking. ASIO_DECL size_t poll_one(asio::error_code& ec); // Stop the event processing loop. ASIO_DECL void stop(); // Determine whether the io_service is stopped. 
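// The stop flag is shared between threads, so even reads of it go through
// the Win32 interlocked API: adding zero with InterlockedExchangeAdd
// returns the previous value atomically, which is the usual pre-C++11 way
// to get an atomic load of a LONG. In isolation (flag_demo is an
// illustrative name):
//
//   #include <windows.h>
//
//   void flag_demo()
//   {
//     static long flag = 0;
//
//     // atomic load: add 0, the returned old value is the current value
//     long value = ::InterlockedExchangeAdd(&flag, 0);
//
//     // atomic store (the shape used by stop() and reset())
//     if (value == 0)
//       ::InterlockedExchange(&flag, 1);
//   }
//
// stopped() below is that atomic load, compared against zero: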
bool stopped() const { return ::InterlockedExchangeAdd(&stopped_, 0) != 0; } // Reset in preparation for a subsequent run invocation. void reset() { ::InterlockedExchange(&stopped_, 0); } // Notify that some work has started. void work_started() { ::InterlockedIncrement(&outstanding_work_); } // Notify that some work has finished. void work_finished() { if (::InterlockedDecrement(&outstanding_work_) == 0) stop(); } // Return whether a handler can be dispatched immediately. bool can_dispatch() { return thread_call_stack::contains(this) != 0; } // Request invocation of the given handler. template void dispatch(Handler& handler); // Request invocation of the given handler and return immediately. template void post(Handler& handler); // Request invocation of the given operation and return immediately. Assumes // that work_started() has not yet been called for the operation. void post_immediate_completion(win_iocp_operation* op, bool) { work_started(); post_deferred_completion(op); } // Request invocation of the given operation and return immediately. Assumes // that work_started() was previously called for the operation. ASIO_DECL void post_deferred_completion(win_iocp_operation* op); // Request invocation of the given operation and return immediately. Assumes // that work_started() was previously called for the operations. ASIO_DECL void post_deferred_completions( op_queue& ops); // Request invocation of the given operation using the thread-private queue // and return immediately. Assumes that work_started() has not yet been // called for the operation. void post_private_immediate_completion(win_iocp_operation* op) { post_immediate_completion(op, false); } // Request invocation of the given operation using the thread-private queue // and return immediately. Assumes that work_started() was previously called // for the operation. void post_private_deferred_completion(win_iocp_operation* op) { post_deferred_completion(op); } // Process unfinished operations as part of a shutdown_service operation. // Assumes that work_started() was previously called for the operations. ASIO_DECL void abandon_operations(op_queue& ops); // Called after starting an overlapped I/O operation that did not complete // immediately. The caller must have already called work_started() prior to // starting the operation. ASIO_DECL void on_pending(win_iocp_operation* op); // Called after starting an overlapped I/O operation that completed // immediately. The caller must have already called work_started() prior to // starting the operation. ASIO_DECL void on_completion(win_iocp_operation* op, DWORD last_error = 0, DWORD bytes_transferred = 0); // Called after starting an overlapped I/O operation that completed // immediately. The caller must have already called work_started() prior to // starting the operation. ASIO_DECL void on_completion(win_iocp_operation* op, const asio::error_code& ec, DWORD bytes_transferred = 0); // Add a new timer queue to the service. template void add_timer_queue(timer_queue& timer_queue); // Remove a timer queue from the service. template void remove_timer_queue(timer_queue& timer_queue); // Schedule a new operation in the given timer queue to expire at the // specified absolute time. template void schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op); // Cancel the timer associated with the given token. Returns the number of // handlers that have been posted or dispatched. 
template std::size_t cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled = (std::numeric_limits::max)()); private: #if defined(WINVER) && (WINVER < 0x0500) typedef DWORD dword_ptr_t; typedef ULONG ulong_ptr_t; #else // defined(WINVER) && (WINVER < 0x0500) typedef DWORD_PTR dword_ptr_t; typedef ULONG_PTR ulong_ptr_t; #endif // defined(WINVER) && (WINVER < 0x0500) // Dequeues at most one operation from the I/O completion port, and then // executes it. Returns the number of operations that were dequeued (i.e. // either 0 or 1). ASIO_DECL size_t do_one(bool block, asio::error_code& ec); // Helper to calculate the GetQueuedCompletionStatus timeout. ASIO_DECL static DWORD get_gqcs_timeout(); // Helper function to add a new timer queue. ASIO_DECL void do_add_timer_queue(timer_queue_base& queue); // Helper function to remove a timer queue. ASIO_DECL void do_remove_timer_queue(timer_queue_base& queue); // Called to recalculate and update the timeout. ASIO_DECL void update_timeout(); // Helper class to call work_finished() on block exit. struct work_finished_on_block_exit; // Helper class for managing a HANDLE. struct auto_handle { HANDLE handle; auto_handle() : handle(0) {} ~auto_handle() { if (handle) ::CloseHandle(handle); } }; // The IO completion port used for queueing operations. auto_handle iocp_; // The count of unfinished work. long outstanding_work_; // Flag to indicate whether the event loop has been stopped. mutable long stopped_; // Flag to indicate whether there is an in-flight stop event. Every event // posted using PostQueuedCompletionStatus consumes non-paged pool, so to // avoid exhausting this resouce we limit the number of outstanding events. long stop_event_posted_; // Flag to indicate whether the service has been shut down. long shutdown_; enum { // Timeout to use with GetQueuedCompletionStatus on older versions of // Windows. Some versions of windows have a "bug" where a call to // GetQueuedCompletionStatus can appear stuck even though there are events // waiting on the queue. Using a timeout helps to work around the issue. default_gqcs_timeout = 500, // Maximum waitable timer timeout, in milliseconds. max_timeout_msec = 5 * 60 * 1000, // Maximum waitable timer timeout, in microseconds. max_timeout_usec = max_timeout_msec * 1000, // Completion key value used to wake up a thread to dispatch timers or // completed operations. wake_for_dispatch = 1, // Completion key value to indicate that an operation has posted with the // original last_error and bytes_transferred values stored in the fields of // the OVERLAPPED structure. overlapped_contains_result = 2 }; // Timeout to use with GetQueuedCompletionStatus. const DWORD gqcs_timeout_; // Function object for processing timeouts in a background thread. struct timer_thread_function; friend struct timer_thread_function; // Background thread used for processing timeouts. scoped_ptr timer_thread_; // A waitable timer object used for waiting for timeouts. auto_handle waitable_timer_; // Non-zero if timers or completed operations need to be dispatched. long dispatch_required_; // Mutex for protecting access to the timer queues and completed operations. mutex dispatch_mutex_; // The timer queues. timer_queue_set timer_queues_; // The operations that are ready to dispatch. op_queue completed_ops_; // Per-thread call stack to track the state of each thread in the io_service. 
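// The call stack answers the question "is the current thread already
// running inside this io_service?", which is what can_dispatch() above
// uses to decide between invoking a handler immediately and queueing it.
// A hedged sketch of how the facility is used (not of its implementation):
//
//   // On entry to the event loop a per-thread marker for 'this' is
//   // pushed; later, from arbitrary code on the same thread:
//   if (thread_call_stack::contains(this))
//   {
//     // already inside run() on this thread: safe to invoke directly
//   }
//   else
//   {
//     // another thread, or outside run(): post the handler instead
//   }
//
// The typedef next names that per-thread stack of markers: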
typedef call_stack thread_call_stack; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/detail/impl/win_iocp_io_service.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/win_iocp_io_service.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_IO_SERVICE_HPP galera-3-25.3.20/asio/asio/detail/win_iocp_null_buffers_op.hpp0000644000015300001660000000675713042054732024071 0ustar jenkinsjenkins// // detail/win_iocp_null_buffers_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_NULL_BUFFERS_OP_HPP #define ASIO_DETAIL_WIN_IOCP_NULL_BUFFERS_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class win_iocp_null_buffers_op : public reactor_op { public: ASIO_DEFINE_HANDLER_PTR(win_iocp_null_buffers_op); win_iocp_null_buffers_op(socket_ops::weak_cancel_token_type cancel_token, Handler& handler) : reactor_op(&win_iocp_null_buffers_op::do_perform, &win_iocp_null_buffers_op::do_complete), cancel_token_(cancel_token), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static bool do_perform(reactor_op*) { return true; } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& result_ec, std::size_t bytes_transferred) { asio::error_code ec(result_ec); // Take ownership of the operation object. win_iocp_null_buffers_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); // The reactor may have stored a result in the operation object. if (o->ec_) ec = o->ec_; // Map non-portable errors to their portable counterparts. if (ec.value() == ERROR_NETNAME_DELETED) { if (o->cancel_token_.expired()) ec = asio::error::operation_aborted; else ec = asio::error::connection_reset; } else if (ec.value() == ERROR_PORT_UNREACHABLE) { ec = asio::error::connection_refused; } // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, ec, bytes_transferred); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. 
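// fenced_block (selected per platform in fenced_block.hpp) is a scope
// guard that issues memory barriers around the handler upcall, so writes
// made on the I/O thread are visible to the handler and vice versa. A
// rough C++11 rendering of the idea, for orientation only (asio itself
// targets pre-C++11 toolchains here, and barrier_block is an illustrative
// name):
//
//   #include <atomic>
//
//   class barrier_block
//   {
//   public:
//     enum half_t { half };
//     enum full_t { full };
//     explicit barrier_block(half_t) {} // no fence on entry
//     explicit barrier_block(full_t)
//     { std::atomic_thread_fence(std::memory_order_seq_cst); }
//     ~barrier_block() // fence on exit in both cases
//     { std::atomic_thread_fence(std::memory_order_seq_cst); }
//   };
//
// Only the "half" variant is needed before this particular upcall: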
if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: socket_ops::weak_cancel_token_type cancel_token_; Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_NULL_BUFFERS_OP_HPP galera-3-25.3.20/asio/asio/detail/op_queue.hpp0000644000015300001660000000601213042054732020620 0ustar jenkinsjenkins// // detail/op_queue.hpp // ~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_OP_QUEUE_HPP #define ASIO_DETAIL_OP_QUEUE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class op_queue; class op_queue_access { public: template static Operation* next(Operation* o) { return static_cast(o->next_); } template static void next(Operation1*& o1, Operation2* o2) { o1->next_ = o2; } template static void destroy(Operation* o) { o->destroy(); } template static Operation*& front(op_queue& q) { return q.front_; } template static Operation*& back(op_queue& q) { return q.back_; } }; template class op_queue : private noncopyable { public: // Constructor. op_queue() : front_(0), back_(0) { } // Destructor destroys all operations. ~op_queue() { while (Operation* op = front_) { pop(); op_queue_access::destroy(op); } } // Get the operation at the front of the queue. Operation* front() { return front_; } // Pop an operation from the front of the queue. void pop() { if (front_) { Operation* tmp = front_; front_ = op_queue_access::next(front_); if (front_ == 0) back_ = 0; op_queue_access::next(tmp, static_cast(0)); } } // Push an operation on to the back of the queue. void push(Operation* h) { op_queue_access::next(h, static_cast(0)); if (back_) { op_queue_access::next(back_, h); back_ = h; } else { front_ = back_ = h; } } // Push all operations from another queue on to the back of the queue. The // source queue may contain operations of a derived type. template void push(op_queue& q) { if (Operation* other_front = op_queue_access::front(q)) { if (back_) op_queue_access::next(back_, other_front); else front_ = other_front; back_ = op_queue_access::back(q); op_queue_access::front(q) = 0; op_queue_access::back(q) = 0; } } // Whether the queue is empty. bool empty() const { return front_ == 0; } private: friend class op_queue_access; // The front of the queue. Operation* front_; // The back of the queue. Operation* back_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_OP_QUEUE_HPP galera-3-25.3.20/asio/asio/detail/limits.hpp0000644000015300001660000000126413042054732020303 0ustar jenkinsjenkins// // detail/limits.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2011 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_LIMITS_HPP #define ASIO_DETAIL_LIMITS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_BOOST_LIMITS) # include #else // defined(ASIO_HAS_BOOST_LIMITS) # include #endif // defined(ASIO_HAS_BOOST_LIMITS) #endif // ASIO_DETAIL_LIMITS_HPP galera-3-25.3.20/asio/asio/detail/reactive_serial_port_service.hpp0000644000015300001660000001701013042054732024723 0ustar jenkinsjenkins// // detail/reactive_serial_port_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_SERIAL_PORT_SERVICE_HPP #define ASIO_DETAIL_REACTIVE_SERIAL_PORT_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_SERIAL_PORT) #if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #include #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/serial_port_base.hpp" #include "asio/detail/descriptor_ops.hpp" #include "asio/detail/reactive_descriptor_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Extend reactive_descriptor_service to provide serial port support. class reactive_serial_port_service { public: // The native type of a serial port. typedef reactive_descriptor_service::native_handle_type native_handle_type; // The implementation type of the serial port. typedef reactive_descriptor_service::implementation_type implementation_type; ASIO_DECL reactive_serial_port_service( asio::io_service& io_service); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Construct a new serial port implementation. void construct(implementation_type& impl) { descriptor_service_.construct(impl); } // Move-construct a new serial port implementation. void move_construct(implementation_type& impl, implementation_type& other_impl) { descriptor_service_.move_construct(impl, other_impl); } // Move-assign from another serial port implementation. void move_assign(implementation_type& impl, reactive_serial_port_service& other_service, implementation_type& other_impl) { descriptor_service_.move_assign(impl, other_service.descriptor_service_, other_impl); } // Destroy a serial port implementation. void destroy(implementation_type& impl) { descriptor_service_.destroy(impl); } // Open the serial port using the specified device name. ASIO_DECL asio::error_code open(implementation_type& impl, const std::string& device, asio::error_code& ec); // Assign a native descriptor to a serial port implementation. asio::error_code assign(implementation_type& impl, const native_handle_type& native_descriptor, asio::error_code& ec) { return descriptor_service_.assign(impl, native_descriptor, ec); } // Determine whether the serial port is open. bool is_open(const implementation_type& impl) const { return descriptor_service_.is_open(impl); } // Destroy a serial port implementation. 
asio::error_code close(implementation_type& impl, asio::error_code& ec) { return descriptor_service_.close(impl, ec); } // Get the native serial port representation. native_handle_type native_handle(implementation_type& impl) { return descriptor_service_.native_handle(impl); } // Cancel all operations associated with the serial port. asio::error_code cancel(implementation_type& impl, asio::error_code& ec) { return descriptor_service_.cancel(impl, ec); } // Set an option on the serial port. template asio::error_code set_option(implementation_type& impl, const SettableSerialPortOption& option, asio::error_code& ec) { return do_set_option(impl, &reactive_serial_port_service::store_option, &option, ec); } // Get an option from the serial port. template asio::error_code get_option(const implementation_type& impl, GettableSerialPortOption& option, asio::error_code& ec) const { return do_get_option(impl, &reactive_serial_port_service::load_option, &option, ec); } // Send a break sequence to the serial port. asio::error_code send_break(implementation_type& impl, asio::error_code& ec) { errno = 0; descriptor_ops::error_wrapper(::tcsendbreak( descriptor_service_.native_handle(impl), 0), ec); return ec; } // Write the given data. Returns the number of bytes sent. template size_t write_some(implementation_type& impl, const ConstBufferSequence& buffers, asio::error_code& ec) { return descriptor_service_.write_some(impl, buffers, ec); } // Start an asynchronous write. The data being written must be valid for the // lifetime of the asynchronous operation. template void async_write_some(implementation_type& impl, const ConstBufferSequence& buffers, Handler& handler) { descriptor_service_.async_write_some(impl, buffers, handler); } // Read some data. Returns the number of bytes received. template size_t read_some(implementation_type& impl, const MutableBufferSequence& buffers, asio::error_code& ec) { return descriptor_service_.read_some(impl, buffers, ec); } // Start an asynchronous read. The buffer for the data being received must be // valid for the lifetime of the asynchronous operation. template void async_read_some(implementation_type& impl, const MutableBufferSequence& buffers, Handler& handler) { descriptor_service_.async_read_some(impl, buffers, handler); } private: // Function pointer type for storing a serial port option. typedef asio::error_code (*store_function_type)( const void*, termios&, asio::error_code&); // Helper function template to store a serial port option. template static asio::error_code store_option(const void* option, termios& storage, asio::error_code& ec) { return static_cast(option)->store( storage, ec); } // Helper function to set a serial port option. ASIO_DECL asio::error_code do_set_option( implementation_type& impl, store_function_type store, const void* option, asio::error_code& ec); // Function pointer type for loading a serial port option. typedef asio::error_code (*load_function_type)( void*, const termios&, asio::error_code&); // Helper function template to load a serial port option. template static asio::error_code load_option(void* option, const termios& storage, asio::error_code& ec) { return static_cast(option)->load(storage, ec); } // Helper function to get a serial port option. ASIO_DECL asio::error_code do_get_option( const implementation_type& impl, load_function_type load, void* option, asio::error_code& ec) const; // The implementation used for initiating asynchronous operations. 
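// Usage sketch: this service backs the public asio::serial_port class on
// POSIX platforms, with options marshalled through termios by the store/load
// helpers above. A minimal example (the device name is illustrative only):
//
//   #include <asio.hpp>
//   asio::io_service io;
//   asio::serial_port port(io);
//   asio::error_code ec;
//   port.open("/dev/ttyUSB0", ec);
//   if (!ec)
//   {
//     port.set_option(asio::serial_port_base::baud_rate(115200));
//     asio::write(port, asio::buffer("AT\r\n", 4));
//   }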
reactive_descriptor_service descriptor_service_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/reactive_serial_port_service.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #endif // defined(ASIO_HAS_SERIAL_PORT) #endif // ASIO_DETAIL_REACTIVE_SERIAL_PORT_SERVICE_HPP galera-3-25.3.20/asio/asio/detail/resolver_service_base.hpp0000644000015300001660000000706213042054732023357 0ustar jenkinsjenkins// // detail/resolver_service_base.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_RESOLVER_SERVICE_BASE_HPP #define ASIO_DETAIL_RESOLVER_SERVICE_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/scoped_ptr.hpp" #include "asio/detail/thread.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class resolver_service_base { public: // The implementation type of the resolver. A cancellation token is used to // indicate to the background thread that the operation has been cancelled. typedef socket_ops::shared_cancel_token_type implementation_type; // Constructor. ASIO_DECL resolver_service_base(asio::io_service& io_service); // Destructor. ASIO_DECL ~resolver_service_base(); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Perform any fork-related housekeeping. ASIO_DECL void fork_service( asio::io_service::fork_event fork_ev); // Construct a new resolver implementation. ASIO_DECL void construct(implementation_type& impl); // Destroy a resolver implementation. ASIO_DECL void destroy(implementation_type&); // Cancel pending asynchronous operations. ASIO_DECL void cancel(implementation_type& impl); protected: // Helper function to start an asynchronous resolve operation. ASIO_DECL void start_resolve_op(operation* op); #if !defined(ASIO_WINDOWS_RUNTIME) // Helper class to perform exception-safe cleanup of addrinfo objects. class auto_addrinfo : private asio::detail::noncopyable { public: explicit auto_addrinfo(asio::detail::addrinfo_type* ai) : ai_(ai) { } ~auto_addrinfo() { if (ai_) socket_ops::freeaddrinfo(ai_); } operator asio::detail::addrinfo_type*() { return ai_; } private: asio::detail::addrinfo_type* ai_; }; #endif // !defined(ASIO_WINDOWS_RUNTIME) // Helper class to run the work io_service in a thread. class work_io_service_runner; // Start the work thread if it's not already running. ASIO_DECL void start_work_thread(); // The io_service implementation used to post completions. io_service_impl& io_service_impl_; private: // Mutex to protect access to internal data. asio::detail::mutex mutex_; // Private io_service used for performing asynchronous host resolution. asio::detail::scoped_ptr work_io_service_; // The work io_service implementation used to post completions. io_service_impl& work_io_service_impl_; // Work for the private io_service to perform. 
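// Usage sketch: this base class runs blocking getaddrinfo()/getnameinfo()
// calls on a single background thread driven by the private work io_service
// above; cancel() resets the shared cancel token so that the background
// operation's weak token expires and the handler completes with
// asio::error::operation_aborted. From the public API this is reached through
// asio::ip::tcp::resolver, for example:
//
//   asio::io_service io;
//   asio::ip::tcp::resolver resolver(io);
//   asio::ip::tcp::resolver::query query("example.com", "http");
//   resolver.async_resolve(query,
//       [](const asio::error_code& ec, asio::ip::tcp::resolver::iterator i)
//       {
//         // iterate the resolved endpoints on success
//       });
//   io.run();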
asio::detail::scoped_ptr work_; // Thread used for running the work io_service's run loop. asio::detail::scoped_ptr work_thread_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/resolver_service_base.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_DETAIL_RESOLVER_SERVICE_BASE_HPP galera-3-25.3.20/asio/asio/detail/winrt_ssocket_service_base.hpp0000644000015300001660000002670713042054732024423 0ustar jenkinsjenkins// // detail/winrt_ssocket_service_base.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_SSOCKET_SERVICE_BASE_HPP #define ASIO_DETAIL_WINRT_SSOCKET_SERVICE_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include "asio/buffer.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/socket_base.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/winrt_async_manager.hpp" #include "asio/detail/winrt_socket_recv_op.hpp" #include "asio/detail/winrt_socket_send_op.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class winrt_ssocket_service_base { public: // The native type of a socket. typedef Windows::Networking::Sockets::StreamSocket^ native_handle_type; // The implementation type of the socket. struct base_implementation_type { // Default constructor. base_implementation_type() : socket_(nullptr), next_(0), prev_(0) { } // The underlying native socket. native_handle_type socket_; // Pointers to adjacent socket implementations in linked list. base_implementation_type* next_; base_implementation_type* prev_; }; // Constructor. ASIO_DECL winrt_ssocket_service_base( asio::io_service& io_service); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Construct a new socket implementation. ASIO_DECL void construct(base_implementation_type&); // Move-construct a new socket implementation. ASIO_DECL void base_move_construct(base_implementation_type& impl, base_implementation_type& other_impl); // Move-assign from another socket implementation. ASIO_DECL void base_move_assign(base_implementation_type& impl, winrt_ssocket_service_base& other_service, base_implementation_type& other_impl); // Destroy a socket implementation. ASIO_DECL void destroy(base_implementation_type& impl); // Determine whether the socket is open. bool is_open(const base_implementation_type& impl) const { return impl.socket_ != nullptr; } // Destroy a socket implementation. ASIO_DECL asio::error_code close( base_implementation_type& impl, asio::error_code& ec); // Get the native socket representation. native_handle_type native_handle(base_implementation_type& impl) { return impl.socket_; } // Cancel all operations associated with the socket. asio::error_code cancel(base_implementation_type&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Determine whether the socket is at the out-of-band data mark. 
bool at_mark(const base_implementation_type&, asio::error_code& ec) const { ec = asio::error::operation_not_supported; return false; } // Determine the number of bytes available for reading. std::size_t available(const base_implementation_type&, asio::error_code& ec) const { ec = asio::error::operation_not_supported; return 0; } // Perform an IO control command on the socket. template asio::error_code io_control(base_implementation_type&, IO_Control_Command&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Gets the non-blocking mode of the socket. bool non_blocking(const base_implementation_type&) const { return false; } // Sets the non-blocking mode of the socket. asio::error_code non_blocking(base_implementation_type&, bool, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Gets the non-blocking mode of the native socket implementation. bool native_non_blocking(const base_implementation_type&) const { return false; } // Sets the non-blocking mode of the native socket implementation. asio::error_code native_non_blocking(base_implementation_type&, bool, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Disable sends or receives on the socket. asio::error_code shutdown(base_implementation_type&, socket_base::shutdown_type, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Send the given data to the peer. template std::size_t send(base_implementation_type& impl, const ConstBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { return do_send(impl, buffer_sequence_adapter::first(buffers), flags, ec); } // Wait until data can be sent without blocking. std::size_t send(base_implementation_type&, const null_buffers&, socket_base::message_flags, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Start an asynchronous send. The data being sent must be valid for the // lifetime of the asynchronous operation. template void async_send(base_implementation_type& impl, const ConstBufferSequence& buffers, socket_base::message_flags flags, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef winrt_socket_send_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(buffers, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_send")); start_send_op(impl, buffer_sequence_adapter::first(buffers), flags, p.p, is_continuation); p.v = p.p = 0; } // Start an asynchronous wait until data can be sent without blocking. template void async_send(base_implementation_type&, const null_buffers&, socket_base::message_flags, Handler& handler) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; io_service_.get_io_service().post( detail::bind_handler(handler, ec, bytes_transferred)); } // Receive some data from the peer. Returns the number of bytes received. template std::size_t receive(base_implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { return do_receive(impl, buffer_sequence_adapter::first(buffers), flags, ec); } // Wait until data can be received without blocking. 
std::size_t receive(base_implementation_type&, const null_buffers&, socket_base::message_flags, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Start an asynchronous receive. The buffer for the data being received // must be valid for the lifetime of the asynchronous operation. template void async_receive(base_implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags flags, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef winrt_socket_recv_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(buffers, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_receive")); start_receive_op(impl, buffer_sequence_adapter::first(buffers), flags, p.p, is_continuation); p.v = p.p = 0; } // Wait until data can be received without blocking. template void async_receive(base_implementation_type&, const null_buffers&, socket_base::message_flags, Handler& handler) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; io_service_.get_io_service().post( detail::bind_handler(handler, ec, bytes_transferred)); } protected: // Helper function to obtain endpoints associated with the connection. ASIO_DECL std::size_t do_get_endpoint( const base_implementation_type& impl, bool local, void* addr, std::size_t addr_len, asio::error_code& ec) const; // Helper function to set a socket option. ASIO_DECL asio::error_code do_set_option( base_implementation_type& impl, int level, int optname, const void* optval, std::size_t optlen, asio::error_code& ec); // Helper function to get a socket option. ASIO_DECL void do_get_option( const base_implementation_type& impl, int level, int optname, void* optval, std::size_t* optlen, asio::error_code& ec) const; // Helper function to perform a synchronous connect. ASIO_DECL asio::error_code do_connect( base_implementation_type& impl, const void* addr, asio::error_code& ec); // Helper function to start an asynchronous connect. ASIO_DECL void start_connect_op( base_implementation_type& impl, const void* addr, winrt_async_op* op, bool is_continuation); // Helper function to perform a synchronous send. ASIO_DECL std::size_t do_send( base_implementation_type& impl, const asio::const_buffer& data, socket_base::message_flags flags, asio::error_code& ec); // Helper function to start an asynchronous send. ASIO_DECL void start_send_op(base_implementation_type& impl, const asio::const_buffer& data, socket_base::message_flags flags, winrt_async_op* op, bool is_continuation); // Helper function to perform a synchronous receive. ASIO_DECL std::size_t do_receive( base_implementation_type& impl, const asio::mutable_buffer& data, socket_base::message_flags flags, asio::error_code& ec); // Helper function to start an asynchronous receive. ASIO_DECL void start_receive_op(base_implementation_type& impl, const asio::mutable_buffer& data, socket_base::message_flags flags, winrt_async_op* op, bool is_continuation); // The io_service implementation used for delivering completions. io_service_impl& io_service_; // The manager that keeps track of outstanding operations. winrt_async_manager& async_manager_; // Mutex to protect access to the linked list of implementations. asio::detail::mutex mutex_; // The head of a linked list of all implementations. 
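// Note: on Windows Runtime only plain buffer sends and receives are mapped
// onto the StreamSocket input/output streams; cancel(), io_control() and the
// null_buffers (reactive wait) forms above all complete with
// asio::error::operation_not_supported. Portable code should therefore stick
// to the buffer-based calls, e.g. (socket being a connected
// asio::ip::tcp::socket, assumed declared elsewhere):
//
//   char data[128];
//   socket.async_receive(asio::buffer(data),
//       [](const asio::error_code& ec, std::size_t n)
//       {
//         // consume n bytes on success
//       });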
base_implementation_type* impl_list_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/winrt_ssocket_service_base.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_WINRT_SSOCKET_SERVICE_BASE_HPP galera-3-25.3.20/asio/asio/detail/strand_service.hpp0000644000015300001660000001076613042054732022024 0ustar jenkinsjenkins// // detail/strand_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_STRAND_SERVICE_HPP #define ASIO_DETAIL_STRAND_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/io_service.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/scoped_ptr.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Default service implementation for a strand. class strand_service : public asio::detail::service_base { private: // Helper class to re-post the strand on exit. struct on_do_complete_exit; // Helper class to re-post the strand on exit. struct on_dispatch_exit; public: // The underlying implementation of a strand. class strand_impl : public operation { public: strand_impl(); private: // Only this service will have access to the internal values. friend class strand_service; friend struct on_do_complete_exit; friend struct on_dispatch_exit; // Mutex to protect access to internal data. asio::detail::mutex mutex_; // Indicates whether the strand is currently "locked" by a handler. This // means that there is a handler upcall in progress, or that the strand // itself has been scheduled in order to invoke some pending handlers. bool locked_; // The handlers that are waiting on the strand but should not be run until // after the next time the strand is scheduled. This queue must only be // modified while the mutex is locked. op_queue waiting_queue_; // The handlers that are ready to be run. Logically speaking, these are the // handlers that hold the strand's lock. The ready queue is only modified // from within the strand and so may be accessed without locking the mutex. op_queue ready_queue_; }; typedef strand_impl* implementation_type; // Construct a new strand service for the specified io_service. ASIO_DECL explicit strand_service(asio::io_service& io_service); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Construct a new strand implementation. ASIO_DECL void construct(implementation_type& impl); // Request the io_service to invoke the given handler. template void dispatch(implementation_type& impl, Handler& handler); // Request the io_service to invoke the given handler and return immediately. template void post(implementation_type& impl, Handler& handler); // Determine whether the strand is running in the current thread. ASIO_DECL bool running_in_this_thread( const implementation_type& impl) const; private: // Helper function to dispatch a handler. Returns true if the handler should // be dispatched immediately. ASIO_DECL bool do_dispatch(implementation_type& impl, operation* op); // Helper fiunction to post a handler. 
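// Usage sketch: this service implements asio::io_service::strand. Handlers
// queued through the same strand run one at a time (waiting_queue_ holds
// handlers submitted while another handler owns the strand's lock; they are
// moved to ready_queue_ when the strand is next scheduled), so the handlers
// themselves need no explicit mutex. Example (socket, buf and on_read are
// assumed declared elsewhere):
//
//   asio::io_service io;
//   asio::io_service::strand strand(io);
//   strand.post([]{ /* touches shared state */ });
//   strand.post([]{ /* never runs concurrently with the handler above */ });
//   socket.async_read_some(asio::buffer(buf), strand.wrap(on_read));
//   io.run();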
ASIO_DECL void do_post(implementation_type& impl, operation* op, bool is_continuation); ASIO_DECL static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& ec, std::size_t bytes_transferred); // The io_service implementation used to post completions. io_service_impl& io_service_; // Mutex to protect access to the array of implementations. asio::detail::mutex mutex_; // Number of implementations shared between all strand objects. #if defined(ASIO_STRAND_IMPLEMENTATIONS) enum { num_implementations = ASIO_STRAND_IMPLEMENTATIONS }; #else // defined(ASIO_STRAND_IMPLEMENTATIONS) enum { num_implementations = 193 }; #endif // defined(ASIO_STRAND_IMPLEMENTATIONS) // Pool of implementations. scoped_ptr implementations_[num_implementations]; // Extra value used when hashing to prevent recycled memory locations from // getting the same strand implementation. std::size_t salt_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/detail/impl/strand_service.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/strand_service.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_DETAIL_STRAND_SERVICE_HPP galera-3-25.3.20/asio/asio/detail/socket_select_interrupter.hpp0000644000015300001660000000462513042054732024300 0ustar jenkinsjenkins// // detail/socket_select_interrupter.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SOCKET_SELECT_INTERRUPTER_HPP #define ASIO_DETAIL_SOCKET_SELECT_INTERRUPTER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS_RUNTIME) #if defined(ASIO_WINDOWS) \ || defined(__CYGWIN__) \ || defined(__SYMBIAN32__) #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class socket_select_interrupter { public: // Constructor. ASIO_DECL socket_select_interrupter(); // Destructor. ASIO_DECL ~socket_select_interrupter(); // Recreate the interrupter's descriptors. Used after a fork. ASIO_DECL void recreate(); // Interrupt the select call. ASIO_DECL void interrupt(); // Reset the select interrupt. Returns true if the call was interrupted. ASIO_DECL bool reset(); // Get the read descriptor to be passed to select. socket_type read_descriptor() const { return read_descriptor_; } private: // Open the descriptors. Throws on error. ASIO_DECL void open_descriptors(); // Close the descriptors. ASIO_DECL void close_descriptors(); // The read end of a connection used to interrupt the select call. This file // descriptor is passed to select such that when it is time to stop, a single // byte will be written on the other end of the connection and this // descriptor will become readable. socket_type read_descriptor_; // The write end of a connection used to interrupt the select call. A single // byte may be written to this to wake up the select which is waiting for the // other end to become readable. 
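// In other words: interrupt() writes a single byte to write_descriptor_, which
// makes read_descriptor_ (always present in the select read set) readable and
// wakes the blocked select() call; reset() then drains the pending byte(s) and
// reports whether an interrupt actually occurred. The loopback socket pair is
// used on platforms where a pipe cannot be passed to select(), such as Windows.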
socket_type write_descriptor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/socket_select_interrupter.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_WINDOWS) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) #endif // !defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_SOCKET_SELECT_INTERRUPTER_HPP galera-3-25.3.20/asio/asio/detail/posix_static_mutex.hpp0000644000015300001660000000252113042054732022732 0ustar jenkinsjenkins// // detail/posix_static_mutex.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_POSIX_STATIC_MUTEX_HPP #define ASIO_DETAIL_POSIX_STATIC_MUTEX_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_PTHREADS) #include #include "asio/detail/scoped_lock.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct posix_static_mutex { typedef asio::detail::scoped_lock scoped_lock; // Initialise the mutex. void init() { // Nothing to do. } // Lock the mutex. void lock() { (void)::pthread_mutex_lock(&mutex_); // Ignore EINVAL. } // Unlock the mutex. void unlock() { (void)::pthread_mutex_unlock(&mutex_); // Ignore EINVAL. } ::pthread_mutex_t mutex_; }; #define ASIO_POSIX_STATIC_MUTEX_INIT { PTHREAD_MUTEX_INITIALIZER } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_PTHREADS) #endif // ASIO_DETAIL_POSIX_STATIC_MUTEX_HPP galera-3-25.3.20/asio/asio/detail/weak_ptr.hpp0000644000015300001660000000167513042054732020624 0ustar jenkinsjenkins// // detail/weak_ptr.hpp // ~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WEAK_PTR_HPP #define ASIO_DETAIL_WEAK_PTR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_SHARED_PTR) # include #else // defined(ASIO_HAS_STD_SHARED_PTR) # include #endif // defined(ASIO_HAS_STD_SHARED_PTR) namespace asio { namespace detail { #if defined(ASIO_HAS_STD_SHARED_PTR) using std::weak_ptr; #else // defined(ASIO_HAS_STD_SHARED_PTR) using boost::weak_ptr; #endif // defined(ASIO_HAS_STD_SHARED_PTR) } // namespace detail } // namespace asio #endif // ASIO_DETAIL_WEAK_PTR_HPP galera-3-25.3.20/asio/asio/detail/win_thread.hpp0000644000015300001660000000545013042054732021127 0ustar jenkinsjenkins// // detail/win_thread.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_THREAD_HPP #define ASIO_DETAIL_WIN_THREAD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) && !defined(UNDER_CE) #include "asio/detail/noncopyable.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { ASIO_DECL unsigned int __stdcall win_thread_function(void* arg); #if defined(WINVER) && (WINVER < 0x0500) ASIO_DECL void __stdcall apc_function(ULONG data); #else ASIO_DECL void __stdcall apc_function(ULONG_PTR data); #endif template class win_thread_base { public: static bool terminate_threads() { return ::InterlockedExchangeAdd(&terminate_threads_, 0) != 0; } static void set_terminate_threads(bool b) { ::InterlockedExchange(&terminate_threads_, b ? 1 : 0); } private: static long terminate_threads_; }; template long win_thread_base::terminate_threads_ = 0; class win_thread : private noncopyable, public win_thread_base { public: // Constructor. template win_thread(Function f, unsigned int stack_size = 0) : thread_(0), exit_event_(0) { start_thread(new func(f), stack_size); } // Destructor. ASIO_DECL ~win_thread(); // Wait for the thread to exit. ASIO_DECL void join(); private: friend ASIO_DECL unsigned int __stdcall win_thread_function(void* arg); #if defined(WINVER) && (WINVER < 0x0500) friend ASIO_DECL void __stdcall apc_function(ULONG); #else friend ASIO_DECL void __stdcall apc_function(ULONG_PTR); #endif class func_base { public: virtual ~func_base() {} virtual void run() = 0; ::HANDLE entry_event_; ::HANDLE exit_event_; }; struct auto_func_base_ptr { func_base* ptr; ~auto_func_base_ptr() { delete ptr; } }; template class func : public func_base { public: func(Function f) : f_(f) { } virtual void run() { f_(); } private: Function f_; }; ASIO_DECL void start_thread(func_base* arg, unsigned int stack_size); ::HANDLE thread_; ::HANDLE exit_event_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/win_thread.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_WINDOWS) && !defined(UNDER_CE) #endif // ASIO_DETAIL_WIN_THREAD_HPP galera-3-25.3.20/asio/asio/detail/select_interrupter.hpp0000644000015300001660000000243513042054732022725 0ustar jenkinsjenkins// // detail/select_interrupter.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SELECT_INTERRUPTER_HPP #define ASIO_DETAIL_SELECT_INTERRUPTER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS_RUNTIME) #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) || defined(__SYMBIAN32__) # include "asio/detail/socket_select_interrupter.hpp" #elif defined(ASIO_HAS_EVENTFD) # include "asio/detail/eventfd_select_interrupter.hpp" #else # include "asio/detail/pipe_select_interrupter.hpp" #endif namespace asio { namespace detail { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) || defined(__SYMBIAN32__) typedef socket_select_interrupter select_interrupter; #elif defined(ASIO_HAS_EVENTFD) typedef eventfd_select_interrupter select_interrupter; #else typedef pipe_select_interrupter select_interrupter; #endif } // namespace detail } // namespace asio #endif // !defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_SELECT_INTERRUPTER_HPP galera-3-25.3.20/asio/asio/detail/std_thread.hpp0000644000015300001660000000226613042054732021126 0ustar jenkinsjenkins// // detail/std_thread.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_STD_THREAD_HPP #define ASIO_DETAIL_STD_THREAD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_THREAD) #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class std_thread : private noncopyable { public: // Constructor. template std_thread(Function f, unsigned int = 0) : thread_(f) { } // Destructor. ~std_thread() { join(); } // Wait for the thread to exit. void join() { if (thread_.joinable()) thread_.join(); } private: std::thread thread_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_STD_THREAD) #endif // ASIO_DETAIL_STD_THREAD_HPP galera-3-25.3.20/asio/asio/detail/timer_scheduler.hpp0000644000015300001660000000204513042054732022156 0ustar jenkinsjenkins// // detail/timer_scheduler.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_TIMER_SCHEDULER_HPP #define ASIO_DETAIL_TIMER_SCHEDULER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/timer_scheduler_fwd.hpp" #if defined(ASIO_WINDOWS_RUNTIME) # include "asio/detail/winrt_timer_scheduler.hpp" #elif defined(ASIO_HAS_IOCP) # include "asio/detail/win_iocp_io_service.hpp" #elif defined(ASIO_HAS_EPOLL) # include "asio/detail/epoll_reactor.hpp" #elif defined(ASIO_HAS_KQUEUE) # include "asio/detail/kqueue_reactor.hpp" #elif defined(ASIO_HAS_DEV_POLL) # include "asio/detail/dev_poll_reactor.hpp" #else # include "asio/detail/select_reactor.hpp" #endif #endif // ASIO_DETAIL_TIMER_SCHEDULER_HPP galera-3-25.3.20/asio/asio/detail/null_signal_blocker.hpp0000644000015300001660000000274613042054732023020 0ustar jenkinsjenkins// // detail/null_signal_blocker.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_NULL_SIGNAL_BLOCKER_HPP #define ASIO_DETAIL_NULL_SIGNAL_BLOCKER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) \ || defined(ASIO_WINDOWS) \ || defined(ASIO_WINDOWS_RUNTIME) \ || defined(__CYGWIN__) \ || defined(__SYMBIAN32__) #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class null_signal_blocker : private noncopyable { public: // Constructor blocks all signals for the calling thread. null_signal_blocker() { } // Destructor restores the previous signal mask. ~null_signal_blocker() { } // Block all signals for the calling thread. void block() { } // Restore the previous signal mask. void unblock() { } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_HAS_THREADS) // || defined(ASIO_WINDOWS) // || defined(ASIO_WINDOWS_RUNTIME) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) #endif // ASIO_DETAIL_NULL_SIGNAL_BLOCKER_HPP galera-3-25.3.20/asio/asio/detail/posix_mutex.hpp0000644000015300001660000000302313042054732021361 0ustar jenkinsjenkins// // detail/posix_mutex.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_POSIX_MUTEX_HPP #define ASIO_DETAIL_POSIX_MUTEX_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_PTHREADS) #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/scoped_lock.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class posix_event; class posix_mutex : private noncopyable { public: typedef asio::detail::scoped_lock scoped_lock; // Constructor. ASIO_DECL posix_mutex(); // Destructor. ~posix_mutex() { ::pthread_mutex_destroy(&mutex_); // Ignore EBUSY. } // Lock the mutex. void lock() { (void)::pthread_mutex_lock(&mutex_); // Ignore EINVAL. 
} // Unlock the mutex. void unlock() { (void)::pthread_mutex_unlock(&mutex_); // Ignore EINVAL. } private: friend class posix_event; ::pthread_mutex_t mutex_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/posix_mutex.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_PTHREADS) #endif // ASIO_DETAIL_POSIX_MUTEX_HPP galera-3-25.3.20/asio/asio/detail/socket_ops.hpp0000644000015300001660000002447513042054732021164 0ustar jenkinsjenkins// // detail/socket_ops.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SOCKET_OPS_HPP #define ASIO_DETAIL_SOCKET_OPS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/error_code.hpp" #include "asio/detail/shared_ptr.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/weak_ptr.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { namespace socket_ops { // Socket state bits. enum { // The user wants a non-blocking socket. user_set_non_blocking = 1, // The socket has been set non-blocking. internal_non_blocking = 2, // Helper "state" used to determine whether the socket is non-blocking. non_blocking = user_set_non_blocking | internal_non_blocking, // User wants connection_aborted errors, which are disabled by default. enable_connection_aborted = 4, // The user set the linger option. Needs to be checked when closing. user_set_linger = 8, // The socket is stream-oriented. stream_oriented = 16, // The socket is datagram-oriented. datagram_oriented = 32, // The socket may have been dup()-ed. 
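// How these bits relate to the public API (a simplified view):
// user_set_non_blocking is set when the application calls
// socket.non_blocking(true), while internal_non_blocking is set by asio itself
// when a reactive backend needs the descriptor in non-blocking mode to
// implement asynchronous operations. socket.native_non_blocking() reports the
// actual mode of the underlying descriptor, which may be non-blocking even
// when non_blocking() returns false. For example:
//
//   socket.non_blocking(true);               // sets user_set_non_blocking
//   bool raw = socket.native_non_blocking(); // actual descriptor mode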
possible_dup = 64 }; typedef unsigned char state_type; struct noop_deleter { void operator()(void*) {} }; typedef shared_ptr shared_cancel_token_type; typedef weak_ptr weak_cancel_token_type; #if !defined(ASIO_WINDOWS_RUNTIME) ASIO_DECL socket_type accept(socket_type s, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec); ASIO_DECL socket_type sync_accept(socket_type s, state_type state, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec); #if defined(ASIO_HAS_IOCP) ASIO_DECL void complete_iocp_accept(socket_type s, void* output_buffer, DWORD address_length, socket_addr_type* addr, std::size_t* addrlen, socket_type new_socket, asio::error_code& ec); #else // defined(ASIO_HAS_IOCP) ASIO_DECL bool non_blocking_accept(socket_type s, state_type state, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec, socket_type& new_socket); #endif // defined(ASIO_HAS_IOCP) ASIO_DECL int bind(socket_type s, const socket_addr_type* addr, std::size_t addrlen, asio::error_code& ec); ASIO_DECL int close(socket_type s, state_type& state, bool destruction, asio::error_code& ec); ASIO_DECL bool set_user_non_blocking(socket_type s, state_type& state, bool value, asio::error_code& ec); ASIO_DECL bool set_internal_non_blocking(socket_type s, state_type& state, bool value, asio::error_code& ec); ASIO_DECL int shutdown(socket_type s, int what, asio::error_code& ec); ASIO_DECL int connect(socket_type s, const socket_addr_type* addr, std::size_t addrlen, asio::error_code& ec); ASIO_DECL void sync_connect(socket_type s, const socket_addr_type* addr, std::size_t addrlen, asio::error_code& ec); #if defined(ASIO_HAS_IOCP) ASIO_DECL void complete_iocp_connect(socket_type s, asio::error_code& ec); #endif // defined(ASIO_HAS_IOCP) ASIO_DECL bool non_blocking_connect(socket_type s, asio::error_code& ec); ASIO_DECL int socketpair(int af, int type, int protocol, socket_type sv[2], asio::error_code& ec); ASIO_DECL bool sockatmark(socket_type s, asio::error_code& ec); ASIO_DECL size_t available(socket_type s, asio::error_code& ec); ASIO_DECL int listen(socket_type s, int backlog, asio::error_code& ec); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) typedef WSABUF buf; #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) typedef iovec buf; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) ASIO_DECL void init_buf(buf& b, void* data, size_t size); ASIO_DECL void init_buf(buf& b, const void* data, size_t size); ASIO_DECL signed_size_type recv(socket_type s, buf* bufs, size_t count, int flags, asio::error_code& ec); ASIO_DECL size_t sync_recv(socket_type s, state_type state, buf* bufs, size_t count, int flags, bool all_empty, asio::error_code& ec); #if defined(ASIO_HAS_IOCP) ASIO_DECL void complete_iocp_recv(state_type state, const weak_cancel_token_type& cancel_token, bool all_empty, asio::error_code& ec, size_t bytes_transferred); #else // defined(ASIO_HAS_IOCP) ASIO_DECL bool non_blocking_recv(socket_type s, buf* bufs, size_t count, int flags, bool is_stream, asio::error_code& ec, size_t& bytes_transferred); #endif // defined(ASIO_HAS_IOCP) ASIO_DECL signed_size_type recvfrom(socket_type s, buf* bufs, size_t count, int flags, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec); ASIO_DECL size_t sync_recvfrom(socket_type s, state_type state, buf* bufs, size_t count, int flags, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec); #if defined(ASIO_HAS_IOCP) ASIO_DECL void complete_iocp_recvfrom( const weak_cancel_token_type& cancel_token, 
asio::error_code& ec); #else // defined(ASIO_HAS_IOCP) ASIO_DECL bool non_blocking_recvfrom(socket_type s, buf* bufs, size_t count, int flags, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec, size_t& bytes_transferred); #endif // defined(ASIO_HAS_IOCP) ASIO_DECL signed_size_type recvmsg(socket_type s, buf* bufs, size_t count, int in_flags, int& out_flags, asio::error_code& ec); ASIO_DECL size_t sync_recvmsg(socket_type s, state_type state, buf* bufs, size_t count, int in_flags, int& out_flags, asio::error_code& ec); #if defined(ASIO_HAS_IOCP) ASIO_DECL void complete_iocp_recvmsg( const weak_cancel_token_type& cancel_token, asio::error_code& ec); #else // defined(ASIO_HAS_IOCP) ASIO_DECL bool non_blocking_recvmsg(socket_type s, buf* bufs, size_t count, int in_flags, int& out_flags, asio::error_code& ec, size_t& bytes_transferred); #endif // defined(ASIO_HAS_IOCP) ASIO_DECL signed_size_type send(socket_type s, const buf* bufs, size_t count, int flags, asio::error_code& ec); ASIO_DECL size_t sync_send(socket_type s, state_type state, const buf* bufs, size_t count, int flags, bool all_empty, asio::error_code& ec); #if defined(ASIO_HAS_IOCP) ASIO_DECL void complete_iocp_send( const weak_cancel_token_type& cancel_token, asio::error_code& ec); #else // defined(ASIO_HAS_IOCP) ASIO_DECL bool non_blocking_send(socket_type s, const buf* bufs, size_t count, int flags, asio::error_code& ec, size_t& bytes_transferred); #endif // defined(ASIO_HAS_IOCP) ASIO_DECL signed_size_type sendto(socket_type s, const buf* bufs, size_t count, int flags, const socket_addr_type* addr, std::size_t addrlen, asio::error_code& ec); ASIO_DECL size_t sync_sendto(socket_type s, state_type state, const buf* bufs, size_t count, int flags, const socket_addr_type* addr, std::size_t addrlen, asio::error_code& ec); #if !defined(ASIO_HAS_IOCP) ASIO_DECL bool non_blocking_sendto(socket_type s, const buf* bufs, size_t count, int flags, const socket_addr_type* addr, std::size_t addrlen, asio::error_code& ec, size_t& bytes_transferred); #endif // !defined(ASIO_HAS_IOCP) ASIO_DECL socket_type socket(int af, int type, int protocol, asio::error_code& ec); ASIO_DECL int setsockopt(socket_type s, state_type& state, int level, int optname, const void* optval, std::size_t optlen, asio::error_code& ec); ASIO_DECL int getsockopt(socket_type s, state_type state, int level, int optname, void* optval, size_t* optlen, asio::error_code& ec); ASIO_DECL int getpeername(socket_type s, socket_addr_type* addr, std::size_t* addrlen, bool cached, asio::error_code& ec); ASIO_DECL int getsockname(socket_type s, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec); ASIO_DECL int ioctl(socket_type s, state_type& state, int cmd, ioctl_arg_type* arg, asio::error_code& ec); ASIO_DECL int select(int nfds, fd_set* readfds, fd_set* writefds, fd_set* exceptfds, timeval* timeout, asio::error_code& ec); ASIO_DECL int poll_read(socket_type s, state_type state, asio::error_code& ec); ASIO_DECL int poll_write(socket_type s, state_type state, asio::error_code& ec); ASIO_DECL int poll_connect(socket_type s, asio::error_code& ec); #endif // !defined(ASIO_WINDOWS_RUNTIME) ASIO_DECL const char* inet_ntop(int af, const void* src, char* dest, size_t length, unsigned long scope_id, asio::error_code& ec); ASIO_DECL int inet_pton(int af, const char* src, void* dest, unsigned long* scope_id, asio::error_code& ec); ASIO_DECL int gethostname(char* name, int namelen, asio::error_code& ec); #if !defined(ASIO_WINDOWS_RUNTIME) ASIO_DECL asio::error_code 
getaddrinfo(const char* host, const char* service, const addrinfo_type& hints, addrinfo_type** result, asio::error_code& ec); ASIO_DECL asio::error_code background_getaddrinfo( const weak_cancel_token_type& cancel_token, const char* host, const char* service, const addrinfo_type& hints, addrinfo_type** result, asio::error_code& ec); ASIO_DECL void freeaddrinfo(addrinfo_type* ai); ASIO_DECL asio::error_code getnameinfo( const socket_addr_type* addr, std::size_t addrlen, char* host, std::size_t hostlen, char* serv, std::size_t servlen, int flags, asio::error_code& ec); ASIO_DECL asio::error_code sync_getnameinfo( const socket_addr_type* addr, std::size_t addrlen, char* host, std::size_t hostlen, char* serv, std::size_t servlen, int sock_type, asio::error_code& ec); ASIO_DECL asio::error_code background_getnameinfo( const weak_cancel_token_type& cancel_token, const socket_addr_type* addr, std::size_t addrlen, char* host, std::size_t hostlen, char* serv, std::size_t servlen, int sock_type, asio::error_code& ec); #endif // !defined(ASIO_WINDOWS_RUNTIME) ASIO_DECL u_long_type network_to_host_long(u_long_type value); ASIO_DECL u_long_type host_to_network_long(u_long_type value); ASIO_DECL u_short_type network_to_host_short(u_short_type value); ASIO_DECL u_short_type host_to_network_short(u_short_type value); } // namespace socket_ops } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/socket_ops.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_DETAIL_SOCKET_OPS_HPP galera-3-25.3.20/asio/asio/detail/event.hpp0000644000015300001660000000231513042054732020121 0ustar jenkinsjenkins// // detail/event.hpp // ~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_EVENT_HPP #define ASIO_DETAIL_EVENT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) # include "asio/detail/null_event.hpp" #elif defined(ASIO_WINDOWS) # include "asio/detail/win_event.hpp" #elif defined(ASIO_HAS_PTHREADS) # include "asio/detail/posix_event.hpp" #elif defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) # include "asio/detail/std_event.hpp" #else # error Only Windows, POSIX and std::condition_variable are supported! #endif namespace asio { namespace detail { #if !defined(ASIO_HAS_THREADS) typedef null_event event; #elif defined(ASIO_WINDOWS) typedef win_event event; #elif defined(ASIO_HAS_PTHREADS) typedef posix_event event; #elif defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) typedef std_event event; #endif } // namespace detail } // namespace asio #endif // ASIO_DETAIL_EVENT_HPP galera-3-25.3.20/asio/asio/detail/kqueue_reactor.hpp0000644000015300001660000001567213042054732022030 0ustar jenkinsjenkins// // detail/kqueue_reactor.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2005 Stefan Arentz (stefan at soze dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_KQUEUE_REACTOR_HPP #define ASIO_DETAIL_KQUEUE_REACTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_KQUEUE) #include #include #include #include #include "asio/detail/limits.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/object_pool.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/select_interrupter.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/timer_queue_base.hpp" #include "asio/detail/timer_queue_set.hpp" #include "asio/detail/wait_op.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" // Older versions of Mac OS X may not define EV_OOBAND. #if !defined(EV_OOBAND) # define EV_OOBAND EV_FLAG1 #endif // !defined(EV_OOBAND) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class kqueue_reactor : public asio::detail::service_base { public: enum op_types { read_op = 0, write_op = 1, connect_op = 1, except_op = 2, max_ops = 3 }; // Per-descriptor queues. struct descriptor_state { friend class kqueue_reactor; friend class object_pool_access; descriptor_state* next_; descriptor_state* prev_; mutex mutex_; int descriptor_; int num_kevents_; // 1 == read only, 2 == read and write op_queue op_queue_[max_ops]; bool shutdown_; }; // Per-descriptor data. typedef descriptor_state* per_descriptor_data; // Constructor. ASIO_DECL kqueue_reactor(asio::io_service& io_service); // Destructor. ASIO_DECL ~kqueue_reactor(); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Recreate internal descriptors following a fork. ASIO_DECL void fork_service( asio::io_service::fork_event fork_ev); // Initialise the task. ASIO_DECL void init_task(); // Register a socket with the reactor. Returns 0 on success, system error // code on failure. ASIO_DECL int register_descriptor(socket_type descriptor, per_descriptor_data& descriptor_data); // Register a descriptor with an associated single operation. Returns 0 on // success, system error code on failure. ASIO_DECL int register_internal_descriptor( int op_type, socket_type descriptor, per_descriptor_data& descriptor_data, reactor_op* op); // Move descriptor registration from one descriptor_data object to another. ASIO_DECL void move_descriptor(socket_type descriptor, per_descriptor_data& target_descriptor_data, per_descriptor_data& source_descriptor_data); // Post a reactor operation for immediate completion. void post_immediate_completion(reactor_op* op, bool is_continuation) { io_service_.post_immediate_completion(op, is_continuation); } // Start a new operation. The reactor operation will be performed when the // given descriptor is flagged as ready, or an error has occurred. ASIO_DECL void start_op(int op_type, socket_type descriptor, per_descriptor_data& descriptor_data, reactor_op* op, bool is_continuation, bool allow_speculative); // Cancel all operations associated with the given descriptor. The // handlers associated with the descriptor will be invoked with the // operation_aborted error. ASIO_DECL void cancel_ops(socket_type descriptor, per_descriptor_data& descriptor_data); // Cancel any operations that are running against the descriptor and remove // its registration from the reactor. 
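// Overview: each registered descriptor gets a descriptor_state holding three
// op_queues (read_op, write_op/connect_op, except_op), and num_kevents_
// records which kevent filters are currently registered (1 = EVFILT_READ only,
// 2 = read and write, as noted above). run() blocks in kevent() with a timeout
// bounded by the timer queues, and interrupt() uses the select_interrupter to
// make a blocked kevent() call return early, e.g. when new work or an earlier
// timer is posted from another thread.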
ASIO_DECL void deregister_descriptor(socket_type descriptor, per_descriptor_data& descriptor_data, bool closing); // Remote the descriptor's registration from the reactor. ASIO_DECL void deregister_internal_descriptor( socket_type descriptor, per_descriptor_data& descriptor_data); // Add a new timer queue to the reactor. template void add_timer_queue(timer_queue& queue); // Remove a timer queue from the reactor. template void remove_timer_queue(timer_queue& queue); // Schedule a new operation in the given timer queue to expire at the // specified absolute time. template void schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op); // Cancel the timer operations associated with the given token. Returns the // number of operations that have been posted or dispatched. template std::size_t cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled = (std::numeric_limits::max)()); // Run the kqueue loop. ASIO_DECL void run(bool block, op_queue& ops); // Interrupt the kqueue loop. ASIO_DECL void interrupt(); private: // Create the kqueue file descriptor. Throws an exception if the descriptor // cannot be created. ASIO_DECL static int do_kqueue_create(); // Allocate a new descriptor state object. ASIO_DECL descriptor_state* allocate_descriptor_state(); // Free an existing descriptor state object. ASIO_DECL void free_descriptor_state(descriptor_state* s); // Helper function to add a new timer queue. ASIO_DECL void do_add_timer_queue(timer_queue_base& queue); // Helper function to remove a timer queue. ASIO_DECL void do_remove_timer_queue(timer_queue_base& queue); // Get the timeout value for the kevent call. ASIO_DECL timespec* get_timeout(timespec& ts); // The io_service implementation used to post completions. io_service_impl& io_service_; // Mutex to protect access to internal data. mutex mutex_; // The kqueue file descriptor. int kqueue_fd_; // The interrupter is used to break a blocking kevent call. select_interrupter interrupter_; // The timer queues. timer_queue_set timer_queues_; // Whether the service has been shut down. bool shutdown_; // Mutex to protect access to the registered descriptors. mutex registered_descriptors_mutex_; // Keep track of all registered descriptors. object_pool registered_descriptors_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/detail/impl/kqueue_reactor.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/kqueue_reactor.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_KQUEUE) #endif // ASIO_DETAIL_KQUEUE_REACTOR_HPP galera-3-25.3.20/asio/asio/detail/timer_queue_set.hpp0000644000015300001660000000317213042054732022201 0ustar jenkinsjenkins// // detail/timer_queue_set.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_TIMER_QUEUE_SET_HPP #define ASIO_DETAIL_TIMER_QUEUE_SET_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/timer_queue_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class timer_queue_set { public: // Constructor. 
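// The set is kept as an intrusive singly linked list of timer_queue_base
// objects, one queue per clock/traits type in use. Reactors call
// wait_duration_msec()/wait_duration_usec() to bound how long they block and
// get_ready_timers() to collect expired handlers. From the public API this is
// what an asio::steady_timer wait ends up using, e.g.:
//
//   asio::io_service io;
//   asio::steady_timer timer(io, std::chrono::seconds(1));
//   timer.async_wait([](const asio::error_code& ec)
//   {
//     // fires after one second, or with operation_aborted if cancelled
//   });
//   io.run();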
ASIO_DECL timer_queue_set(); // Add a timer queue to the set. ASIO_DECL void insert(timer_queue_base* q); // Remove a timer queue from the set. ASIO_DECL void erase(timer_queue_base* q); // Determine whether all queues are empty. ASIO_DECL bool all_empty() const; // Get the wait duration in milliseconds. ASIO_DECL long wait_duration_msec(long max_duration) const; // Get the wait duration in microseconds. ASIO_DECL long wait_duration_usec(long max_duration) const; // Dequeue all ready timers. ASIO_DECL void get_ready_timers(op_queue& ops); // Dequeue all timers. ASIO_DECL void get_all_timers(op_queue& ops); private: timer_queue_base* first_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/timer_queue_set.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_DETAIL_TIMER_QUEUE_SET_HPP galera-3-25.3.20/asio/asio/detail/io_control.hpp0000644000015300001660000000514413042054732021152 0ustar jenkinsjenkins// // detail/io_control.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IO_CONTROL_HPP #define ASIO_DETAIL_IO_CONTROL_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { namespace io_control { // IO control command for non-blocking I/O. class non_blocking_io { public: // Default constructor. non_blocking_io() : value_(0) { } // Construct with a specific command value. non_blocking_io(bool value) : value_(value ? 1 : 0) { } // Get the name of the IO control command. int name() const { return static_cast(ASIO_OS_DEF(FIONBIO)); } // Set the value of the I/O control command. void set(bool value) { value_ = value ? 1 : 0; } // Get the current value of the I/O control command. bool get() const { return value_ != 0; } // Get the address of the command data. detail::ioctl_arg_type* data() { return &value_; } // Get the address of the command data. const detail::ioctl_arg_type* data() const { return &value_; } private: detail::ioctl_arg_type value_; }; // I/O control command for getting number of bytes available. class bytes_readable { public: // Default constructor. bytes_readable() : value_(0) { } // Construct with a specific command value. bytes_readable(std::size_t value) : value_(static_cast(value)) { } // Get the name of the IO control command. int name() const { return static_cast(ASIO_OS_DEF(FIONREAD)); } // Set the value of the I/O control command. void set(std::size_t value) { value_ = static_cast(value); } // Get the current value of the I/O control command. std::size_t get() const { return static_cast(value_); } // Get the address of the command data. detail::ioctl_arg_type* data() { return &value_; } // Get the address of the command data. 
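// Illustrative sketch (not part of this header): these command classes back
// the public socket_base I/O control commands (FIONBIO / FIONREAD). Typical
// user-level usage of the FIONREAD-based command looks along these lines;
// error handling is omitted and the function name is a placeholder:
#if 0
#include "asio.hpp"

std::size_t readable_bytes(asio::ip::tcp::socket& socket)
{
  asio::socket_base::bytes_readable command(true);
  socket.io_control(command);   // issues the FIONREAD ioctl under the covers
  return command.get();         // bytes that can be read without blocking
}
#endif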
const detail::ioctl_arg_type* data() const { return &value_; } private: detail::ioctl_arg_type value_; }; } // namespace io_control } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IO_CONTROL_HPP galera-3-25.3.20/asio/asio/detail/win_iocp_socket_recv_op.hpp0000644000015300001660000000713013042054732023674 0ustar jenkinsjenkins// // detail/win_iocp_socket_recv_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_SOCKET_RECV_OP_HPP #define ASIO_DETAIL_WIN_IOCP_SOCKET_RECV_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class win_iocp_socket_recv_op : public operation { public: ASIO_DEFINE_HANDLER_PTR(win_iocp_socket_recv_op); win_iocp_socket_recv_op(socket_ops::state_type state, socket_ops::weak_cancel_token_type cancel_token, const MutableBufferSequence& buffers, Handler& handler) : operation(&win_iocp_socket_recv_op::do_complete), state_(state), cancel_token_(cancel_token), buffers_(buffers), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& result_ec, std::size_t bytes_transferred) { asio::error_code ec(result_ec); // Take ownership of the operation object. win_iocp_socket_recv_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) // Check whether buffers are still valid. if (owner) { buffer_sequence_adapter::validate(o->buffers_); } #endif // defined(ASIO_ENABLE_BUFFER_DEBUGGING) socket_ops::complete_iocp_recv(o->state_, o->cancel_token_, buffer_sequence_adapter::all_empty(o->buffers_), ec, bytes_transferred); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, ec, bytes_transferred); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. 
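// Illustrative sketch (not part of asio): the completion sequence used in
// do_complete() above -- copy the handler out of the operation's memory,
// release that memory, then invoke the copy -- in a simplified, hypothetical
// form without the IOCP specifics (real handlers take an error_code):
#if 0
#include <cstddef>
#include <new>

template <typename Handler>
void complete_and_invoke(Handler* stored, int ec, std::size_t n)
{
  Handler local(*stored);      // 1. copy first: the copy (or a sub-object of
                               //    it) may be the true owner of the memory
  stored->~Handler();          // 2. destroy and free the operation object
  ::operator delete(stored);   //    before the upcall, so it can be reused
  local(ec, n);                // 3. make the upcall on the local copy only
}
#endif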
if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: socket_ops::state_type state_; socket_ops::weak_cancel_token_type cancel_token_; MutableBufferSequence buffers_; Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_SOCKET_RECV_OP_HPP galera-3-25.3.20/asio/asio/detail/thread_info_base.hpp0000644000015300001660000000414113042054732022253 0ustar jenkinsjenkins// // detail/thread_info_base.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_THREAD_INFO_BASE_HPP #define ASIO_DETAIL_THREAD_INFO_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class thread_info_base : private noncopyable { public: thread_info_base() : reusable_memory_(0) { } ~thread_info_base() { if (reusable_memory_) ::operator delete(reusable_memory_); } static void* allocate(thread_info_base* this_thread, std::size_t size) { if (this_thread && this_thread->reusable_memory_) { void* const pointer = this_thread->reusable_memory_; this_thread->reusable_memory_ = 0; unsigned char* const mem = static_cast(pointer); if (static_cast(mem[0]) >= size) { mem[size] = mem[0]; return pointer; } ::operator delete(pointer); } void* const pointer = ::operator new(size + 1); unsigned char* const mem = static_cast(pointer); mem[size] = (size <= UCHAR_MAX) ? static_cast(size) : 0; return pointer; } static void deallocate(thread_info_base* this_thread, void* pointer, std::size_t size) { if (size <= UCHAR_MAX) { if (this_thread && this_thread->reusable_memory_ == 0) { unsigned char* const mem = static_cast(pointer); mem[0] = mem[size]; this_thread->reusable_memory_ = pointer; return; } } ::operator delete(pointer); } private: void* reusable_memory_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_THREAD_INFO_BASE_HPP galera-3-25.3.20/asio/asio/detail/gcc_arm_fenced_block.hpp0000644000015300001660000000410513042054732023050 0ustar jenkinsjenkins// // detail/gcc_arm_fenced_block.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_GCC_ARM_FENCED_BLOCK_HPP #define ASIO_DETAIL_GCC_ARM_FENCED_BLOCK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(__GNUC__) && defined(__arm__) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class gcc_arm_fenced_block : private noncopyable { public: enum half_t { half }; enum full_t { full }; // Constructor for a half fenced block. explicit gcc_arm_fenced_block(half_t) { } // Constructor for a full fenced block. 
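// Illustrative sketch (not part of asio): a fenced block simply brackets the
// handler upcall with memory barriers so that writes made by the reactor
// thread are visible to the handler and vice versa. On a C++11 compiler the
// same idea could be expressed with std::atomic_thread_fence; the class below
// is a hypothetical equivalent shown only for comparison with the inline-asm
// barriers used here.
#if 0
#include <atomic>

class cxx11_fenced_block
{
public:
  enum half_t { half };
  enum full_t { full };
  explicit cxx11_fenced_block(half_t) {}                   // no fence on entry
  explicit cxx11_fenced_block(full_t)
  { std::atomic_thread_fence(std::memory_order_seq_cst); } // full fence on entry
  ~cxx11_fenced_block()
  { std::atomic_thread_fence(std::memory_order_seq_cst); } // fence on exit
};
#endif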
explicit gcc_arm_fenced_block(full_t) { barrier(); } // Destructor. ~gcc_arm_fenced_block() { barrier(); } private: static void barrier() { #if defined(__ARM_ARCH_4__) \ || defined(__ARM_ARCH_4T__) \ || defined(__ARM_ARCH_5__) \ || defined(__ARM_ARCH_5E__) \ || defined(__ARM_ARCH_5T__) \ || defined(__ARM_ARCH_5TE__) \ || defined(__ARM_ARCH_5TEJ__) \ || defined(__ARM_ARCH_6__) \ || defined(__ARM_ARCH_6J__) \ || defined(__ARM_ARCH_6K__) \ || defined(__ARM_ARCH_6Z__) \ || defined(__ARM_ARCH_6ZK__) \ || defined(__ARM_ARCH_6T2__) # if defined(__thumb__) // This is just a placeholder and almost certainly not sufficient. __asm__ __volatile__ ("" : : : "memory"); # else // defined(__thumb__) int a = 0, b = 0; __asm__ __volatile__ ("swp %0, %1, [%2]" : "=&r"(a) : "r"(1), "r"(&b) : "memory", "cc"); # endif // defined(__thumb__) #else // ARMv7 and later. __asm__ __volatile__ ("dmb" : : : "memory"); #endif } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(__GNUC__) && defined(__arm__) #endif // ASIO_DETAIL_GCC_ARM_FENCED_BLOCK_HPP galera-3-25.3.20/asio/asio/detail/config.hpp0000644000015300001660000010760213042054732020252 0ustar jenkinsjenkins// // detail/config.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_CONFIG_HPP #define ASIO_DETAIL_CONFIG_HPP #if defined(ASIO_STANDALONE) # define ASIO_DISABLE_BOOST_ARRAY 1 # define ASIO_DISABLE_BOOST_ASSERT 1 # define ASIO_DISABLE_BOOST_BIND 1 # define ASIO_DISABLE_BOOST_CHRONO 1 # define ASIO_DISABLE_BOOST_DATE_TIME 1 # define ASIO_DISABLE_BOOST_LIMITS 1 # define ASIO_DISABLE_BOOST_REGEX 1 # define ASIO_DISABLE_BOOST_STATIC_CONSTANT 1 # define ASIO_DISABLE_BOOST_THROW_EXCEPTION 1 # define ASIO_DISABLE_BOOST_WORKAROUND 1 #else // defined(ASIO_STANDALONE) # include # include # define ASIO_HAS_BOOST_CONFIG 1 #endif // defined(ASIO_STANDALONE) // Default to a header-only implementation. The user must specifically request // separate compilation by defining either ASIO_SEPARATE_COMPILATION or // ASIO_DYN_LINK (as a DLL/shared library implies separate compilation). #if !defined(ASIO_HEADER_ONLY) # if !defined(ASIO_SEPARATE_COMPILATION) # if !defined(ASIO_DYN_LINK) # define ASIO_HEADER_ONLY 1 # endif // !defined(ASIO_DYN_LINK) # endif // !defined(ASIO_SEPARATE_COMPILATION) #endif // !defined(ASIO_HEADER_ONLY) #if defined(ASIO_HEADER_ONLY) # define ASIO_DECL inline #else // defined(ASIO_HEADER_ONLY) # if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CODEGEARC__) // We need to import/export our code only if the user has specifically asked // for it by defining ASIO_DYN_LINK. # if defined(ASIO_DYN_LINK) // Export if this is our own source, otherwise import. # if defined(ASIO_SOURCE) # define ASIO_DECL __declspec(dllexport) # else // defined(ASIO_SOURCE) # define ASIO_DECL __declspec(dllimport) # endif // defined(ASIO_SOURCE) # endif // defined(ASIO_DYN_LINK) # endif // defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CODEGEARC__) #endif // defined(ASIO_HEADER_ONLY) // If ASIO_DECL isn't defined yet define it now. #if !defined(ASIO_DECL) # define ASIO_DECL #endif // !defined(ASIO_DECL) // Microsoft Visual C++ detection. 
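// Illustrative note (not part of asio): the ASIO_HEADER_ONLY / ASIO_DECL
// machinery above means a project may either use asio purely header-only
// (the default) or request separate compilation, in which case the
// non-inline implementation has to be compiled into exactly one translation
// unit, roughly as follows:
#if 0
// asio_impl.cpp -- the single TU holding asio's out-of-line implementation.
// Build every TU with -DASIO_SEPARATE_COMPILATION (or ASIO_DYN_LINK for a
// shared library, which also makes ASIO_DECL expand to dllexport/dllimport).
#include "asio/impl/src.hpp"
#endif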
#if !defined(ASIO_MSVC) # if defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_MSVC) # define ASIO_MSVC BOOST_MSVC # elif defined(_MSC_VER) && !defined(__MWERKS__) && !defined(__EDG_VERSION__) # define ASIO_MSVC _MSC_VER # endif // defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_MSVC) #endif // defined(ASIO_MSVC) // Clang / libc++ detection. #if defined(__clang__) # if (__cplusplus >= 201103) # if __has_include(<__config>) # include <__config> # if defined(_LIBCPP_VERSION) # define ASIO_HAS_CLANG_LIBCXX 1 # endif // defined(_LIBCPP_VERSION) # endif // __has_include(<__config>) # endif // (__cplusplus >= 201103) #endif // defined(__clang__) // Support move construction and assignment on compilers known to allow it. #if !defined(ASIO_HAS_MOVE) # if !defined(ASIO_DISABLE_MOVE) # if defined(__clang__) # if __has_feature(__cxx_rvalue_references__) # define ASIO_HAS_MOVE 1 # endif // __has_feature(__cxx_rvalue_references__) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_MOVE 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_MOVE 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_MOVE) #endif // !defined(ASIO_HAS_MOVE) // If ASIO_MOVE_CAST isn't defined, and move support is available, define // ASIO_MOVE_ARG and ASIO_MOVE_CAST to take advantage of rvalue // references and perfect forwarding. #if defined(ASIO_HAS_MOVE) && !defined(ASIO_MOVE_CAST) # define ASIO_MOVE_ARG(type) type&& # define ASIO_MOVE_CAST(type) static_cast # define ASIO_MOVE_CAST2(type1, type2) static_cast #endif // defined(ASIO_HAS_MOVE) && !defined(ASIO_MOVE_CAST) // If ASIO_MOVE_CAST still isn't defined, default to a C++03-compatible // implementation. Note that older g++ and MSVC versions don't like it when you // pass a non-member function through a const reference, so for most compilers // we'll play it safe and stick with the old approach of passing the handler by // value. #if !defined(ASIO_MOVE_CAST) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)) || (__GNUC__ > 4) # define ASIO_MOVE_ARG(type) const type& # else // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)) || (__GNUC__ > 4) # define ASIO_MOVE_ARG(type) type # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)) || (__GNUC__ > 4) # elif defined(ASIO_MSVC) # if (_MSC_VER >= 1400) # define ASIO_MOVE_ARG(type) const type& # else // (_MSC_VER >= 1400) # define ASIO_MOVE_ARG(type) type # endif // (_MSC_VER >= 1400) # else # define ASIO_MOVE_ARG(type) type # endif # define ASIO_MOVE_CAST(type) static_cast # define ASIO_MOVE_CAST2(type1, type2) static_cast #endif // !defined(ASIO_MOVE_CAST) // Support variadic templates on compilers known to allow it. 
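// Illustrative sketch (not part of asio): the ASIO_MOVE_ARG / ASIO_MOVE_CAST
// macros defined above let a function sink a handler by rvalue reference on
// C++11 compilers and fall back to pass-by-value or const reference on C++03.
// A hypothetical function written against them:
#if 0
template <typename Handler>
void post_sketch(ASIO_MOVE_ARG(Handler) handler)
{
  // Expands to static_cast<Handler&&>(handler) when moves are available, and
  // to a const-reference cast (i.e. a plain copy) on the C++03 fallback path.
  Handler local(ASIO_MOVE_CAST(Handler)(handler));
  local();
}
#endif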
#if !defined(ASIO_HAS_VARIADIC_TEMPLATES) # if !defined(ASIO_DISABLE_VARIADIC_TEMPLATES) # if defined(__clang__) # if __has_feature(__cxx_variadic_templates__) # define ASIO_HAS_VARIADIC_TEMPLATES 1 # endif // __has_feature(__cxx_variadic_templates__) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_VARIADIC_TEMPLATES 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # endif // !defined(ASIO_DISABLE_VARIADIC_TEMPLATES) #endif // !defined(ASIO_HAS_VARIADIC_TEMPLATES) // Support constexpr on compilers known to allow it. #if !defined(ASIO_HAS_CONSTEXPR) # if !defined(ASIO_DISABLE_CONSTEXPR) # if defined(__clang__) # if __has_feature(__cxx_constexpr__) # define ASIO_HAS_CONSTEXPR 1 # endif // __has_feature(__cxx_constexr__) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_CONSTEXPR 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # endif // !defined(ASIO_DISABLE_CONSTEXPR) #endif // !defined(ASIO_HAS_CONSTEXPR) #if !defined(ASIO_CONSTEXPR) # if defined(ASIO_HAS_CONSTEXPR) # define ASIO_CONSTEXPR constexpr # else // defined(ASIO_HAS_CONSTEXPR) # define ASIO_CONSTEXPR # endif // defined(ASIO_HAS_CONSTEXPR) #endif // !defined(ASIO_CONSTEXPR) // Standard library support for system errors. #if !defined(ASIO_HAS_STD_SYSTEM_ERROR) # if !defined(ASIO_DISABLE_STD_SYSTEM_ERROR) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_SYSTEM_ERROR 1 # elif (__cplusplus >= 201103) # if __has_include() # define ASIO_HAS_STD_SYSTEM_ERROR 1 # endif // __has_include() # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_SYSTEM_ERROR 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_STD_SYSTEM_ERROR 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_SYSTEM_ERROR) #endif // !defined(ASIO_HAS_STD_SYSTEM_ERROR) // Compliant C++11 compilers put noexcept specifiers on error_category members. 
#if !defined(ASIO_ERROR_CATEGORY_NOEXCEPT) # if (BOOST_VERSION >= 105300) # define ASIO_ERROR_CATEGORY_NOEXCEPT BOOST_NOEXCEPT # elif defined(__clang__) # if __has_feature(__cxx_noexcept__) # define ASIO_ERROR_CATEGORY_NOEXCEPT noexcept(true) # endif // __has_feature(__cxx_noexcept__) # elif defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_ERROR_CATEGORY_NOEXCEPT noexcept(true) # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1900) # define ASIO_ERROR_CATEGORY_NOEXCEPT noexcept(true) # endif // (_MSC_VER >= 1900) # endif // defined(ASIO_MSVC) # if !defined(ASIO_ERROR_CATEGORY_NOEXCEPT) # define ASIO_ERROR_CATEGORY_NOEXCEPT # endif // !defined(ASIO_ERROR_CATEGORY_NOEXCEPT) #endif // !defined(ASIO_ERROR_CATEGORY_NOEXCEPT) // Standard library support for arrays. #if !defined(ASIO_HAS_STD_ARRAY) # if !defined(ASIO_DISABLE_STD_ARRAY) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_ARRAY 1 # elif (__cplusplus >= 201103) # if __has_include() # define ASIO_HAS_STD_ARRAY 1 # endif // __has_include() # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_ARRAY 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1600) # define ASIO_HAS_STD_ARRAY 1 # endif // (_MSC_VER >= 1600) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_ARRAY) #endif // !defined(ASIO_HAS_STD_ARRAY) // Standard library support for shared_ptr and weak_ptr. #if !defined(ASIO_HAS_STD_SHARED_PTR) # if !defined(ASIO_DISABLE_STD_SHARED_PTR) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_SHARED_PTR 1 # elif (__cplusplus >= 201103) # define ASIO_HAS_STD_SHARED_PTR 1 # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_SHARED_PTR 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1600) # define ASIO_HAS_STD_SHARED_PTR 1 # endif // (_MSC_VER >= 1600) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_SHARED_PTR) #endif // !defined(ASIO_HAS_STD_SHARED_PTR) // Standard library support for atomic operations. 
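// Illustrative sketch (not part of asio): ASIO_ERROR_CATEGORY_NOEXCEPT above
// exists so that error_category overrides carry noexcept only where the
// underlying standard library requires it. A hypothetical category written
// against the macro, mirroring the pattern asio's own categories use:
#if 0
#include "asio/error_code.hpp"
#include <string>

class sketch_category : public asio::error_category
{
public:
  const char* name() const ASIO_ERROR_CATEGORY_NOEXCEPT
  { return "sketch"; }

  std::string message(int value) const
  { return value == 0 ? "success" : "sketch error"; }
};
#endif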
#if !defined(ASIO_HAS_STD_ATOMIC) # if !defined(ASIO_DISABLE_STD_ATOMIC) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_ATOMIC 1 # elif (__cplusplus >= 201103) # if __has_include() # define ASIO_HAS_STD_ATOMIC 1 # endif // __has_include() # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_ATOMIC 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_STD_ATOMIC 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_ATOMIC) #endif // !defined(ASIO_HAS_STD_ATOMIC) // Standard library support for chrono. Some standard libraries (such as the // libstdc++ shipped with gcc 4.6) provide monotonic_clock as per early C++0x // drafts, rather than the eventually standardised name of steady_clock. #if !defined(ASIO_HAS_STD_CHRONO) # if !defined(ASIO_DISABLE_STD_CHRONO) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_CHRONO 1 # elif (__cplusplus >= 201103) # if __has_include() # define ASIO_HAS_STD_CHRONO 1 # endif // __has_include() # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_CHRONO 1 # if ((__GNUC__ == 4) && (__GNUC_MINOR__ == 6)) # define ASIO_HAS_STD_CHRONO_MONOTONIC_CLOCK 1 # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ == 6)) # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_STD_CHRONO 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_CHRONO) #endif // !defined(ASIO_HAS_STD_CHRONO) // Boost support for chrono. #if !defined(ASIO_HAS_BOOST_CHRONO) # if !defined(ASIO_DISABLE_BOOST_CHRONO) # if (BOOST_VERSION >= 104700) # define ASIO_HAS_BOOST_CHRONO 1 # endif // (BOOST_VERSION >= 104700) # endif // !defined(ASIO_DISABLE_BOOST_CHRONO) #endif // !defined(ASIO_HAS_BOOST_CHRONO) // Boost support for the DateTime library. #if !defined(ASIO_HAS_BOOST_DATE_TIME) # if !defined(ASIO_DISABLE_BOOST_DATE_TIME) # define ASIO_HAS_BOOST_DATE_TIME 1 # endif // !defined(ASIO_DISABLE_BOOST_DATE_TIME) #endif // !defined(ASIO_HAS_BOOST_DATE_TIME) // Standard library support for addressof. 
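// Illustrative sketch (not part of this header): the chrono detection above
// also records, via ASIO_HAS_STD_CHRONO_MONOTONIC_CLOCK, that libstdc++ 4.6
// shipped the pre-standard name monotonic_clock instead of steady_clock.
// Code that must run on both would select the clock roughly like this
// (hypothetical alias, shown for explanation only):
#if 0
#include <chrono>

#if defined(ASIO_HAS_STD_CHRONO_MONOTONIC_CLOCK)
typedef std::chrono::monotonic_clock portable_steady_clock;
#else
typedef std::chrono::steady_clock portable_steady_clock;
#endif
#endif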
#if !defined(ASIO_HAS_STD_ADDRESSOF) # if !defined(ASIO_DISABLE_STD_ADDRESSOF) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_ADDRESSOF 1 # elif (__cplusplus >= 201103) # define ASIO_HAS_STD_ADDRESSOF 1 # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_ADDRESSOF 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_STD_ADDRESSOF 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_ADDRESSOF) #endif // !defined(ASIO_HAS_STD_ADDRESSOF) // Standard library support for the function class. #if !defined(ASIO_HAS_STD_FUNCTION) # if !defined(ASIO_DISABLE_STD_FUNCTION) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_FUNCTION 1 # elif (__cplusplus >= 201103) # define ASIO_HAS_STD_FUNCTION 1 # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_FUNCTION 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_STD_FUNCTION 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_FUNCTION) #endif // !defined(ASIO_HAS_STD_FUNCTION) // Standard library support for type traits. #if !defined(ASIO_HAS_STD_TYPE_TRAITS) # if !defined(ASIO_DISABLE_STD_TYPE_TRAITS) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_TYPE_TRAITS 1 # elif (__cplusplus >= 201103) # if __has_include() # define ASIO_HAS_STD_TYPE_TRAITS 1 # endif // __has_include() # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_TYPE_TRAITS 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_STD_TYPE_TRAITS 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_TYPE_TRAITS) #endif // !defined(ASIO_HAS_STD_TYPE_TRAITS) // Standard library support for the cstdint header. 
#if !defined(ASIO_HAS_CSTDINT) # if !defined(ASIO_DISABLE_CSTDINT) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_CSTDINT 1 # elif (__cplusplus >= 201103) # define ASIO_HAS_CSTDINT 1 # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_CSTDINT 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_CSTDINT 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_CSTDINT) #endif // !defined(ASIO_HAS_CSTDINT) // Standard library support for the thread class. #if !defined(ASIO_HAS_STD_THREAD) # if !defined(ASIO_DISABLE_STD_THREAD) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_THREAD 1 # elif (__cplusplus >= 201103) # if __has_include() # define ASIO_HAS_STD_THREAD 1 # endif // __has_include() # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_THREAD 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_STD_THREAD 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_THREAD) #endif // !defined(ASIO_HAS_STD_THREAD) // Standard library support for the mutex and condition variable classes. #if !defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) # if !defined(ASIO_DISABLE_STD_MUTEX_AND_CONDVAR) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_MUTEX_AND_CONDVAR 1 # elif (__cplusplus >= 201103) # if __has_include() # define ASIO_HAS_STD_MUTEX_AND_CONDVAR 1 # endif // __has_include() # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_MUTEX_AND_CONDVAR 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_STD_MUTEX_AND_CONDVAR 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_MUTEX_AND_CONDVAR) #endif // !defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) // WinRT target. #if !defined(ASIO_WINDOWS_RUNTIME) # if defined(__cplusplus_winrt) # include # if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) \ && !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) # define ASIO_WINDOWS_RUNTIME 1 # endif // WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) // && !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) # endif // defined(__cplusplus_winrt) #endif // !defined(ASIO_WINDOWS_RUNTIME) // Windows target. Excludes WinRT. 
#if !defined(ASIO_WINDOWS) # if !defined(ASIO_WINDOWS_RUNTIME) # if defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_WINDOWS) # define ASIO_WINDOWS 1 # elif defined(WIN32) || defined(_WIN32) || defined(__WIN32__) # define ASIO_WINDOWS 1 # endif // defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_WINDOWS) # endif // !defined(ASIO_WINDOWS_RUNTIME) #endif // !defined(ASIO_WINDOWS) // Windows: target OS version. #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) # if !defined(_WIN32_WINNT) && !defined(_WIN32_WINDOWS) # if defined(_MSC_VER) || defined(__BORLANDC__) # pragma message( \ "Please define _WIN32_WINNT or _WIN32_WINDOWS appropriately. For example:\n"\ "- add -D_WIN32_WINNT=0x0501 to the compiler command line; or\n"\ "- add _WIN32_WINNT=0x0501 to your project's Preprocessor Definitions.\n"\ "Assuming _WIN32_WINNT=0x0501 (i.e. Windows XP target).") # else // defined(_MSC_VER) || defined(__BORLANDC__) # warning Please define _WIN32_WINNT or _WIN32_WINDOWS appropriately. # warning For example, add -D_WIN32_WINNT=0x0501 to the compiler command line. # warning Assuming _WIN32_WINNT=0x0501 (i.e. Windows XP target). # endif // defined(_MSC_VER) || defined(__BORLANDC__) # define _WIN32_WINNT 0x0501 # endif // !defined(_WIN32_WINNT) && !defined(_WIN32_WINDOWS) # if defined(_MSC_VER) # if defined(_WIN32) && !defined(WIN32) # if !defined(_WINSOCK2API_) # define WIN32 // Needed for correct types in winsock2.h # else // !defined(_WINSOCK2API_) # error Please define the macro WIN32 in your compiler options # endif // !defined(_WINSOCK2API_) # endif // defined(_WIN32) && !defined(WIN32) # endif // defined(_MSC_VER) # if defined(__BORLANDC__) # if defined(__WIN32__) && !defined(WIN32) # if !defined(_WINSOCK2API_) # define WIN32 // Needed for correct types in winsock2.h # else // !defined(_WINSOCK2API_) # error Please define the macro WIN32 in your compiler options # endif // !defined(_WINSOCK2API_) # endif // defined(__WIN32__) && !defined(WIN32) # endif // defined(__BORLANDC__) # if defined(__CYGWIN__) # if !defined(__USE_W32_SOCKETS) # error You must add -D__USE_W32_SOCKETS to your compiler options. # endif // !defined(__USE_W32_SOCKETS) # endif // defined(__CYGWIN__) #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Windows: minimise header inclusion. #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) # if !defined(ASIO_NO_WIN32_LEAN_AND_MEAN) # if !defined(WIN32_LEAN_AND_MEAN) # define WIN32_LEAN_AND_MEAN # endif // !defined(WIN32_LEAN_AND_MEAN) # endif // !defined(ASIO_NO_WIN32_LEAN_AND_MEAN) #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Windows: suppress definition of "min" and "max" macros. #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) # if !defined(ASIO_NO_NOMINMAX) # if !defined(NOMINMAX) # define NOMINMAX 1 # endif // !defined(NOMINMAX) # endif // !defined(ASIO_NO_NOMINMAX) #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Windows: IO Completion Ports. #if !defined(ASIO_HAS_IOCP) # if defined(ASIO_WINDOWS) || defined(__CYGWIN__) # if defined(_WIN32_WINNT) && (_WIN32_WINNT >= 0x0400) # if !defined(UNDER_CE) # if !defined(ASIO_DISABLE_IOCP) # define ASIO_HAS_IOCP 1 # endif // !defined(ASIO_DISABLE_IOCP) # endif // !defined(UNDER_CE) # endif // defined(_WIN32_WINNT) && (_WIN32_WINNT >= 0x0400) # endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) #endif // !defined(ASIO_HAS_IOCP) // On POSIX (and POSIX-like) platforms we need to include unistd.h in order to // get access to the various platform feature macros, e.g. to be able to test // for threads support. 
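// Illustrative note (not part of asio): on Windows the checks above expect
// the target OS version to be stated up front; otherwise the library warns
// and assumes Windows XP. A typical consumer therefore defines something like
// the following before including any asio header (the values are an example):
#if 0
#define _WIN32_WINNT 0x0601   // target Windows 7 or later
#define WIN32_LEAN_AND_MEAN   // asio would define this anyway, see above
#define NOMINMAX              // keep <windows.h> from defining min/max macros
#include "asio.hpp"
#endif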
#if !defined(ASIO_HAS_UNISTD_H) # if !defined(ASIO_HAS_BOOST_CONFIG) # if defined(unix) \ || defined(__unix) \ || defined(_XOPEN_SOURCE) \ || defined(_POSIX_SOURCE) \ || (defined(__MACH__) && defined(__APPLE__)) \ || defined(__FreeBSD__) \ || defined(__NetBSD__) \ || defined(__OpenBSD__) \ || defined(__linux__) # define ASIO_HAS_UNISTD_H 1 # endif # endif // !defined(ASIO_HAS_BOOST_CONFIG) #endif // !defined(ASIO_HAS_UNISTD_H) #if defined(ASIO_HAS_UNISTD_H) # include #endif // defined(ASIO_HAS_UNISTD_H) // Linux: epoll, eventfd and timerfd. #if defined(__linux__) # include # if !defined(ASIO_HAS_EPOLL) # if !defined(ASIO_DISABLE_EPOLL) # if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,45) # define ASIO_HAS_EPOLL 1 # endif // LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,45) # endif // !defined(ASIO_DISABLE_EPOLL) # endif // !defined(ASIO_HAS_EPOLL) # if !defined(ASIO_HAS_EVENTFD) # if !defined(ASIO_DISABLE_EVENTFD) # if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) # define ASIO_HAS_EVENTFD 1 # endif // LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) # endif // !defined(ASIO_DISABLE_EVENTFD) # endif // !defined(ASIO_HAS_EVENTFD) # if !defined(ASIO_HAS_TIMERFD) # if defined(ASIO_HAS_EPOLL) # if (__GLIBC__ > 2) || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 8) # define ASIO_HAS_TIMERFD 1 # endif // (__GLIBC__ > 2) || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 8) # endif // defined(ASIO_HAS_EPOLL) # endif // !defined(ASIO_HAS_TIMERFD) #endif // defined(__linux__) // Mac OS X, FreeBSD, NetBSD, OpenBSD: kqueue. #if (defined(__MACH__) && defined(__APPLE__)) \ || defined(__FreeBSD__) \ || defined(__NetBSD__) \ || defined(__OpenBSD__) # if !defined(ASIO_HAS_KQUEUE) # if !defined(ASIO_DISABLE_KQUEUE) # define ASIO_HAS_KQUEUE 1 # endif // !defined(ASIO_DISABLE_KQUEUE) # endif // !defined(ASIO_HAS_KQUEUE) #endif // (defined(__MACH__) && defined(__APPLE__)) // || defined(__FreeBSD__) // || defined(__NetBSD__) // || defined(__OpenBSD__) // Solaris: /dev/poll. #if defined(__sun) # if !defined(ASIO_HAS_DEV_POLL) # if !defined(ASIO_DISABLE_DEV_POLL) # define ASIO_HAS_DEV_POLL 1 # endif // !defined(ASIO_DISABLE_DEV_POLL) # endif // !defined(ASIO_HAS_DEV_POLL) #endif // defined(__sun) // Serial ports. #if !defined(ASIO_HAS_SERIAL_PORT) # if defined(ASIO_HAS_IOCP) \ || !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) # if !defined(__SYMBIAN32__) # if !defined(ASIO_DISABLE_SERIAL_PORT) # define ASIO_HAS_SERIAL_PORT 1 # endif // !defined(ASIO_DISABLE_SERIAL_PORT) # endif // !defined(__SYMBIAN32__) # endif // defined(ASIO_HAS_IOCP) // || !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) #endif // !defined(ASIO_HAS_SERIAL_PORT) // Windows: stream handles. #if !defined(ASIO_HAS_WINDOWS_STREAM_HANDLE) # if !defined(ASIO_DISABLE_WINDOWS_STREAM_HANDLE) # if defined(ASIO_HAS_IOCP) # define ASIO_HAS_WINDOWS_STREAM_HANDLE 1 # endif // defined(ASIO_HAS_IOCP) # endif // !defined(ASIO_DISABLE_WINDOWS_STREAM_HANDLE) #endif // !defined(ASIO_HAS_WINDOWS_STREAM_HANDLE) // Windows: random access handles. #if !defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE) # if !defined(ASIO_DISABLE_WINDOWS_RANDOM_ACCESS_HANDLE) # if defined(ASIO_HAS_IOCP) # define ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE 1 # endif // defined(ASIO_HAS_IOCP) # endif // !defined(ASIO_DISABLE_WINDOWS_RANDOM_ACCESS_HANDLE) #endif // !defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE) // Windows: object handles. 
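// Illustrative note (not part of asio): the blocks above select the
// demultiplexer backend per platform -- epoll on modern Linux, kqueue on
// BSD/macOS, /dev/poll on Solaris, otherwise select. The choice can be
// overridden at build time, for example forcing the select-based reactor on
// Linux for debugging (example command line, not a project requirement):
//
//   g++ -DASIO_STANDALONE -DASIO_DISABLE_EPOLL -std=c++11 app.cpp -lpthread
//
// With ASIO_DISABLE_EPOLL defined, ASIO_HAS_EPOLL is never set above and the
// library falls back to its select_reactor implementation.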
#if !defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) # if !defined(ASIO_DISABLE_WINDOWS_OBJECT_HANDLE) # if defined(ASIO_WINDOWS) || defined(__CYGWIN__) # if !defined(UNDER_CE) # define ASIO_HAS_WINDOWS_OBJECT_HANDLE 1 # endif // !defined(UNDER_CE) # endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) # endif // !defined(ASIO_DISABLE_WINDOWS_OBJECT_HANDLE) #endif // !defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) // Windows: OVERLAPPED wrapper. #if !defined(ASIO_HAS_WINDOWS_OVERLAPPED_PTR) # if !defined(ASIO_DISABLE_WINDOWS_OVERLAPPED_PTR) # if defined(ASIO_HAS_IOCP) # define ASIO_HAS_WINDOWS_OVERLAPPED_PTR 1 # endif // defined(ASIO_HAS_IOCP) # endif // !defined(ASIO_DISABLE_WINDOWS_OVERLAPPED_PTR) #endif // !defined(ASIO_HAS_WINDOWS_OVERLAPPED_PTR) // POSIX: stream-oriented file descriptors. #if !defined(ASIO_HAS_POSIX_STREAM_DESCRIPTOR) # if !defined(ASIO_DISABLE_POSIX_STREAM_DESCRIPTOR) # if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) # define ASIO_HAS_POSIX_STREAM_DESCRIPTOR 1 # endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) # endif // !defined(ASIO_DISABLE_POSIX_STREAM_DESCRIPTOR) #endif // !defined(ASIO_HAS_POSIX_STREAM_DESCRIPTOR) // UNIX domain sockets. #if !defined(ASIO_HAS_LOCAL_SOCKETS) # if !defined(ASIO_DISABLE_LOCAL_SOCKETS) # if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) # define ASIO_HAS_LOCAL_SOCKETS 1 # endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) # endif // !defined(ASIO_DISABLE_LOCAL_SOCKETS) #endif // !defined(ASIO_HAS_LOCAL_SOCKETS) // Can use sigaction() instead of signal(). #if !defined(ASIO_HAS_SIGACTION) # if !defined(ASIO_DISABLE_SIGACTION) # if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) # define ASIO_HAS_SIGACTION 1 # endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) # endif // !defined(ASIO_DISABLE_SIGACTION) #endif // !defined(ASIO_HAS_SIGACTION) // Can use signal(). #if !defined(ASIO_HAS_SIGNAL) # if !defined(ASIO_DISABLE_SIGNAL) # if !defined(UNDER_CE) # define ASIO_HAS_SIGNAL 1 # endif // !defined(UNDER_CE) # endif // !defined(ASIO_DISABLE_SIGNAL) #endif // !defined(ASIO_HAS_SIGNAL) // Can use getaddrinfo() and getnameinfo(). #if !defined(ASIO_HAS_GETADDRINFO) # if defined(ASIO_WINDOWS) || defined(__CYGWIN__) # if defined(_WIN32_WINNT) && (_WIN32_WINNT >= 0x0501) # define ASIO_HAS_GETADDRINFO 1 # elif defined(UNDER_CE) # define ASIO_HAS_GETADDRINFO 1 # endif // defined(UNDER_CE) # elif !(defined(__MACH__) && defined(__APPLE__)) # define ASIO_HAS_GETADDRINFO 1 # endif // !(defined(__MACH__) && defined(__APPLE__)) #endif // !defined(ASIO_HAS_GETADDRINFO) // Whether standard iostreams are disabled. #if !defined(ASIO_NO_IOSTREAM) # if defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_NO_IOSTREAM) # define ASIO_NO_IOSTREAM 1 # endif // !defined(BOOST_NO_IOSTREAM) #endif // !defined(ASIO_NO_IOSTREAM) // Whether exception handling is disabled. #if !defined(ASIO_NO_EXCEPTIONS) # if defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_NO_EXCEPTIONS) # define ASIO_NO_EXCEPTIONS 1 # endif // !defined(BOOST_NO_EXCEPTIONS) #endif // !defined(ASIO_NO_EXCEPTIONS) // Whether the typeid operator is supported. #if !defined(ASIO_NO_TYPEID) # if defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_NO_TYPEID) # define ASIO_NO_TYPEID 1 # endif // !defined(BOOST_NO_TYPEID) #endif // !defined(ASIO_NO_TYPEID) // Threads. 
#if !defined(ASIO_HAS_THREADS) # if !defined(ASIO_DISABLE_THREADS) # if defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_HAS_THREADS) # define ASIO_HAS_THREADS 1 # elif defined(_MSC_VER) && defined(_MT) # define ASIO_HAS_THREADS 1 # elif defined(__BORLANDC__) && defined(__MT__) # define ASIO_HAS_THREADS 1 # elif defined(_POSIX_THREADS) # define ASIO_HAS_THREADS 1 # endif // defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_HAS_THREADS) # endif // !defined(ASIO_DISABLE_THREADS) #endif // !defined(ASIO_HAS_THREADS) // POSIX threads. #if !defined(ASIO_HAS_PTHREADS) # if defined(ASIO_HAS_THREADS) # if defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_HAS_PTHREADS) # define ASIO_HAS_PTHREADS 1 # elif defined(_POSIX_THREADS) # define ASIO_HAS_PTHREADS 1 # endif // defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_HAS_PTHREADS) # endif // defined(ASIO_HAS_THREADS) #endif // !defined(ASIO_HAS_PTHREADS) // Helper to prevent macro expansion. #define ASIO_PREVENT_MACRO_SUBSTITUTION // Helper to define in-class constants. #if !defined(ASIO_STATIC_CONSTANT) # if !defined(ASIO_DISABLE_BOOST_STATIC_CONSTANT) # define ASIO_STATIC_CONSTANT(type, assignment) \ BOOST_STATIC_CONSTANT(type, assignment) # else // !defined(ASIO_DISABLE_BOOST_STATIC_CONSTANT) # define ASIO_STATIC_CONSTANT(type, assignment) \ static const type assignment # endif // !defined(ASIO_DISABLE_BOOST_STATIC_CONSTANT) #endif // !defined(ASIO_STATIC_CONSTANT) // Boost array library. #if !defined(ASIO_HAS_BOOST_ARRAY) # if !defined(ASIO_DISABLE_BOOST_ARRAY) # define ASIO_HAS_BOOST_ARRAY 1 # endif // !defined(ASIO_DISABLE_BOOST_ARRAY) #endif // !defined(ASIO_HAS_BOOST_ARRAY) // Boost assert macro. #if !defined(ASIO_HAS_BOOST_ASSERT) # if !defined(ASIO_DISABLE_BOOST_ASSERT) # define ASIO_HAS_BOOST_ASSERT 1 # endif // !defined(ASIO_DISABLE_BOOST_ASSERT) #endif // !defined(ASIO_HAS_BOOST_ASSERT) // Boost limits header. #if !defined(ASIO_HAS_BOOST_LIMITS) # if !defined(ASIO_DISABLE_BOOST_LIMITS) # define ASIO_HAS_BOOST_LIMITS 1 # endif // !defined(ASIO_DISABLE_BOOST_LIMITS) #endif // !defined(ASIO_HAS_BOOST_LIMITS) // Boost throw_exception function. #if !defined(ASIO_HAS_BOOST_THROW_EXCEPTION) # if !defined(ASIO_DISABLE_BOOST_THROW_EXCEPTION) # define ASIO_HAS_BOOST_THROW_EXCEPTION 1 # endif // !defined(ASIO_DISABLE_BOOST_THROW_EXCEPTION) #endif // !defined(ASIO_HAS_BOOST_THROW_EXCEPTION) // Boost regex library. #if !defined(ASIO_HAS_BOOST_REGEX) # if !defined(ASIO_DISABLE_BOOST_REGEX) # define ASIO_HAS_BOOST_REGEX 1 # endif // !defined(ASIO_DISABLE_BOOST_REGEX) #endif // !defined(ASIO_HAS_BOOST_REGEX) // Boost bind function. #if !defined(ASIO_HAS_BOOST_BIND) # if !defined(ASIO_DISABLE_BOOST_BIND) # define ASIO_HAS_BOOST_BIND 1 # endif // !defined(ASIO_DISABLE_BOOST_BIND) #endif // !defined(ASIO_HAS_BOOST_BIND) // Boost's BOOST_WORKAROUND macro. #if !defined(ASIO_HAS_BOOST_WORKAROUND) # if !defined(ASIO_DISABLE_BOOST_WORKAROUND) # define ASIO_HAS_BOOST_WORKAROUND 1 # endif // !defined(ASIO_DISABLE_BOOST_WORKAROUND) #endif // !defined(ASIO_HAS_BOOST_WORKAROUND) // Microsoft Visual C++'s secure C runtime library. #if !defined(ASIO_HAS_SECURE_RTL) # if !defined(ASIO_DISABLE_SECURE_RTL) # if defined(ASIO_MSVC) \ && (ASIO_MSVC >= 1400) \ && !defined(UNDER_CE) # define ASIO_HAS_SECURE_RTL 1 # endif // defined(ASIO_MSVC) // && (ASIO_MSVC >= 1400) // && !defined(UNDER_CE) # endif // !defined(ASIO_DISABLE_SECURE_RTL) #endif // !defined(ASIO_HAS_SECURE_RTL) // Handler hooking. Disabled for ancient Borland C++ and gcc compilers. 
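// Illustrative note (not part of asio): taken together with the
// ASIO_STANDALONE block at the top of this header, these switches mean the
// library can be built either against Boost or as standalone C++11 with the
// Boost fallbacks (array, bind, regex, throw_exception, ...) disabled.
// Example command line for the standalone, Boost-free configuration:
//
//   g++ -std=c++11 -DASIO_STANDALONE -Iasio/include app.cpp -lpthread
//
// Omitting ASIO_STANDALONE leaves the ASIO_HAS_BOOST_* defaults above in
// effect, pulling in the corresponding Boost headers instead.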
#if !defined(ASIO_HAS_HANDLER_HOOKS) # if !defined(ASIO_DISABLE_HANDLER_HOOKS) # if defined(__GNUC__) # if (__GNUC__ >= 3) # define ASIO_HAS_HANDLER_HOOKS 1 # endif // (__GNUC__ >= 3) # elif !defined(__BORLANDC__) # define ASIO_HAS_HANDLER_HOOKS 1 # endif // !defined(__BORLANDC__) # endif // !defined(ASIO_DISABLE_HANDLER_HOOKS) #endif // !defined(ASIO_HAS_HANDLER_HOOKS) // Support for the __thread keyword extension. #if !defined(ASIO_DISABLE_THREAD_KEYWORD_EXTENSION) # if defined(__linux__) # if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) # if ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3) # if !defined(__INTEL_COMPILER) && !defined(__ICL) # define ASIO_HAS_THREAD_KEYWORD_EXTENSION 1 # define ASIO_THREAD_KEYWORD __thread # elif defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1100) # define ASIO_HAS_THREAD_KEYWORD_EXTENSION 1 # endif // defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1100) # endif // ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3) # endif // defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) # endif // defined(__linux__) # if defined(ASIO_MSVC) && defined(ASIO_WINDOWS_RUNTIME) # if (_MSC_VER >= 1700) # define ASIO_HAS_THREAD_KEYWORD_EXTENSION 1 # define ASIO_THREAD_KEYWORD __declspec(thread) # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) && defined(ASIO_WINDOWS_RUNTIME) #endif // !defined(ASIO_DISABLE_THREAD_KEYWORD_EXTENSION) #if !defined(ASIO_THREAD_KEYWORD) # define ASIO_THREAD_KEYWORD __thread #endif // !defined(ASIO_THREAD_KEYWORD) // Support for POSIX ssize_t typedef. #if !defined(ASIO_DISABLE_SSIZE_T) # if defined(__linux__) \ || (defined(__MACH__) && defined(__APPLE__)) # define ASIO_HAS_SSIZE_T 1 # endif // defined(__linux__) // || (defined(__MACH__) && defined(__APPLE__)) #endif // !defined(ASIO_DISABLE_SSIZE_T) #endif // ASIO_DETAIL_CONFIG_HPP galera-3-25.3.20/asio/asio/detail/array_fwd.hpp0000644000015300001660000000162213042054732020756 0ustar jenkinsjenkins// // detail/array_fwd.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_ARRAY_FWD_HPP #define ASIO_DETAIL_ARRAY_FWD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" namespace boost { template class array; } // namespace boost // Standard library components can't be forward declared, so we'll have to // include the array header. Fortunately, it's fairly lightweight and doesn't // add significantly to the compile time. #if defined(ASIO_HAS_STD_ARRAY) # include #endif // defined(ASIO_HAS_STD_ARRAY) #endif // ASIO_DETAIL_ARRAY_FWD_HPP galera-3-25.3.20/asio/asio/detail/reactive_socket_recvmsg_op.hpp0000644000015300001660000000776713042054732024416 0ustar jenkinsjenkins// // detail/reactive_socket_recvmsg_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_SOCKET_RECVMSG_OP_HPP #define ASIO_DETAIL_REACTIVE_SOCKET_RECVMSG_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/socket_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class reactive_socket_recvmsg_op_base : public reactor_op { public: reactive_socket_recvmsg_op_base(socket_type socket, const MutableBufferSequence& buffers, socket_base::message_flags in_flags, socket_base::message_flags& out_flags, func_type complete_func) : reactor_op(&reactive_socket_recvmsg_op_base::do_perform, complete_func), socket_(socket), buffers_(buffers), in_flags_(in_flags), out_flags_(out_flags) { } static bool do_perform(reactor_op* base) { reactive_socket_recvmsg_op_base* o( static_cast(base)); buffer_sequence_adapter bufs(o->buffers_); return socket_ops::non_blocking_recvmsg(o->socket_, bufs.buffers(), bufs.count(), o->in_flags_, o->out_flags_, o->ec_, o->bytes_transferred_); } private: socket_type socket_; MutableBufferSequence buffers_; socket_base::message_flags in_flags_; socket_base::message_flags& out_flags_; }; template class reactive_socket_recvmsg_op : public reactive_socket_recvmsg_op_base { public: ASIO_DEFINE_HANDLER_PTR(reactive_socket_recvmsg_op); reactive_socket_recvmsg_op(socket_type socket, const MutableBufferSequence& buffers, socket_base::message_flags in_flags, socket_base::message_flags& out_flags, Handler& handler) : reactive_socket_recvmsg_op_base(socket, buffers, in_flags, out_flags, &reactive_socket_recvmsg_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. reactive_socket_recvmsg_op* o( static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, o->bytes_transferred_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_REACTIVE_SOCKET_RECVMSG_OP_HPP galera-3-25.3.20/asio/asio/detail/reactor_op_queue.hpp0000644000015300001660000001135213042054732022342 0ustar jenkinsjenkins// // detail/reactor_op_queue.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTOR_OP_QUEUE_HPP #define ASIO_DETAIL_REACTOR_OP_QUEUE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/hash_map.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class reactor_op_queue : private noncopyable { public: typedef Descriptor key_type; struct mapped_type : op_queue { mapped_type() {} mapped_type(const mapped_type&) {} void operator=(const mapped_type&) {} }; typedef typename hash_map::value_type value_type; typedef typename hash_map::iterator iterator; // Constructor. reactor_op_queue() : operations_() { } // Obtain iterators to all registered descriptors. iterator begin() { return operations_.begin(); } iterator end() { return operations_.end(); } // Add a new operation to the queue. Returns true if this is the only // operation for the given descriptor, in which case the reactor's event // demultiplexing function call may need to be interrupted and restarted. bool enqueue_operation(Descriptor descriptor, reactor_op* op) { std::pair entry = operations_.insert(value_type(descriptor, mapped_type())); entry.first->second.push(op); return entry.second; } // Cancel all operations associated with the descriptor identified by the // supplied iterator. Any operations pending for the descriptor will be // cancelled. Returns true if any operations were cancelled, in which case // the reactor's event demultiplexing function may need to be interrupted and // restarted. bool cancel_operations(iterator i, op_queue& ops, const asio::error_code& ec = asio::error::operation_aborted) { if (i != operations_.end()) { while (reactor_op* op = i->second.front()) { op->ec_ = ec; i->second.pop(); ops.push(op); } operations_.erase(i); return true; } return false; } // Cancel all operations associated with the descriptor. Any operations // pending for the descriptor will be cancelled. Returns true if any // operations were cancelled, in which case the reactor's event // demultiplexing function may need to be interrupted and restarted. bool cancel_operations(Descriptor descriptor, op_queue& ops, const asio::error_code& ec = asio::error::operation_aborted) { return this->cancel_operations(operations_.find(descriptor), ops, ec); } // Whether there are no operations in the queue. bool empty() const { return operations_.empty(); } // Determine whether there are any operations associated with the descriptor. bool has_operation(Descriptor descriptor) const { return operations_.find(descriptor) != operations_.end(); } // Perform the operations corresponding to the descriptor identified by the // supplied iterator. Returns true if there are still unfinished operations // queued for the descriptor. bool perform_operations(iterator i, op_queue& ops) { if (i != operations_.end()) { while (reactor_op* op = i->second.front()) { if (op->perform()) { i->second.pop(); ops.push(op); } else { return true; } } operations_.erase(i); } return false; } // Perform the operations corresponding to the descriptor. Returns true if // there are still unfinished operations queued for the descriptor. 
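// Illustrative sketch (not part of asio): the boolean returned by
// enqueue_operation() above is how callers learn that a descriptor has just
// gained its first pending operation, which is the moment the blocking
// demultiplexer call has to be interrupted and re-armed. A hypothetical
// caller-side pattern (the names are stand-ins, not asio API):
#if 0
#include "asio/detail/reactor_op.hpp"

template <typename Queue, typename Interrupter>
void start_read_op(Queue& ops, int fd, asio::detail::reactor_op* op,
    Interrupter& interrupter)
{
  if (ops.enqueue_operation(fd, op))  // true: fd had no operations before
    interrupter.interrupt();          // wake the reactor so it registers fd
}
#endif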
bool perform_operations(Descriptor descriptor, op_queue& ops) { return this->perform_operations(operations_.find(descriptor), ops); } // Get all operations owned by the queue. void get_all_operations(op_queue& ops) { iterator i = operations_.begin(); while (i != operations_.end()) { iterator op_iter = i++; ops.push(op_iter->second); operations_.erase(op_iter); } } private: // The operations that are currently executing asynchronously. hash_map operations_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_REACTOR_OP_QUEUE_HPP galera-3-25.3.20/asio/asio/detail/function.hpp0000644000015300001660000000166513042054732020634 0ustar jenkinsjenkins// // detail/function.hpp // ~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_FUNCTION_HPP #define ASIO_DETAIL_FUNCTION_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_FUNCTION) # include #else // defined(ASIO_HAS_STD_FUNCTION) # include #endif // defined(ASIO_HAS_STD_FUNCTION) namespace asio { namespace detail { #if defined(ASIO_HAS_STD_FUNCTION) using std::function; #else // defined(ASIO_HAS_STD_FUNCTION) using boost::function; #endif // defined(ASIO_HAS_STD_FUNCTION) } // namespace detail } // namespace asio #endif // ASIO_DETAIL_FUNCTION_HPP galera-3-25.3.20/asio/asio/detail/completion_handler.hpp0000644000015300001660000000455013042054732022651 0ustar jenkinsjenkins// // detail/completion_handler.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_COMPLETION_HANDLER_HPP #define ASIO_DETAIL_COMPLETION_HANDLER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/addressof.hpp" #include "asio/detail/config.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class completion_handler : public operation { public: ASIO_DEFINE_HANDLER_PTR(completion_handler); completion_handler(Handler& h) : operation(&completion_handler::do_complete), handler_(ASIO_MOVE_CAST(Handler)(h)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. completion_handler* h(static_cast(base)); ptr p = { asio::detail::addressof(h->handler_), h, h }; ASIO_HANDLER_COMPLETION((h)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. 
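// Illustrative sketch (not part of this header): the ptr/handler-copy dance
// in do_complete() relies on the asio_handler_allocate/asio_handler_deallocate
// hooks, which are found by argument-dependent lookup on the handler type.
// A user handler can therefore supply its own storage, roughly like this
// (hypothetical handler, simplified, no alignment or lifetime checks):
#if 0
#include <cstddef>
#include <new>

struct my_handler
{
  void operator()() {}   // completion signature for io_service::post()
  char* small_buffer;    // storage owned elsewhere, at least 256 bytes
};

inline void* asio_handler_allocate(std::size_t size, my_handler* h)
{
  // Reuse the handler's own buffer for small operations, else heap-allocate.
  return size <= 256 ? h->small_buffer : ::operator new(size);
}

inline void asio_handler_deallocate(void* p, std::size_t size, my_handler* h)
{
  if (p != h->small_buffer)
    ::operator delete(p);
  (void)size;
}
#endif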
Handler handler(ASIO_MOVE_CAST(Handler)(h->handler_)); p.h = asio::detail::addressof(handler); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN(()); asio_handler_invoke_helpers::invoke(handler, handler); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_COMPLETION_HANDLER_HPP galera-3-25.3.20/asio/asio/detail/bind_handler.hpp0000644000015300001660000003163413042054732021417 0ustar jenkinsjenkins// // detail/bind_handler.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_BIND_HANDLER_HPP #define ASIO_DETAIL_BIND_HANDLER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_cont_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class binder1 { public: binder1(const Handler& handler, const Arg1& arg1) : handler_(handler), arg1_(arg1) { } binder1(Handler& handler, const Arg1& arg1) : handler_(ASIO_MOVE_CAST(Handler)(handler)), arg1_(arg1) { } void operator()() { handler_(static_cast(arg1_)); } void operator()() const { handler_(arg1_); } //private: Handler handler_; Arg1 arg1_; }; template inline void* asio_handler_allocate(std::size_t size, binder1* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, binder1* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( binder1* this_handler) { return asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, binder1* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, binder1* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline binder1 bind_handler(Handler handler, const Arg1& arg1) { return binder1(handler, arg1); } template class binder2 { public: binder2(const Handler& handler, const Arg1& arg1, const Arg2& arg2) : handler_(handler), arg1_(arg1), arg2_(arg2) { } binder2(Handler& handler, const Arg1& arg1, const Arg2& arg2) : handler_(ASIO_MOVE_CAST(Handler)(handler)), arg1_(arg1), arg2_(arg2) { } void operator()() { handler_(static_cast(arg1_), static_cast(arg2_)); } void operator()() const { handler_(arg1_, arg2_); } //private: Handler handler_; Arg1 arg1_; Arg2 arg2_; }; template inline void* asio_handler_allocate(std::size_t size, binder2* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, binder2* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( binder2* this_handler) { return 
asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, binder2* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, binder2* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline binder2 bind_handler(Handler handler, const Arg1& arg1, const Arg2& arg2) { return binder2(handler, arg1, arg2); } template class binder3 { public: binder3(const Handler& handler, const Arg1& arg1, const Arg2& arg2, const Arg3& arg3) : handler_(handler), arg1_(arg1), arg2_(arg2), arg3_(arg3) { } binder3(Handler& handler, const Arg1& arg1, const Arg2& arg2, const Arg3& arg3) : handler_(ASIO_MOVE_CAST(Handler)(handler)), arg1_(arg1), arg2_(arg2), arg3_(arg3) { } void operator()() { handler_(static_cast(arg1_), static_cast(arg2_), static_cast(arg3_)); } void operator()() const { handler_(arg1_, arg2_, arg3_); } //private: Handler handler_; Arg1 arg1_; Arg2 arg2_; Arg3 arg3_; }; template inline void* asio_handler_allocate(std::size_t size, binder3* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, binder3* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( binder3* this_handler) { return asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, binder3* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, binder3* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline binder3 bind_handler(Handler handler, const Arg1& arg1, const Arg2& arg2, const Arg3& arg3) { return binder3(handler, arg1, arg2, arg3); } template class binder4 { public: binder4(const Handler& handler, const Arg1& arg1, const Arg2& arg2, const Arg3& arg3, const Arg4& arg4) : handler_(handler), arg1_(arg1), arg2_(arg2), arg3_(arg3), arg4_(arg4) { } binder4(Handler& handler, const Arg1& arg1, const Arg2& arg2, const Arg3& arg3, const Arg4& arg4) : handler_(ASIO_MOVE_CAST(Handler)(handler)), arg1_(arg1), arg2_(arg2), arg3_(arg3), arg4_(arg4) { } void operator()() { handler_(static_cast(arg1_), static_cast(arg2_), static_cast(arg3_), static_cast(arg4_)); } void operator()() const { handler_(arg1_, arg2_, arg3_, arg4_); } //private: Handler handler_; Arg1 arg1_; Arg2 arg2_; Arg3 arg3_; Arg4 arg4_; }; template inline void* asio_handler_allocate(std::size_t size, binder4* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, binder4* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( binder4* this_handler) { return asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, binder4* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, binder4* this_handler) { asio_handler_invoke_helpers::invoke( function, 
this_handler->handler_); } template inline binder4 bind_handler( Handler handler, const Arg1& arg1, const Arg2& arg2, const Arg3& arg3, const Arg4& arg4) { return binder4(handler, arg1, arg2, arg3, arg4); } template class binder5 { public: binder5(const Handler& handler, const Arg1& arg1, const Arg2& arg2, const Arg3& arg3, const Arg4& arg4, const Arg5& arg5) : handler_(handler), arg1_(arg1), arg2_(arg2), arg3_(arg3), arg4_(arg4), arg5_(arg5) { } binder5(Handler& handler, const Arg1& arg1, const Arg2& arg2, const Arg3& arg3, const Arg4& arg4, const Arg5& arg5) : handler_(ASIO_MOVE_CAST(Handler)(handler)), arg1_(arg1), arg2_(arg2), arg3_(arg3), arg4_(arg4), arg5_(arg5) { } void operator()() { handler_(static_cast(arg1_), static_cast(arg2_), static_cast(arg3_), static_cast(arg4_), static_cast(arg5_)); } void operator()() const { handler_(arg1_, arg2_, arg3_, arg4_, arg5_); } //private: Handler handler_; Arg1 arg1_; Arg2 arg2_; Arg3 arg3_; Arg4 arg4_; Arg5 arg5_; }; template inline void* asio_handler_allocate(std::size_t size, binder5* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, binder5* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( binder5* this_handler) { return asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, binder5* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, binder5* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline binder5 bind_handler( Handler handler, const Arg1& arg1, const Arg2& arg2, const Arg3& arg3, const Arg4& arg4, const Arg5& arg5) { return binder5(handler, arg1, arg2, arg3, arg4, arg5); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_BIND_HANDLER_HPP galera-3-25.3.20/asio/asio/detail/std_static_mutex.hpp0000644000015300001660000000270213042054732022363 0ustar jenkinsjenkins// // detail/std_static_mutex.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_STD_STATIC_MUTEX_HPP #define ASIO_DETAIL_STD_STATIC_MUTEX_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/scoped_lock.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class std_event; class std_static_mutex : private noncopyable { public: typedef asio::detail::scoped_lock scoped_lock; // Constructor. std_static_mutex(int) { } // Destructor. ~std_static_mutex() { } // Initialise the mutex. void init() { // Nothing to do. } // Lock the mutex. void lock() { mutex_.lock(); } // Unlock the mutex. 
void unlock() { mutex_.unlock(); } private: friend class std_event; std::mutex mutex_; }; #define ASIO_STD_STATIC_MUTEX_INIT 0 } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) #endif // ASIO_DETAIL_STD_STATIC_MUTEX_HPP galera-3-25.3.20/asio/asio/detail/win_tss_ptr.hpp0000644000015300001660000000312513042054732021353 0ustar jenkinsjenkins// // detail/win_tss_ptr.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_TSS_PTR_HPP #define ASIO_DETAIL_WIN_TSS_PTR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) #include "asio/detail/noncopyable.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Helper function to create thread-specific storage. ASIO_DECL DWORD win_tss_ptr_create(); template class win_tss_ptr : private noncopyable { public: // Constructor. win_tss_ptr() : tss_key_(win_tss_ptr_create()) { } // Destructor. ~win_tss_ptr() { ::TlsFree(tss_key_); } // Get the value. operator T*() const { return static_cast(::TlsGetValue(tss_key_)); } // Set the value. void operator=(T* value) { ::TlsSetValue(tss_key_, value); } private: // Thread-specific storage to allow unlocked access to determine whether a // thread is a member of the pool. DWORD tss_key_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/win_tss_ptr.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_WINDOWS) #endif // ASIO_DETAIL_WIN_TSS_PTR_HPP galera-3-25.3.20/asio/asio/detail/buffer_sequence_adapter.hpp0000644000015300001660000002277413042054732023654 0ustar jenkinsjenkins// // detail/buffer_sequence_adapter.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_BUFFER_SEQUENCE_ADAPTER_HPP #define ASIO_DETAIL_BUFFER_SEQUENCE_ADAPTER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/buffer.hpp" #include "asio/detail/array_fwd.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class buffer_sequence_adapter_base { protected: #if defined(ASIO_WINDOWS_RUNTIME) // The maximum number of buffers to support in a single operation. enum { max_buffers = 1 }; typedef Windows::Storage::Streams::IBuffer^ native_buffer_type; ASIO_DECL static void init_native_buffer( native_buffer_type& buf, const asio::mutable_buffer& buffer); ASIO_DECL static void init_native_buffer( native_buffer_type& buf, const asio::const_buffer& buffer); #elif defined(ASIO_WINDOWS) || defined(__CYGWIN__) // The maximum number of buffers to support in a single operation. enum { max_buffers = 64 < max_iov_len ? 
64 : max_iov_len }; typedef WSABUF native_buffer_type; static void init_native_buffer(WSABUF& buf, const asio::mutable_buffer& buffer) { buf.buf = asio::buffer_cast(buffer); buf.len = static_cast(asio::buffer_size(buffer)); } static void init_native_buffer(WSABUF& buf, const asio::const_buffer& buffer) { buf.buf = const_cast(asio::buffer_cast(buffer)); buf.len = static_cast(asio::buffer_size(buffer)); } #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) // The maximum number of buffers to support in a single operation. enum { max_buffers = 64 < max_iov_len ? 64 : max_iov_len }; typedef iovec native_buffer_type; static void init_iov_base(void*& base, void* addr) { base = addr; } template static void init_iov_base(T& base, void* addr) { base = static_cast(addr); } static void init_native_buffer(iovec& iov, const asio::mutable_buffer& buffer) { init_iov_base(iov.iov_base, asio::buffer_cast(buffer)); iov.iov_len = asio::buffer_size(buffer); } static void init_native_buffer(iovec& iov, const asio::const_buffer& buffer) { init_iov_base(iov.iov_base, const_cast( asio::buffer_cast(buffer))); iov.iov_len = asio::buffer_size(buffer); } #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) }; // Helper class to translate buffers into the native buffer representation. template class buffer_sequence_adapter : buffer_sequence_adapter_base { public: explicit buffer_sequence_adapter(const Buffers& buffer_sequence) : count_(0), total_buffer_size_(0) { typename Buffers::const_iterator iter = buffer_sequence.begin(); typename Buffers::const_iterator end = buffer_sequence.end(); for (; iter != end && count_ < max_buffers; ++iter, ++count_) { Buffer buffer(*iter); init_native_buffer(buffers_[count_], buffer); total_buffer_size_ += asio::buffer_size(buffer); } } native_buffer_type* buffers() { return buffers_; } std::size_t count() const { return count_; } bool all_empty() const { return total_buffer_size_ == 0; } static bool all_empty(const Buffers& buffer_sequence) { typename Buffers::const_iterator iter = buffer_sequence.begin(); typename Buffers::const_iterator end = buffer_sequence.end(); std::size_t i = 0; for (; iter != end && i < max_buffers; ++iter, ++i) if (asio::buffer_size(Buffer(*iter)) > 0) return false; return true; } static void validate(const Buffers& buffer_sequence) { typename Buffers::const_iterator iter = buffer_sequence.begin(); typename Buffers::const_iterator end = buffer_sequence.end(); for (; iter != end; ++iter) { Buffer buffer(*iter); asio::buffer_cast(buffer); } } static Buffer first(const Buffers& buffer_sequence) { typename Buffers::const_iterator iter = buffer_sequence.begin(); typename Buffers::const_iterator end = buffer_sequence.end(); for (; iter != end; ++iter) { Buffer buffer(*iter); if (asio::buffer_size(buffer) != 0) return buffer; } return Buffer(); } private: native_buffer_type buffers_[max_buffers]; std::size_t count_; std::size_t total_buffer_size_; }; template class buffer_sequence_adapter : buffer_sequence_adapter_base { public: explicit buffer_sequence_adapter( const asio::mutable_buffers_1& buffer_sequence) { init_native_buffer(buffer_, Buffer(buffer_sequence)); total_buffer_size_ = asio::buffer_size(buffer_sequence); } native_buffer_type* buffers() { return &buffer_; } std::size_t count() const { return 1; } bool all_empty() const { return total_buffer_size_ == 0; } static bool all_empty(const asio::mutable_buffers_1& buffer_sequence) { return asio::buffer_size(buffer_sequence) == 0; } static void validate(const asio::mutable_buffers_1& buffer_sequence) { 
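    // buffer_cast() simply evaluates the buffer's data pointer and discards
    // the result, so this acts as a debug-style check that the single buffer
    // in the sequence is well formed.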
asio::buffer_cast(buffer_sequence); } static Buffer first(const asio::mutable_buffers_1& buffer_sequence) { return Buffer(buffer_sequence); } private: native_buffer_type buffer_; std::size_t total_buffer_size_; }; template class buffer_sequence_adapter : buffer_sequence_adapter_base { public: explicit buffer_sequence_adapter( const asio::const_buffers_1& buffer_sequence) { init_native_buffer(buffer_, Buffer(buffer_sequence)); total_buffer_size_ = asio::buffer_size(buffer_sequence); } native_buffer_type* buffers() { return &buffer_; } std::size_t count() const { return 1; } bool all_empty() const { return total_buffer_size_ == 0; } static bool all_empty(const asio::const_buffers_1& buffer_sequence) { return asio::buffer_size(buffer_sequence) == 0; } static void validate(const asio::const_buffers_1& buffer_sequence) { asio::buffer_cast(buffer_sequence); } static Buffer first(const asio::const_buffers_1& buffer_sequence) { return Buffer(buffer_sequence); } private: native_buffer_type buffer_; std::size_t total_buffer_size_; }; template class buffer_sequence_adapter > : buffer_sequence_adapter_base { public: explicit buffer_sequence_adapter( const boost::array& buffer_sequence) { init_native_buffer(buffers_[0], Buffer(buffer_sequence[0])); init_native_buffer(buffers_[1], Buffer(buffer_sequence[1])); total_buffer_size_ = asio::buffer_size(buffer_sequence[0]) + asio::buffer_size(buffer_sequence[1]); } native_buffer_type* buffers() { return buffers_; } std::size_t count() const { return 2; } bool all_empty() const { return total_buffer_size_ == 0; } static bool all_empty(const boost::array& buffer_sequence) { return asio::buffer_size(buffer_sequence[0]) == 0 && asio::buffer_size(buffer_sequence[1]) == 0; } static void validate(const boost::array& buffer_sequence) { asio::buffer_cast(buffer_sequence[0]); asio::buffer_cast(buffer_sequence[1]); } static Buffer first(const boost::array& buffer_sequence) { return Buffer(asio::buffer_size(buffer_sequence[0]) != 0 ? buffer_sequence[0] : buffer_sequence[1]); } private: native_buffer_type buffers_[2]; std::size_t total_buffer_size_; }; #if defined(ASIO_HAS_STD_ARRAY) template class buffer_sequence_adapter > : buffer_sequence_adapter_base { public: explicit buffer_sequence_adapter( const std::array& buffer_sequence) { init_native_buffer(buffers_[0], Buffer(buffer_sequence[0])); init_native_buffer(buffers_[1], Buffer(buffer_sequence[1])); total_buffer_size_ = asio::buffer_size(buffer_sequence[0]) + asio::buffer_size(buffer_sequence[1]); } native_buffer_type* buffers() { return buffers_; } std::size_t count() const { return 2; } bool all_empty() const { return total_buffer_size_ == 0; } static bool all_empty(const std::array& buffer_sequence) { return asio::buffer_size(buffer_sequence[0]) == 0 && asio::buffer_size(buffer_sequence[1]) == 0; } static void validate(const std::array& buffer_sequence) { asio::buffer_cast(buffer_sequence[0]); asio::buffer_cast(buffer_sequence[1]); } static Buffer first(const std::array& buffer_sequence) { return Buffer(asio::buffer_size(buffer_sequence[0]) != 0 ? 
buffer_sequence[0] : buffer_sequence[1]); } private: native_buffer_type buffers_[2]; std::size_t total_buffer_size_; }; #endif // defined(ASIO_HAS_STD_ARRAY) } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/buffer_sequence_adapter.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_DETAIL_BUFFER_SEQUENCE_ADAPTER_HPP galera-3-25.3.20/asio/asio/detail/reactive_socket_connect_op.hpp0000644000015300001660000000616113042054732024364 0ustar jenkinsjenkins// // detail/reactive_socket_connect_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_SOCKET_CONNECT_OP_HPP #define ASIO_DETAIL_REACTIVE_SOCKET_CONNECT_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class reactive_socket_connect_op_base : public reactor_op { public: reactive_socket_connect_op_base(socket_type socket, func_type complete_func) : reactor_op(&reactive_socket_connect_op_base::do_perform, complete_func), socket_(socket) { } static bool do_perform(reactor_op* base) { reactive_socket_connect_op_base* o( static_cast(base)); return socket_ops::non_blocking_connect(o->socket_, o->ec_); } private: socket_type socket_; }; template class reactive_socket_connect_op : public reactive_socket_connect_op_base { public: ASIO_DEFINE_HANDLER_PTR(reactive_socket_connect_op); reactive_socket_connect_op(socket_type socket, Handler& handler) : reactive_socket_connect_op_base(socket, &reactive_socket_connect_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. reactive_socket_connect_op* o (static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder1 handler(o->handler_, o->ec_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. 
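    // A null owner indicates the io_service is being shut down: the handler
    // is destroyed without being invoked, which is why the upcall is guarded.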
if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_)); asio_handler_invoke_helpers::invoke(handler, handler); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_REACTIVE_SOCKET_CONNECT_OP_HPP galera-3-25.3.20/asio/asio/detail/gcc_hppa_fenced_block.hpp0000644000015300001660000000263713042054732023231 0ustar jenkinsjenkins// // detail/gcc_hppa_fenced_block.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_GCC_HPPA_FENCED_BLOCK_HPP #define ASIO_DETAIL_GCC_HPPA_FENCED_BLOCK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(__GNUC__) && (defined(__hppa) || defined(__hppa__)) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class gcc_hppa_fenced_block : private noncopyable { public: enum half_t { half }; enum full_t { full }; // Constructor for a half fenced block. explicit gcc_hppa_fenced_block(half_t) { } // Constructor for a full fenced block. explicit gcc_hppa_fenced_block(full_t) { barrier(); } // Destructor. ~gcc_hppa_fenced_block() { barrier(); } private: static void barrier() { // This is just a placeholder and almost certainly not sufficient. __asm__ __volatile__ ("" : : : "memory"); } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(__GNUC__) && (defined(__hppa) || defined(__hppa__)) #endif // ASIO_DETAIL_GCC_HPPA_FENCED_BLOCK_HPP galera-3-25.3.20/asio/asio/detail/null_fenced_block.hpp0000644000015300001660000000164113042054732022431 0ustar jenkinsjenkins// // detail/null_fenced_block.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_NULL_FENCED_BLOCK_HPP #define ASIO_DETAIL_NULL_FENCED_BLOCK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class null_fenced_block : private noncopyable { public: enum half_or_full_t { half, full }; // Constructor. explicit null_fenced_block(half_or_full_t) { } // Destructor. ~null_fenced_block() { } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_NULL_FENCED_BLOCK_HPP galera-3-25.3.20/asio/asio/detail/socket_option.hpp0000644000015300001660000001421413042054732021661 0ustar jenkinsjenkins// // detail/socket_option.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SOCKET_OPTION_HPP #define ASIO_DETAIL_SOCKET_OPTION_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/detail/socket_types.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { namespace socket_option { // Helper template for implementing boolean-based options. template class boolean { public: // Default constructor. boolean() : value_(0) { } // Construct with a specific option value. explicit boolean(bool v) : value_(v ? 1 : 0) { } // Set the current value of the boolean. boolean& operator=(bool v) { value_ = v ? 1 : 0; return *this; } // Get the current value of the boolean. bool value() const { return !!value_; } // Convert to bool. operator bool() const { return !!value_; } // Test for false. bool operator!() const { return !value_; } // Get the level of the socket option. template int level(const Protocol&) const { return Level; } // Get the name of the socket option. template int name(const Protocol&) const { return Name; } // Get the address of the boolean data. template int* data(const Protocol&) { return &value_; } // Get the address of the boolean data. template const int* data(const Protocol&) const { return &value_; } // Get the size of the boolean data. template std::size_t size(const Protocol&) const { return sizeof(value_); } // Set the size of the boolean data. template void resize(const Protocol&, std::size_t s) { // On some platforms (e.g. Windows Vista), the getsockopt function will // return the size of a boolean socket option as one byte, even though a // four byte integer was passed in. switch (s) { case sizeof(char): value_ = *reinterpret_cast(&value_) ? 1 : 0; break; case sizeof(value_): break; default: { std::length_error ex("boolean socket option resize"); asio::detail::throw_exception(ex); } } } private: int value_; }; // Helper template for implementing integer options. template class integer { public: // Default constructor. integer() : value_(0) { } // Construct with a specific option value. explicit integer(int v) : value_(v) { } // Set the value of the int option. integer& operator=(int v) { value_ = v; return *this; } // Get the current value of the int option. int value() const { return value_; } // Get the level of the socket option. template int level(const Protocol&) const { return Level; } // Get the name of the socket option. template int name(const Protocol&) const { return Name; } // Get the address of the int data. template int* data(const Protocol&) { return &value_; } // Get the address of the int data. template const int* data(const Protocol&) const { return &value_; } // Get the size of the int data. template std::size_t size(const Protocol&) const { return sizeof(value_); } // Set the size of the int data. template void resize(const Protocol&, std::size_t s) { if (s != sizeof(value_)) { std::length_error ex("integer socket option resize"); asio::detail::throw_exception(ex); } } private: int value_; }; // Helper template for implementing linger options. template class linger { public: // Default constructor. linger() { value_.l_onoff = 0; value_.l_linger = 0; } // Construct with specific option values. linger(bool e, int t) { enabled(e); timeout ASIO_PREVENT_MACRO_SUBSTITUTION(t); } // Set the value for whether linger is enabled. 
void enabled(bool value) { value_.l_onoff = value ? 1 : 0; } // Get the value for whether linger is enabled. bool enabled() const { return value_.l_onoff != 0; } // Set the value for the linger timeout. void timeout ASIO_PREVENT_MACRO_SUBSTITUTION(int value) { #if defined(WIN32) value_.l_linger = static_cast(value); #else value_.l_linger = value; #endif } // Get the value for the linger timeout. int timeout ASIO_PREVENT_MACRO_SUBSTITUTION() const { return static_cast(value_.l_linger); } // Get the level of the socket option. template int level(const Protocol&) const { return Level; } // Get the name of the socket option. template int name(const Protocol&) const { return Name; } // Get the address of the linger data. template detail::linger_type* data(const Protocol&) { return &value_; } // Get the address of the linger data. template const detail::linger_type* data(const Protocol&) const { return &value_; } // Get the size of the linger data. template std::size_t size(const Protocol&) const { return sizeof(value_); } // Set the size of the int data. template void resize(const Protocol&, std::size_t s) { if (s != sizeof(value_)) { std::length_error ex("linger socket option resize"); asio::detail::throw_exception(ex); } } private: detail::linger_type value_; }; } // namespace socket_option } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_SOCKET_OPTION_HPP galera-3-25.3.20/asio/asio/detail/eventfd_select_interrupter.hpp0000644000015300001660000000450713042054732024442 0ustar jenkinsjenkins// // detail/eventfd_select_interrupter.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Roelof Naude (roelof.naude at gmail dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_EVENTFD_SELECT_INTERRUPTER_HPP #define ASIO_DETAIL_EVENTFD_SELECT_INTERRUPTER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_EVENTFD) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class eventfd_select_interrupter { public: // Constructor. ASIO_DECL eventfd_select_interrupter(); // Destructor. ASIO_DECL ~eventfd_select_interrupter(); // Recreate the interrupter's descriptors. Used after a fork. ASIO_DECL void recreate(); // Interrupt the select call. ASIO_DECL void interrupt(); // Reset the select interrupt. Returns true if the call was interrupted. ASIO_DECL bool reset(); // Get the read descriptor to be passed to select. int read_descriptor() const { return read_descriptor_; } private: // Open the descriptors. Throws on error. ASIO_DECL void open_descriptors(); // Close the descriptors. ASIO_DECL void close_descriptors(); // The read end of a connection used to interrupt the select call. This file // descriptor is passed to select such that when it is time to stop, a single // 64bit value will be written on the other end of the connection and this // descriptor will become readable. int read_descriptor_; // The write end of a connection used to interrupt the select call. A single // 64bit non-zero value may be written to this to wake up the select which is // waiting for the other end to become readable. This descriptor will only // differ from the read descriptor when a pipe is used. 
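  // When the eventfd syscall is usable, both descriptors refer to the same
  // eventfd object; the pipe-based fallback is what makes them differ.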
int write_descriptor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/eventfd_select_interrupter.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_EVENTFD) #endif // ASIO_DETAIL_EVENTFD_SELECT_INTERRUPTER_HPP galera-3-25.3.20/asio/asio/detail/winrt_socket_recv_op.hpp0000644000015300001660000000663613042054732023242 0ustar jenkinsjenkins// // detail/winrt_socket_recv_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_SOCKET_RECV_OP_HPP #define ASIO_DETAIL_WINRT_SOCKET_RECV_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/winrt_async_op.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class winrt_socket_recv_op : public winrt_async_op { public: ASIO_DEFINE_HANDLER_PTR(winrt_socket_recv_op); winrt_socket_recv_op(const MutableBufferSequence& buffers, Handler& handler) : winrt_async_op( &winrt_socket_recv_op::do_complete), buffers_(buffers), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code&, std::size_t) { // Take ownership of the operation object. winrt_socket_recv_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) // Check whether buffers are still valid. if (owner) { buffer_sequence_adapter::validate(o->buffers_); } #endif // defined(ASIO_ENABLE_BUFFER_DEBUGGING) std::size_t bytes_transferred = o->result_ ? o->result_->Length : 0; if (bytes_transferred == 0 && !o->ec_ && !buffer_sequence_adapter::all_empty(o->buffers_)) { o->ec_ = asio::error::eof; } // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, bytes_transferred); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. 
if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: MutableBufferSequence buffers_; Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_WINRT_SOCKET_RECV_OP_HPP galera-3-25.3.20/asio/asio/detail/signal_init.hpp0000644000015300001660000000172713042054732021306 0ustar jenkinsjenkins// // detail/signal_init.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SIGNAL_INIT_HPP #define ASIO_DETAIL_SIGNAL_INIT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #include #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class signal_init { public: // Constructor. signal_init() { std::signal(Signal, SIG_IGN); } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #endif // ASIO_DETAIL_SIGNAL_INIT_HPP galera-3-25.3.20/asio/asio/detail/handler_invoke_helpers.hpp0000644000015300001660000000315213042054732023512 0ustar jenkinsjenkins// // detail/handler_invoke_helpers.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_HANDLER_INVOKE_HELPERS_HPP #define ASIO_DETAIL_HANDLER_INVOKE_HELPERS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/addressof.hpp" #include "asio/handler_invoke_hook.hpp" #include "asio/detail/push_options.hpp" // Calls to asio_handler_invoke must be made from a namespace that does not // contain overloads of this function. The asio_handler_invoke_helpers // namespace is defined here for that purpose. namespace asio_handler_invoke_helpers { template inline void invoke(Function& function, Context& context) { #if !defined(ASIO_HAS_HANDLER_HOOKS) Function tmp(function); tmp(); #else using asio::asio_handler_invoke; asio_handler_invoke(function, asio::detail::addressof(context)); #endif } template inline void invoke(const Function& function, Context& context) { #if !defined(ASIO_HAS_HANDLER_HOOKS) Function tmp(function); tmp(); #else using asio::asio_handler_invoke; asio_handler_invoke(function, asio::detail::addressof(context)); #endif } } // namespace asio_handler_invoke_helpers #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_HANDLER_INVOKE_HELPERS_HPP galera-3-25.3.20/asio/asio/detail/type_traits.hpp0000644000015300001660000000325213042054732021350 0ustar jenkinsjenkins// // detail/type_traits.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_TYPE_TRAITS_HPP #define ASIO_DETAIL_TYPE_TRAITS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_TYPE_TRAITS) # include #else // defined(ASIO_HAS_TYPE_TRAITS) # include # include # include # include # include # include # include # include #endif // defined(ASIO_HAS_TYPE_TRAITS) namespace asio { #if defined(ASIO_HAS_STD_TYPE_TRAITS) using std::add_const; using std::enable_if; using std::is_const; using std::is_convertible; using std::is_function; using std::is_same; using std::remove_pointer; using std::remove_reference; #else // defined(ASIO_HAS_STD_TYPE_TRAITS) using boost::add_const; template struct enable_if : boost::enable_if_c {}; using boost::is_const; using boost::is_convertible; using boost::is_function; using boost::is_same; using boost::remove_pointer; using boost::remove_reference; #endif // defined(ASIO_HAS_STD_TYPE_TRAITS) } // namespace asio #endif // ASIO_DETAIL_TYPE_TRAITS_HPP galera-3-25.3.20/asio/asio/detail/winrt_resolver_service.hpp0000644000015300001660000001172613042054732023612 0ustar jenkinsjenkins// // detail/winrt_resolver_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_RESOLVER_SERVICE_HPP #define ASIO_DETAIL_WINRT_RESOLVER_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include "asio/ip/basic_resolver_iterator.hpp" #include "asio/ip/basic_resolver_query.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/winrt_async_manager.hpp" #include "asio/detail/winrt_resolve_op.hpp" #include "asio/detail/winrt_utils.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class winrt_resolver_service { public: // The implementation type of the resolver. A cancellation token is used to // indicate to the asynchronous operation that the operation has been // cancelled. typedef socket_ops::shared_cancel_token_type implementation_type; // The endpoint type. typedef typename Protocol::endpoint endpoint_type; // The query type. typedef asio::ip::basic_resolver_query query_type; // The iterator type. typedef asio::ip::basic_resolver_iterator iterator_type; // Constructor. winrt_resolver_service(asio::io_service& io_service) : io_service_(use_service(io_service)), async_manager_(use_service(io_service)) { } // Destructor. ~winrt_resolver_service() { } // Destroy all user-defined handler objects owned by the service. void shutdown_service() { } // Perform any fork-related housekeeping. void fork_service(asio::io_service::fork_event) { } // Construct a new resolver implementation. void construct(implementation_type&) { } // Destroy a resolver implementation. void destroy(implementation_type&) { } // Cancel pending asynchronous operations. void cancel(implementation_type&) { } // Resolve a query to a list of entries. 
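  // (Synchronous overload: it blocks on the WinRT GetEndpointPairsAsync
  // operation through the async manager and converts any failure into an
  // asio::error_code.)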
iterator_type resolve(implementation_type&, const query_type& query, asio::error_code& ec) { try { using namespace Windows::Networking::Sockets; auto endpoint_pairs = async_manager_.sync( DatagramSocket::GetEndpointPairsAsync( winrt_utils::host_name(query.host_name()), winrt_utils::string(query.service_name())), ec); if (ec) return iterator_type(); return iterator_type::create( endpoint_pairs, query.hints(), query.host_name(), query.service_name()); } catch (Platform::Exception^ e) { ec = asio::error_code(e->HResult, asio::system_category()); return iterator_type(); } } // Asynchronously resolve a query to a list of entries. template void async_resolve(implementation_type&, const query_type& query, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef winrt_resolve_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(query, handler); ASIO_HANDLER_CREATION((p.p, "resolver", &impl, "async_resolve")); try { using namespace Windows::Networking::Sockets; async_manager_.async(DatagramSocket::GetEndpointPairsAsync( winrt_utils::host_name(query.host_name()), winrt_utils::string(query.service_name())), p.p); p.v = p.p = 0; } catch (Platform::Exception^ e) { p.p->ec_ = asio::error_code( e->HResult, asio::system_category()); io_service_.post_immediate_completion(p.p, is_continuation); p.v = p.p = 0; } } // Resolve an endpoint to a list of entries. iterator_type resolve(implementation_type&, const endpoint_type&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return iterator_type(); } // Asynchronously resolve an endpoint to a list of entries. template void async_resolve(implementation_type&, const endpoint_type&, Handler& handler) { asio::error_code ec = asio::error::operation_not_supported; const iterator_type iterator; io_service_.get_io_service().post( detail::bind_handler(handler, ec, iterator)); } private: io_service_impl& io_service_; winrt_async_manager& async_manager_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_WINRT_RESOLVER_SERVICE_HPP galera-3-25.3.20/asio/asio/detail/base_from_completion_cond.hpp0000644000015300001660000000315313042054732024172 0ustar jenkinsjenkins// // detail/base_from_completion_cond.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_BASE_FROM_COMPLETION_COND_HPP #define ASIO_DETAIL_BASE_FROM_COMPLETION_COND_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/completion_condition.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class base_from_completion_cond { protected: explicit base_from_completion_cond(CompletionCondition completion_condition) : completion_condition_(completion_condition) { } std::size_t check_for_completion( const asio::error_code& ec, std::size_t total_transferred) { return detail::adapt_completion_condition_result( completion_condition_(ec, total_transferred)); } private: CompletionCondition completion_condition_; }; template <> class base_from_completion_cond { protected: explicit base_from_completion_cond(transfer_all_t) { } static std::size_t check_for_completion( const asio::error_code& ec, std::size_t total_transferred) { return transfer_all_t()(ec, total_transferred); } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_BASE_FROM_COMPLETION_COND_HPP galera-3-25.3.20/asio/asio/detail/timer_queue.hpp0000644000015300001660000002123413042054732021325 0ustar jenkinsjenkins// // detail/timer_queue.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_TIMER_QUEUE_HPP #define ASIO_DETAIL_TIMER_QUEUE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/detail/cstdint.hpp" #include "asio/detail/date_time_fwd.hpp" #include "asio/detail/limits.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/timer_queue_base.hpp" #include "asio/detail/wait_op.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class timer_queue : public timer_queue_base { public: // The time type. typedef typename Time_Traits::time_type time_type; // The duration type. typedef typename Time_Traits::duration_type duration_type; // Per-timer data. class per_timer_data { public: per_timer_data() : next_(0), prev_(0) {} private: friend class timer_queue; // The operations waiting on the timer. op_queue op_queue_; // The index of the timer in the heap. std::size_t heap_index_; // Pointers to adjacent timers in a linked list. per_timer_data* next_; per_timer_data* prev_; }; // Constructor. timer_queue() : timers_(), heap_() { } // Add a new timer to the queue. Returns true if this is the timer that is // earliest in the queue, in which case the reactor's event demultiplexing // function call may need to be interrupted and restarted. bool enqueue_timer(const time_type& time, per_timer_data& timer, wait_op* op) { // Enqueue the timer object. if (timer.prev_ == 0 && &timer != timers_) { if (this->is_positive_infinity(time)) { // No heap entry is required for timers that never expire. timer.heap_index_ = (std::numeric_limits::max)(); } else { // Put the new timer at the correct position in the heap. This is done // first since push_back() can throw due to allocation failure. 
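        // The heap is a binary min-heap stored in a vector: the parent of
        // entry i lives at (i - 1) / 2, so up_heap() only needs to walk one
        // root-ward path after the push_back().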
timer.heap_index_ = heap_.size(); heap_entry entry = { time, &timer }; heap_.push_back(entry); up_heap(heap_.size() - 1); } // Insert the new timer into the linked list of active timers. timer.next_ = timers_; timer.prev_ = 0; if (timers_) timers_->prev_ = &timer; timers_ = &timer; } // Enqueue the individual timer operation. timer.op_queue_.push(op); // Interrupt reactor only if newly added timer is first to expire. return timer.heap_index_ == 0 && timer.op_queue_.front() == op; } // Whether there are no timers in the queue. virtual bool empty() const { return timers_ == 0; } // Get the time for the timer that is earliest in the queue. virtual long wait_duration_msec(long max_duration) const { if (heap_.empty()) return max_duration; return this->to_msec( Time_Traits::to_posix_duration( Time_Traits::subtract(heap_[0].time_, Time_Traits::now())), max_duration); } // Get the time for the timer that is earliest in the queue. virtual long wait_duration_usec(long max_duration) const { if (heap_.empty()) return max_duration; return this->to_usec( Time_Traits::to_posix_duration( Time_Traits::subtract(heap_[0].time_, Time_Traits::now())), max_duration); } // Dequeue all timers not later than the current time. virtual void get_ready_timers(op_queue& ops) { if (!heap_.empty()) { const time_type now = Time_Traits::now(); while (!heap_.empty() && !Time_Traits::less_than(now, heap_[0].time_)) { per_timer_data* timer = heap_[0].timer_; ops.push(timer->op_queue_); remove_timer(*timer); } } } // Dequeue all timers. virtual void get_all_timers(op_queue& ops) { while (timers_) { per_timer_data* timer = timers_; timers_ = timers_->next_; ops.push(timer->op_queue_); timer->next_ = 0; timer->prev_ = 0; } heap_.clear(); } // Cancel and dequeue operations for the given timer. std::size_t cancel_timer(per_timer_data& timer, op_queue& ops, std::size_t max_cancelled = (std::numeric_limits::max)()) { std::size_t num_cancelled = 0; if (timer.prev_ != 0 || &timer == timers_) { while (wait_op* op = (num_cancelled != max_cancelled) ? timer.op_queue_.front() : 0) { op->ec_ = asio::error::operation_aborted; timer.op_queue_.pop(); ops.push(op); ++num_cancelled; } if (timer.op_queue_.empty()) remove_timer(timer); } return num_cancelled; } private: // Move the item at the given index up the heap to its correct position. void up_heap(std::size_t index) { while (index > 0) { std::size_t parent = (index - 1) / 2; if (!Time_Traits::less_than(heap_[index].time_, heap_[parent].time_)) break; swap_heap(index, parent); index = parent; } } // Move the item at the given index down the heap to its correct position. void down_heap(std::size_t index) { std::size_t child = index * 2 + 1; while (child < heap_.size()) { std::size_t min_child = (child + 1 == heap_.size() || Time_Traits::less_than( heap_[child].time_, heap_[child + 1].time_)) ? child : child + 1; if (Time_Traits::less_than(heap_[index].time_, heap_[min_child].time_)) break; swap_heap(index, min_child); index = min_child; child = index * 2 + 1; } } // Swap two entries in the heap. void swap_heap(std::size_t index1, std::size_t index2) { heap_entry tmp = heap_[index1]; heap_[index1] = heap_[index2]; heap_[index2] = tmp; heap_[index1].timer_->heap_index_ = index1; heap_[index2].timer_->heap_index_ = index2; } // Remove a timer from the heap and list of timers. void remove_timer(per_timer_data& timer) { // Remove the timer from the heap. 
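    // Removal swaps the victim with the last heap entry, pops the back, and
    // then re-heapifies from the vacated slot in whichever direction the
    // ordering requires (up_heap or down_heap).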
std::size_t index = timer.heap_index_; if (!heap_.empty() && index < heap_.size()) { if (index == heap_.size() - 1) { heap_.pop_back(); } else { swap_heap(index, heap_.size() - 1); heap_.pop_back(); if (index > 0 && Time_Traits::less_than( heap_[index].time_, heap_[(index - 1) / 2].time_)) up_heap(index); else down_heap(index); } } // Remove the timer from the linked list of active timers. if (timers_ == &timer) timers_ = timer.next_; if (timer.prev_) timer.prev_->next_ = timer.next_; if (timer.next_) timer.next_->prev_= timer.prev_; timer.next_ = 0; timer.prev_ = 0; } // Determine if the specified absolute time is positive infinity. template static bool is_positive_infinity(const Time_Type&) { return false; } // Determine if the specified absolute time is positive infinity. template static bool is_positive_infinity( const boost::date_time::base_time& time) { return time.is_pos_infinity(); } // Helper function to convert a duration into milliseconds. template long to_msec(const Duration& d, long max_duration) const { if (d.ticks() <= 0) return 0; int64_t msec = d.total_milliseconds(); if (msec == 0) return 1; if (msec > max_duration) return max_duration; return static_cast(msec); } // Helper function to convert a duration into microseconds. template long to_usec(const Duration& d, long max_duration) const { if (d.ticks() <= 0) return 0; int64_t usec = d.total_microseconds(); if (usec == 0) return 1; if (usec > max_duration) return max_duration; return static_cast(usec); } // The head of a linked list of all active timers. per_timer_data* timers_; struct heap_entry { // The time when the timer should fire. time_type time_; // The associated timer with enqueued operations. per_timer_data* timer_; }; // The heap of timers, with the earliest timer at the front. std::vector heap_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_TIMER_QUEUE_HPP galera-3-25.3.20/asio/asio/detail/chrono_time_traits.hpp0000644000015300001660000001045213042054732022675 0ustar jenkinsjenkins// // detail/chrono_time_traits.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_CHRONO_TIME_TRAITS_HPP #define ASIO_DETAIL_CHRONO_TIME_TRAITS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/cstdint.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Helper template to compute the greatest common divisor. template struct gcd { enum { value = gcd::value }; }; template struct gcd { enum { value = v1 }; }; // Adapts std::chrono clocks for use with a deadline timer. template struct chrono_time_traits { // The clock type. typedef Clock clock_type; // The duration type of the clock. typedef typename clock_type::duration duration_type; // The time point type of the clock. typedef typename clock_type::time_point time_type; // The period of the clock. typedef typename duration_type::period period_type; // Get the current time. static time_type now() { return clock_type::now(); } // Add a duration to a time. 
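  // The result saturates at (time_type::max)() or (time_type::min)() rather
  // than being allowed to overflow.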
static time_type add(const time_type& t, const duration_type& d) { const time_type epoch; if (t >= epoch) { if ((time_type::max)() - t < d) return (time_type::max)(); } else // t < epoch { if (-(t - (time_type::min)()) > d) return (time_type::min)(); } return t + d; } // Subtract one time from another. static duration_type subtract(const time_type& t1, const time_type& t2) { const time_type epoch; if (t1 >= epoch) { if (t2 >= epoch) { return t1 - t2; } else if (t2 == (time_type::min)()) { return (duration_type::max)(); } else if ((time_type::max)() - t1 < epoch - t2) { return (duration_type::max)(); } else { return t1 - t2; } } else // t1 < epoch { if (t2 < epoch) { return t1 - t2; } else if (t1 == (time_type::min)()) { return (duration_type::min)(); } else if ((time_type::max)() - t2 < epoch - t1) { return (duration_type::min)(); } else { return -(t2 - t1); } } } // Test whether one time is less than another. static bool less_than(const time_type& t1, const time_type& t2) { return t1 < t2; } // Implement just enough of the posix_time::time_duration interface to supply // what the timer_queue requires. class posix_time_duration { public: explicit posix_time_duration(const duration_type& d) : d_(d) { } int64_t ticks() const { return d_.count(); } int64_t total_seconds() const { return duration_cast<1, 1>(); } int64_t total_milliseconds() const { return duration_cast<1, 1000>(); } int64_t total_microseconds() const { return duration_cast<1, 1000000>(); } private: template int64_t duration_cast() const { const int64_t num1 = period_type::num / gcd::value; const int64_t num2 = Num / gcd::value; const int64_t den1 = period_type::den / gcd::value; const int64_t den2 = Den / gcd::value; const int64_t num = num1 * den2; const int64_t den = num2 * den1; if (num == 1 && den == 1) return ticks(); else if (num != 1 && den == 1) return ticks() * num; else if (num == 1 && period_type::den != 1) return ticks() / den; else return ticks() * num / den; } duration_type d_; }; // Convert to POSIX duration type. static posix_time_duration to_posix_duration(const duration_type& d) { return posix_time_duration(WaitTraits::to_wait_duration(d)); } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_CHRONO_TIME_TRAITS_HPP galera-3-25.3.20/asio/asio/detail/reactor_op.hpp0000644000015300001660000000256113042054732021140 0ustar jenkinsjenkins// // detail/reactor_op.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTOR_OP_HPP #define ASIO_DETAIL_REACTOR_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class reactor_op : public operation { public: // The error code to be passed to the completion handler. asio::error_code ec_; // The number of bytes transferred, to be passed to the completion handler. std::size_t bytes_transferred_; // Perform the operation. Returns true if it is finished. 
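  // perform_func_ is supplied by the concrete operation (for example,
  // reactive_socket_connect_op_base::do_perform) so the reactor can run the
  // operation without knowing its dynamic type.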
bool perform() { return perform_func_(this); } protected: typedef bool (*perform_func_type)(reactor_op*); reactor_op(perform_func_type perform_func, func_type complete_func) : operation(complete_func), bytes_transferred_(0), perform_func_(perform_func) { } private: perform_func_type perform_func_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_REACTOR_OP_HPP galera-3-25.3.20/asio/asio/detail/win_static_mutex.hpp0000644000015300001660000000340313042054732022365 0ustar jenkinsjenkins// // detail/win_static_mutex.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_STATIC_MUTEX_HPP #define ASIO_DETAIL_WIN_STATIC_MUTEX_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) #include "asio/detail/scoped_lock.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct win_static_mutex { typedef asio::detail::scoped_lock scoped_lock; // Initialise the mutex. ASIO_DECL void init(); // Initialisation must be performed in a separate function to the "public" // init() function since the compiler does not support the use of structured // exceptions and C++ exceptions in the same function. ASIO_DECL int do_init(); // Lock the mutex. void lock() { ::EnterCriticalSection(&crit_section_); } // Unlock the mutex. void unlock() { ::LeaveCriticalSection(&crit_section_); } bool initialised_; ::CRITICAL_SECTION crit_section_; }; #if defined(UNDER_CE) # define ASIO_WIN_STATIC_MUTEX_INIT { false, { 0, 0, 0, 0, 0 } } #else // defined(UNDER_CE) # define ASIO_WIN_STATIC_MUTEX_INIT { false, { 0, 0, 0, 0, 0, 0 } } #endif // defined(UNDER_CE) } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/win_static_mutex.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_WINDOWS) #endif // ASIO_DETAIL_WIN_STATIC_MUTEX_HPP galera-3-25.3.20/asio/asio/detail/noncopyable.hpp0000644000015300001660000000162613042054732021315 0ustar jenkinsjenkins// // detail/noncopyable.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_NONCOPYABLE_HPP #define ASIO_DETAIL_NONCOPYABLE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class noncopyable { protected: noncopyable() {} ~noncopyable() {} private: noncopyable(const noncopyable&); const noncopyable& operator=(const noncopyable&); }; } // namespace detail using asio::detail::noncopyable; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_NONCOPYABLE_HPP galera-3-25.3.20/asio/asio/detail/local_free_on_block_exit.hpp0000644000015300001660000000241313042054732023771 0ustar jenkinsjenkins// // detail/local_free_on_block_exit.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_LOCAL_FREE_ON_BLOCK_EXIT_HPP #define ASIO_DETAIL_LOCAL_FREE_ON_BLOCK_EXIT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) #include "asio/detail/noncopyable.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class local_free_on_block_exit : private noncopyable { public: // Constructor blocks all signals for the calling thread. explicit local_free_on_block_exit(void* p) : p_(p) { } // Destructor restores the previous signal mask. ~local_free_on_block_exit() { ::LocalFree(p_); } private: void* p_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) #endif // ASIO_DETAIL_LOCAL_FREE_ON_BLOCK_EXIT_HPP galera-3-25.3.20/asio/asio/detail/operation.hpp0000644000015300001660000000157013042054732021002 0ustar jenkinsjenkins// // detail/operation.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_OPERATION_HPP #define ASIO_DETAIL_OPERATION_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) # include "asio/detail/win_iocp_operation.hpp" #else # include "asio/detail/task_io_service_operation.hpp" #endif namespace asio { namespace detail { #if defined(ASIO_HAS_IOCP) typedef win_iocp_operation operation; #else typedef task_io_service_operation operation; #endif } // namespace detail } // namespace asio #endif // ASIO_DETAIL_OPERATION_HPP galera-3-25.3.20/asio/asio/detail/null_reactor.hpp0000644000015300001660000000247113042054732021474 0ustar jenkinsjenkins// // detail/null_reactor.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_NULL_REACTOR_HPP #define ASIO_DETAIL_NULL_REACTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include "asio/io_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class null_reactor : public asio::detail::service_base { public: // Constructor. null_reactor(asio::io_service& io_service) : asio::detail::service_base(io_service) { } // Destructor. ~null_reactor() { } // Destroy all user-defined handler objects owned by the service. void shutdown_service() { } // No-op because should never be called. void run(bool /*block*/, op_queue& /*ops*/) { } // No-op. 
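// Illustrative sketch (not part of the bundled asio sources):
// local_free_on_block_exit above is a small RAII scope guard that calls
// ::LocalFree() on a buffer when the enclosing block exits by any path.
// (Its constructor/destructor comments about signal masks appear to be
// copy-pasted from a signal-blocking helper and do not describe what the
// class does.) A portable rendering of the same idiom around std::free(),
// using the hypothetical name free_on_block_exit:
#include <cstdio>
#include <cstdlib>

class free_on_block_exit
{
public:
  // Take ownership of a malloc()'d buffer.
  explicit free_on_block_exit(void* p) : p_(p) {}

  // Free it when the block exits, whether normally or via an exception.
  ~free_on_block_exit() { std::free(p_); }

private:
  free_on_block_exit(const free_on_block_exit&);             // noncopyable
  const free_on_block_exit& operator=(const free_on_block_exit&);
  void* p_;
};

int main()
{
  void* buf = std::malloc(64);
  free_on_block_exit guard(buf);   // buf is released on every exit path
  std::printf("buffer at %p\n", buf);
  return 0;
}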
void interrupt() { } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_NULL_REACTOR_HPP galera-3-25.3.20/asio/asio/detail/socket_types.hpp0000644000015300001660000003411013042054732021512 0ustar jenkinsjenkins// // detail/socket_types.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SOCKET_TYPES_HPP #define ASIO_DETAIL_SOCKET_TYPES_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) // Empty. #elif defined(ASIO_WINDOWS) || defined(__CYGWIN__) # if defined(_WINSOCKAPI_) && !defined(_WINSOCK2API_) # error WinSock.h has already been included # endif // defined(_WINSOCKAPI_) && !defined(_WINSOCK2API_) # if defined(__BORLANDC__) # include // Needed for __errno # if !defined(_WSPIAPI_H_) # define _WSPIAPI_H_ # define ASIO_WSPIAPI_H_DEFINED # endif // !defined(_WSPIAPI_H_) # endif // defined(__BORLANDC__) # if defined(WINAPI_FAMILY) # if ((WINAPI_FAMILY & WINAPI_PARTITION_DESKTOP) != 0) # include # endif // ((WINAPI_FAMILY & WINAPI_PARTITION_DESKTOP) != 0) # endif // defined(WINAPI_FAMILY) # include # include # include # if defined(ASIO_WSPIAPI_H_DEFINED) # undef _WSPIAPI_H_ # undef ASIO_WSPIAPI_H_DEFINED # endif // defined(ASIO_WSPIAPI_H_DEFINED) # if !defined(ASIO_NO_DEFAULT_LINKED_LIBS) # if defined(UNDER_CE) # pragma comment(lib, "ws2.lib") # elif defined(_MSC_VER) || defined(__BORLANDC__) # pragma comment(lib, "ws2_32.lib") # pragma comment(lib, "mswsock.lib") # endif // defined(_MSC_VER) || defined(__BORLANDC__) # endif // !defined(ASIO_NO_DEFAULT_LINKED_LIBS) # include "asio/detail/old_win_sdk_compat.hpp" #else # include # if !defined(__SYMBIAN32__) # include # endif # include # include # include # if defined(__hpux) # include # endif # if !defined(__hpux) || defined(__SELECT) # include # endif # include # include # include # include # if !defined(__SYMBIAN32__) # include # endif # include # include # include # include # if defined(__sun) # include # include # endif #endif #include "asio/detail/push_options.hpp" namespace asio { namespace detail { #if defined(ASIO_WINDOWS_RUNTIME) const int max_addr_v4_str_len = 256; const int max_addr_v6_str_len = 256; typedef unsigned __int32 u_long_type; typedef unsigned __int16 u_short_type; struct in4_addr_type { u_long_type s_addr; }; struct in4_mreq_type { in4_addr_type imr_multiaddr, imr_interface; }; struct in6_addr_type { unsigned char s6_addr[16]; }; struct in6_mreq_type { in6_addr_type ipv6mr_multiaddr; unsigned long ipv6mr_interface; }; struct socket_addr_type { int sa_family; }; struct sockaddr_in4_type { int sin_family; in4_addr_type sin_addr; u_short_type sin_port; }; struct sockaddr_in6_type { int sin6_family; in6_addr_type sin6_addr; u_short_type sin6_port; u_long_type sin6_flowinfo; u_long_type sin6_scope_id; }; struct sockaddr_storage_type { int ss_family; unsigned char ss_bytes[128 - sizeof(int)]; }; struct addrinfo_type { int ai_flags; int ai_family, ai_socktype, ai_protocol; int ai_addrlen; const void* ai_addr; const char* ai_canonname; addrinfo_type* ai_next; }; struct linger_type { u_short_type l_onoff, l_linger; }; typedef u_long_type ioctl_arg_type; typedef int 
signed_size_type; # define ASIO_OS_DEF(c) ASIO_OS_DEF_##c # define ASIO_OS_DEF_AF_UNSPEC 0 # define ASIO_OS_DEF_AF_INET 2 # define ASIO_OS_DEF_AF_INET6 23 # define ASIO_OS_DEF_SOCK_STREAM 1 # define ASIO_OS_DEF_SOCK_DGRAM 2 # define ASIO_OS_DEF_SOCK_RAW 3 # define ASIO_OS_DEF_SOCK_SEQPACKET 5 # define ASIO_OS_DEF_IPPROTO_IP 0 # define ASIO_OS_DEF_IPPROTO_IPV6 41 # define ASIO_OS_DEF_IPPROTO_TCP 6 # define ASIO_OS_DEF_IPPROTO_UDP 17 # define ASIO_OS_DEF_IPPROTO_ICMP 1 # define ASIO_OS_DEF_IPPROTO_ICMPV6 58 # define ASIO_OS_DEF_FIONBIO 1 # define ASIO_OS_DEF_FIONREAD 2 # define ASIO_OS_DEF_INADDR_ANY 0 # define ASIO_OS_DEF_MSG_OOB 0x1 # define ASIO_OS_DEF_MSG_PEEK 0x2 # define ASIO_OS_DEF_MSG_DONTROUTE 0x4 # define ASIO_OS_DEF_MSG_EOR 0 // Not supported. # define ASIO_OS_DEF_SHUT_RD 0x0 # define ASIO_OS_DEF_SHUT_WR 0x1 # define ASIO_OS_DEF_SHUT_RDWR 0x2 # define ASIO_OS_DEF_SOMAXCONN 0x7fffffff # define ASIO_OS_DEF_SOL_SOCKET 0xffff # define ASIO_OS_DEF_SO_BROADCAST 0x20 # define ASIO_OS_DEF_SO_DEBUG 0x1 # define ASIO_OS_DEF_SO_DONTROUTE 0x10 # define ASIO_OS_DEF_SO_KEEPALIVE 0x8 # define ASIO_OS_DEF_SO_LINGER 0x80 # define ASIO_OS_DEF_SO_SNDBUF 0x1001 # define ASIO_OS_DEF_SO_RCVBUF 0x1002 # define ASIO_OS_DEF_SO_SNDLOWAT 0x1003 # define ASIO_OS_DEF_SO_RCVLOWAT 0x1004 # define ASIO_OS_DEF_SO_REUSEADDR 0x4 # define ASIO_OS_DEF_TCP_NODELAY 0x1 # define ASIO_OS_DEF_IP_MULTICAST_IF 2 # define ASIO_OS_DEF_IP_MULTICAST_TTL 3 # define ASIO_OS_DEF_IP_MULTICAST_LOOP 4 # define ASIO_OS_DEF_IP_ADD_MEMBERSHIP 5 # define ASIO_OS_DEF_IP_DROP_MEMBERSHIP 6 # define ASIO_OS_DEF_IP_TTL 7 # define ASIO_OS_DEF_IPV6_UNICAST_HOPS 4 # define ASIO_OS_DEF_IPV6_MULTICAST_IF 9 # define ASIO_OS_DEF_IPV6_MULTICAST_HOPS 10 # define ASIO_OS_DEF_IPV6_MULTICAST_LOOP 11 # define ASIO_OS_DEF_IPV6_JOIN_GROUP 12 # define ASIO_OS_DEF_IPV6_LEAVE_GROUP 13 # define ASIO_OS_DEF_AI_CANONNAME 0x2 # define ASIO_OS_DEF_AI_PASSIVE 0x1 # define ASIO_OS_DEF_AI_NUMERICHOST 0x4 # define ASIO_OS_DEF_AI_NUMERICSERV 0x8 # define ASIO_OS_DEF_AI_V4MAPPED 0x800 # define ASIO_OS_DEF_AI_ALL 0x100 # define ASIO_OS_DEF_AI_ADDRCONFIG 0x400 #elif defined(ASIO_WINDOWS) || defined(__CYGWIN__) typedef SOCKET socket_type; const SOCKET invalid_socket = INVALID_SOCKET; const int socket_error_retval = SOCKET_ERROR; const int max_addr_v4_str_len = 256; const int max_addr_v6_str_len = 256; typedef sockaddr socket_addr_type; typedef in_addr in4_addr_type; typedef ip_mreq in4_mreq_type; typedef sockaddr_in sockaddr_in4_type; # if defined(ASIO_HAS_OLD_WIN_SDK) typedef in6_addr_emulation in6_addr_type; typedef ipv6_mreq_emulation in6_mreq_type; typedef sockaddr_in6_emulation sockaddr_in6_type; typedef sockaddr_storage_emulation sockaddr_storage_type; typedef addrinfo_emulation addrinfo_type; # else typedef in6_addr in6_addr_type; typedef ipv6_mreq in6_mreq_type; typedef sockaddr_in6 sockaddr_in6_type; typedef sockaddr_storage sockaddr_storage_type; typedef addrinfo addrinfo_type; # endif typedef ::linger linger_type; typedef unsigned long ioctl_arg_type; typedef u_long u_long_type; typedef u_short u_short_type; typedef int signed_size_type; # define ASIO_OS_DEF(c) ASIO_OS_DEF_##c # define ASIO_OS_DEF_AF_UNSPEC AF_UNSPEC # define ASIO_OS_DEF_AF_INET AF_INET # define ASIO_OS_DEF_AF_INET6 AF_INET6 # define ASIO_OS_DEF_SOCK_STREAM SOCK_STREAM # define ASIO_OS_DEF_SOCK_DGRAM SOCK_DGRAM # define ASIO_OS_DEF_SOCK_RAW SOCK_RAW # define ASIO_OS_DEF_SOCK_SEQPACKET SOCK_SEQPACKET # define ASIO_OS_DEF_IPPROTO_IP IPPROTO_IP # define ASIO_OS_DEF_IPPROTO_IPV6 IPPROTO_IPV6 # define 
ASIO_OS_DEF_IPPROTO_TCP IPPROTO_TCP # define ASIO_OS_DEF_IPPROTO_UDP IPPROTO_UDP # define ASIO_OS_DEF_IPPROTO_ICMP IPPROTO_ICMP # define ASIO_OS_DEF_IPPROTO_ICMPV6 IPPROTO_ICMPV6 # define ASIO_OS_DEF_FIONBIO FIONBIO # define ASIO_OS_DEF_FIONREAD FIONREAD # define ASIO_OS_DEF_INADDR_ANY INADDR_ANY # define ASIO_OS_DEF_MSG_OOB MSG_OOB # define ASIO_OS_DEF_MSG_PEEK MSG_PEEK # define ASIO_OS_DEF_MSG_DONTROUTE MSG_DONTROUTE # define ASIO_OS_DEF_MSG_EOR 0 // Not supported on Windows. # define ASIO_OS_DEF_SHUT_RD SD_RECEIVE # define ASIO_OS_DEF_SHUT_WR SD_SEND # define ASIO_OS_DEF_SHUT_RDWR SD_BOTH # define ASIO_OS_DEF_SOMAXCONN SOMAXCONN # define ASIO_OS_DEF_SOL_SOCKET SOL_SOCKET # define ASIO_OS_DEF_SO_BROADCAST SO_BROADCAST # define ASIO_OS_DEF_SO_DEBUG SO_DEBUG # define ASIO_OS_DEF_SO_DONTROUTE SO_DONTROUTE # define ASIO_OS_DEF_SO_KEEPALIVE SO_KEEPALIVE # define ASIO_OS_DEF_SO_LINGER SO_LINGER # define ASIO_OS_DEF_SO_SNDBUF SO_SNDBUF # define ASIO_OS_DEF_SO_RCVBUF SO_RCVBUF # define ASIO_OS_DEF_SO_SNDLOWAT SO_SNDLOWAT # define ASIO_OS_DEF_SO_RCVLOWAT SO_RCVLOWAT # define ASIO_OS_DEF_SO_REUSEADDR SO_REUSEADDR # define ASIO_OS_DEF_TCP_NODELAY TCP_NODELAY # define ASIO_OS_DEF_IP_MULTICAST_IF IP_MULTICAST_IF # define ASIO_OS_DEF_IP_MULTICAST_TTL IP_MULTICAST_TTL # define ASIO_OS_DEF_IP_MULTICAST_LOOP IP_MULTICAST_LOOP # define ASIO_OS_DEF_IP_ADD_MEMBERSHIP IP_ADD_MEMBERSHIP # define ASIO_OS_DEF_IP_DROP_MEMBERSHIP IP_DROP_MEMBERSHIP # define ASIO_OS_DEF_IP_TTL IP_TTL # define ASIO_OS_DEF_IPV6_UNICAST_HOPS IPV6_UNICAST_HOPS # define ASIO_OS_DEF_IPV6_MULTICAST_IF IPV6_MULTICAST_IF # define ASIO_OS_DEF_IPV6_MULTICAST_HOPS IPV6_MULTICAST_HOPS # define ASIO_OS_DEF_IPV6_MULTICAST_LOOP IPV6_MULTICAST_LOOP # define ASIO_OS_DEF_IPV6_JOIN_GROUP IPV6_JOIN_GROUP # define ASIO_OS_DEF_IPV6_LEAVE_GROUP IPV6_LEAVE_GROUP # define ASIO_OS_DEF_AI_CANONNAME AI_CANONNAME # define ASIO_OS_DEF_AI_PASSIVE AI_PASSIVE # define ASIO_OS_DEF_AI_NUMERICHOST AI_NUMERICHOST # if defined(AI_NUMERICSERV) # define ASIO_OS_DEF_AI_NUMERICSERV AI_NUMERICSERV # else # define ASIO_OS_DEF_AI_NUMERICSERV 0 # endif # if defined(AI_V4MAPPED) # define ASIO_OS_DEF_AI_V4MAPPED AI_V4MAPPED # else # define ASIO_OS_DEF_AI_V4MAPPED 0 # endif # if defined(AI_ALL) # define ASIO_OS_DEF_AI_ALL AI_ALL # else # define ASIO_OS_DEF_AI_ALL 0 # endif # if defined(AI_ADDRCONFIG) # define ASIO_OS_DEF_AI_ADDRCONFIG AI_ADDRCONFIG # else # define ASIO_OS_DEF_AI_ADDRCONFIG 0 # endif # if defined (_WIN32_WINNT) const int max_iov_len = 64; # else const int max_iov_len = 16; # endif #else typedef int socket_type; const int invalid_socket = -1; const int socket_error_retval = -1; const int max_addr_v4_str_len = INET_ADDRSTRLEN; #if defined(INET6_ADDRSTRLEN) const int max_addr_v6_str_len = INET6_ADDRSTRLEN + 1 + IF_NAMESIZE; #else // defined(INET6_ADDRSTRLEN) const int max_addr_v6_str_len = 256; #endif // defined(INET6_ADDRSTRLEN) typedef sockaddr socket_addr_type; typedef in_addr in4_addr_type; # if defined(__hpux) // HP-UX doesn't provide ip_mreq when _XOPEN_SOURCE_EXTENDED is defined. 
struct in4_mreq_type { struct in_addr imr_multiaddr; struct in_addr imr_interface; }; # else typedef ip_mreq in4_mreq_type; # endif typedef sockaddr_in sockaddr_in4_type; typedef in6_addr in6_addr_type; typedef ipv6_mreq in6_mreq_type; typedef sockaddr_in6 sockaddr_in6_type; typedef sockaddr_storage sockaddr_storage_type; typedef sockaddr_un sockaddr_un_type; typedef addrinfo addrinfo_type; typedef ::linger linger_type; typedef int ioctl_arg_type; typedef uint32_t u_long_type; typedef uint16_t u_short_type; #if defined(ASIO_HAS_SSIZE_T) typedef ssize_t signed_size_type; #else // defined(ASIO_HAS_SSIZE_T) typedef int signed_size_type; #endif // defined(ASIO_HAS_SSIZE_T) # define ASIO_OS_DEF(c) ASIO_OS_DEF_##c # define ASIO_OS_DEF_AF_UNSPEC AF_UNSPEC # define ASIO_OS_DEF_AF_INET AF_INET # define ASIO_OS_DEF_AF_INET6 AF_INET6 # define ASIO_OS_DEF_SOCK_STREAM SOCK_STREAM # define ASIO_OS_DEF_SOCK_DGRAM SOCK_DGRAM # define ASIO_OS_DEF_SOCK_RAW SOCK_RAW # define ASIO_OS_DEF_SOCK_SEQPACKET SOCK_SEQPACKET # define ASIO_OS_DEF_IPPROTO_IP IPPROTO_IP # define ASIO_OS_DEF_IPPROTO_IPV6 IPPROTO_IPV6 # define ASIO_OS_DEF_IPPROTO_TCP IPPROTO_TCP # define ASIO_OS_DEF_IPPROTO_UDP IPPROTO_UDP # define ASIO_OS_DEF_IPPROTO_ICMP IPPROTO_ICMP # define ASIO_OS_DEF_IPPROTO_ICMPV6 IPPROTO_ICMPV6 # define ASIO_OS_DEF_FIONBIO FIONBIO # define ASIO_OS_DEF_FIONREAD FIONREAD # define ASIO_OS_DEF_INADDR_ANY INADDR_ANY # define ASIO_OS_DEF_MSG_OOB MSG_OOB # define ASIO_OS_DEF_MSG_PEEK MSG_PEEK # define ASIO_OS_DEF_MSG_DONTROUTE MSG_DONTROUTE # define ASIO_OS_DEF_MSG_EOR MSG_EOR # define ASIO_OS_DEF_SHUT_RD SHUT_RD # define ASIO_OS_DEF_SHUT_WR SHUT_WR # define ASIO_OS_DEF_SHUT_RDWR SHUT_RDWR # define ASIO_OS_DEF_SOMAXCONN SOMAXCONN # define ASIO_OS_DEF_SOL_SOCKET SOL_SOCKET # define ASIO_OS_DEF_SO_BROADCAST SO_BROADCAST # define ASIO_OS_DEF_SO_DEBUG SO_DEBUG # define ASIO_OS_DEF_SO_DONTROUTE SO_DONTROUTE # define ASIO_OS_DEF_SO_KEEPALIVE SO_KEEPALIVE # define ASIO_OS_DEF_SO_LINGER SO_LINGER # define ASIO_OS_DEF_SO_SNDBUF SO_SNDBUF # define ASIO_OS_DEF_SO_RCVBUF SO_RCVBUF # define ASIO_OS_DEF_SO_SNDLOWAT SO_SNDLOWAT # define ASIO_OS_DEF_SO_RCVLOWAT SO_RCVLOWAT # define ASIO_OS_DEF_SO_REUSEADDR SO_REUSEADDR # define ASIO_OS_DEF_TCP_NODELAY TCP_NODELAY # define ASIO_OS_DEF_IP_MULTICAST_IF IP_MULTICAST_IF # define ASIO_OS_DEF_IP_MULTICAST_TTL IP_MULTICAST_TTL # define ASIO_OS_DEF_IP_MULTICAST_LOOP IP_MULTICAST_LOOP # define ASIO_OS_DEF_IP_ADD_MEMBERSHIP IP_ADD_MEMBERSHIP # define ASIO_OS_DEF_IP_DROP_MEMBERSHIP IP_DROP_MEMBERSHIP # define ASIO_OS_DEF_IP_TTL IP_TTL # define ASIO_OS_DEF_IPV6_UNICAST_HOPS IPV6_UNICAST_HOPS # define ASIO_OS_DEF_IPV6_MULTICAST_IF IPV6_MULTICAST_IF # define ASIO_OS_DEF_IPV6_MULTICAST_HOPS IPV6_MULTICAST_HOPS # define ASIO_OS_DEF_IPV6_MULTICAST_LOOP IPV6_MULTICAST_LOOP # define ASIO_OS_DEF_IPV6_JOIN_GROUP IPV6_JOIN_GROUP # define ASIO_OS_DEF_IPV6_LEAVE_GROUP IPV6_LEAVE_GROUP # define ASIO_OS_DEF_AI_CANONNAME AI_CANONNAME # define ASIO_OS_DEF_AI_PASSIVE AI_PASSIVE # define ASIO_OS_DEF_AI_NUMERICHOST AI_NUMERICHOST # if defined(AI_NUMERICSERV) # define ASIO_OS_DEF_AI_NUMERICSERV AI_NUMERICSERV # else # define ASIO_OS_DEF_AI_NUMERICSERV 0 # endif // Note: QNX Neutrino 6.3 defines AI_V4MAPPED, AI_ALL and AI_ADDRCONFIG but // does not implement them. Therefore they are specifically excluded here. 
# if defined(AI_V4MAPPED) && !defined(__QNXNTO__) # define ASIO_OS_DEF_AI_V4MAPPED AI_V4MAPPED # else # define ASIO_OS_DEF_AI_V4MAPPED 0 # endif # if defined(AI_ALL) && !defined(__QNXNTO__) # define ASIO_OS_DEF_AI_ALL AI_ALL # else # define ASIO_OS_DEF_AI_ALL 0 # endif # if defined(AI_ADDRCONFIG) && !defined(__QNXNTO__) # define ASIO_OS_DEF_AI_ADDRCONFIG AI_ADDRCONFIG # else # define ASIO_OS_DEF_AI_ADDRCONFIG 0 # endif # if defined(IOV_MAX) const int max_iov_len = IOV_MAX; # else // POSIX platforms are not required to define IOV_MAX. const int max_iov_len = 16; # endif #endif const int custom_socket_option_level = 0xA5100000; const int enable_connection_aborted_option = 1; const int always_fail_option = 2; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_SOCKET_TYPES_HPP galera-3-25.3.20/asio/asio/detail/win_iocp_handle_service.hpp0000644000015300001660000002653613042054732023655 0ustar jenkinsjenkins// // detail/win_iocp_handle_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_HANDLE_SERVICE_HPP #define ASIO_DETAIL_WIN_IOCP_HANDLE_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/cstdint.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/win_iocp_handle_read_op.hpp" #include "asio/detail/win_iocp_handle_write_op.hpp" #include "asio/detail/win_iocp_io_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class win_iocp_handle_service { public: // The native type of a stream handle. typedef HANDLE native_handle_type; // The implementation type of the stream handle. class implementation_type { public: // Default constructor. implementation_type() : handle_(INVALID_HANDLE_VALUE), safe_cancellation_thread_id_(0), next_(0), prev_(0) { } private: // Only this service will have access to the internal values. friend class win_iocp_handle_service; // The native stream handle representation. native_handle_type handle_; // The ID of the thread from which it is safe to cancel asynchronous // operations. 0 means no asynchronous operations have been started yet. // ~0 means asynchronous operations have been started from more than one // thread, and cancellation is not supported for the handle. DWORD safe_cancellation_thread_id_; // Pointers to adjacent handle implementations in linked list. implementation_type* next_; implementation_type* prev_; }; ASIO_DECL win_iocp_handle_service(asio::io_service& io_service); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Construct a new handle implementation. ASIO_DECL void construct(implementation_type& impl); // Move-construct a new handle implementation. ASIO_DECL void move_construct(implementation_type& impl, implementation_type& other_impl); // Move-assign from another handle implementation. 
ASIO_DECL void move_assign(implementation_type& impl, win_iocp_handle_service& other_service, implementation_type& other_impl); // Destroy a handle implementation. ASIO_DECL void destroy(implementation_type& impl); // Assign a native handle to a handle implementation. ASIO_DECL asio::error_code assign(implementation_type& impl, const native_handle_type& handle, asio::error_code& ec); // Determine whether the handle is open. bool is_open(const implementation_type& impl) const { return impl.handle_ != INVALID_HANDLE_VALUE; } // Destroy a handle implementation. ASIO_DECL asio::error_code close(implementation_type& impl, asio::error_code& ec); // Get the native handle representation. native_handle_type native_handle(const implementation_type& impl) const { return impl.handle_; } // Cancel all operations associated with the handle. ASIO_DECL asio::error_code cancel(implementation_type& impl, asio::error_code& ec); // Write the given data. Returns the number of bytes written. template size_t write_some(implementation_type& impl, const ConstBufferSequence& buffers, asio::error_code& ec) { return write_some_at(impl, 0, buffers, ec); } // Write the given data at the specified offset. Returns the number of bytes // written. template size_t write_some_at(implementation_type& impl, uint64_t offset, const ConstBufferSequence& buffers, asio::error_code& ec) { asio::const_buffer buffer = buffer_sequence_adapter::first(buffers); return do_write(impl, offset, buffer, ec); } // Start an asynchronous write. The data being written must be valid for the // lifetime of the asynchronous operation. template void async_write_some(implementation_type& impl, const ConstBufferSequence& buffers, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_handle_write_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(buffers, handler); ASIO_HANDLER_CREATION((p.p, "handle", &impl, "async_write_some")); start_write_op(impl, 0, buffer_sequence_adapter::first(buffers), p.p); p.v = p.p = 0; } // Start an asynchronous write at a specified offset. The data being written // must be valid for the lifetime of the asynchronous operation. template void async_write_some_at(implementation_type& impl, uint64_t offset, const ConstBufferSequence& buffers, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_handle_write_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(buffers, handler); ASIO_HANDLER_CREATION((p.p, "handle", &impl, "async_write_some_at")); start_write_op(impl, offset, buffer_sequence_adapter::first(buffers), p.p); p.v = p.p = 0; } // Read some data. Returns the number of bytes received. template size_t read_some(implementation_type& impl, const MutableBufferSequence& buffers, asio::error_code& ec) { return read_some_at(impl, 0, buffers, ec); } // Read some data at a specified offset. Returns the number of bytes received. template size_t read_some_at(implementation_type& impl, uint64_t offset, const MutableBufferSequence& buffers, asio::error_code& ec) { asio::mutable_buffer buffer = buffer_sequence_adapter::first(buffers); return do_read(impl, offset, buffer, ec); } // Start an asynchronous read. The buffer for the data being received must be // valid for the lifetime of the asynchronous operation. 
template void async_read_some(implementation_type& impl, const MutableBufferSequence& buffers, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_handle_read_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(buffers, handler); ASIO_HANDLER_CREATION((p.p, "handle", &impl, "async_read_some")); start_read_op(impl, 0, buffer_sequence_adapter::first(buffers), p.p); p.v = p.p = 0; } // Start an asynchronous read at a specified offset. The buffer for the data // being received must be valid for the lifetime of the asynchronous // operation. template void async_read_some_at(implementation_type& impl, uint64_t offset, const MutableBufferSequence& buffers, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_handle_read_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(buffers, handler); ASIO_HANDLER_CREATION((p.p, "handle", &impl, "async_read_some_at")); start_read_op(impl, offset, buffer_sequence_adapter::first(buffers), p.p); p.v = p.p = 0; } private: // Prevent the use of the null_buffers type with this service. size_t write_some(implementation_type& impl, const null_buffers& buffers, asio::error_code& ec); size_t write_some_at(implementation_type& impl, uint64_t offset, const null_buffers& buffers, asio::error_code& ec); template void async_write_some(implementation_type& impl, const null_buffers& buffers, Handler& handler); template void async_write_some_at(implementation_type& impl, uint64_t offset, const null_buffers& buffers, Handler& handler); size_t read_some(implementation_type& impl, const null_buffers& buffers, asio::error_code& ec); size_t read_some_at(implementation_type& impl, uint64_t offset, const null_buffers& buffers, asio::error_code& ec); template void async_read_some(implementation_type& impl, const null_buffers& buffers, Handler& handler); template void async_read_some_at(implementation_type& impl, uint64_t offset, const null_buffers& buffers, Handler& handler); // Helper class for waiting for synchronous operations to complete. class overlapped_wrapper; // Helper function to perform a synchronous write operation. ASIO_DECL size_t do_write(implementation_type& impl, uint64_t offset, const asio::const_buffer& buffer, asio::error_code& ec); // Helper function to start a write operation. ASIO_DECL void start_write_op(implementation_type& impl, uint64_t offset, const asio::const_buffer& buffer, operation* op); // Helper function to perform a synchronous write operation. ASIO_DECL size_t do_read(implementation_type& impl, uint64_t offset, const asio::mutable_buffer& buffer, asio::error_code& ec); // Helper function to start a read operation. ASIO_DECL void start_read_op(implementation_type& impl, uint64_t offset, const asio::mutable_buffer& buffer, operation* op); // Update the ID of the thread from which cancellation is safe. ASIO_DECL void update_cancellation_thread_id(implementation_type& impl); // Helper function to close a handle when the associated object is being // destroyed. ASIO_DECL void close_for_destruction(implementation_type& impl); // The IOCP service used for running asynchronous operations and dispatching // handlers. win_iocp_io_service& iocp_service_; // Mutex to protect access to the linked list of implementations. 
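// Illustrative sketch (not part of the bundled asio sources): the recurring
//   typename op::ptr p = { ..., asio_handler_alloc_helpers::allocate(...), 0 };
//   p.p = new (p.v) op(...);   ...   p.v = p.p = 0;
// sequence above is a commit-or-rollback guard. Until the final assignment
// zeroes the pointers, the ptr object still owns the raw storage and the
// constructed operation, so a failure while starting the operation cannot
// leak them. A simplified standalone rendering of that idea (op_ptr and
// my_op are hypothetical; the real ptr type is generated elsewhere in asio):
#include <cstdlib>
#include <iostream>
#include <new>

struct my_op
{
  explicit my_op(int id) : id_(id) {}
  int id_;
};

struct op_ptr
{
  void* v;    // raw storage, owned until released
  my_op* p;   // constructed operation, owned until released

  ~op_ptr()
  {
    if (p) p->~my_op();    // roll back construction
    if (v) std::free(v);   // roll back allocation
  }
};

int main()
{
  op_ptr p = { std::malloc(sizeof(my_op)), 0 };
  if (!p.v) return 1;          // allocation failed
  p.p = new (p.v) my_op(42);   // placement-construct into the raw storage
  std::cout << "constructed op " << p.p->id_ << "\n";
  // In asio the operation would be handed to the service here, followed by
  // "p.v = p.p = 0;" to commit. In this demo ~op_ptr() simply cleans up.
  return 0;
}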
mutex mutex_; // The head of a linked list of all implementations. implementation_type* impl_list_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/win_iocp_handle_service.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_HANDLE_SERVICE_HPP galera-3-25.3.20/asio/asio/detail/variadic_templates.hpp0000644000015300001660000000375213042054732022646 0ustar jenkinsjenkins// // detail/variadic_templates.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_VARIADIC_TEMPLATES_HPP #define ASIO_DETAIL_VARIADIC_TEMPLATES_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_VARIADIC_TEMPLATES) # define ASIO_VARIADIC_TPARAMS(n) ASIO_VARIADIC_TPARAMS_##n # define ASIO_VARIADIC_TPARAMS_1 \ typename T1 # define ASIO_VARIADIC_TPARAMS_2 \ typename T1, typename T2 # define ASIO_VARIADIC_TPARAMS_3 \ typename T1, typename T2, typename T3 # define ASIO_VARIADIC_TPARAMS_4 \ typename T1, typename T2, typename T3, typename T4 # define ASIO_VARIADIC_TPARAMS_5 \ typename T1, typename T2, typename T3, typename T4, typename T5 # define ASIO_VARIADIC_TARGS(n) ASIO_VARIADIC_TARGS_##n # define ASIO_VARIADIC_TARGS_1 x1 # define ASIO_VARIADIC_TARGS_2 x1, x2 # define ASIO_VARIADIC_TARGS_3 x1, x2, x3 # define ASIO_VARIADIC_TARGS_4 x1, x2, x3, x4 # define ASIO_VARIADIC_TARGS_5 x1, x2, x3, x4, x5 # define ASIO_VARIADIC_PARAMS(n) ASIO_VARIADIC_PARAMS_##n # define ASIO_VARIADIC_PARAMS_1 T1 x1 # define ASIO_VARIADIC_PARAMS_2 T1 x1, T2 x2 # define ASIO_VARIADIC_PARAMS_3 T1 x1, T2 x2, T3 x3 # define ASIO_VARIADIC_PARAMS_4 T1 x1, T2 x2, T3 x3, T4 x4 # define ASIO_VARIADIC_PARAMS_5 T1 x1, T2 x2, T3 x3, T4 x4, T5 x5 # define ASIO_VARIADIC_ARGS(n) ASIO_VARIADIC_ARGS_##n # define ASIO_VARIADIC_ARGS_1 x1 # define ASIO_VARIADIC_ARGS_2 x1, x2 # define ASIO_VARIADIC_ARGS_3 x1, x2, x3 # define ASIO_VARIADIC_ARGS_4 x1, x2, x3, x4 # define ASIO_VARIADIC_ARGS_5 x1, x2, x3, x4, x5 # define ASIO_VARIADIC_GENERATE(m) m(1) m(2) m(3) m(4) m(5) #endif // !defined(ASIO_HAS_VARIADIC_TEMPLATES) #endif // ASIO_DETAIL_VARIADIC_TEMPLATES_HPP galera-3-25.3.20/asio/asio/detail/wait_op.hpp0000644000015300001660000000164313042054732020445 0ustar jenkinsjenkins// // detail/wait_op.hpp // ~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WAIT_OP_HPP #define ASIO_DETAIL_WAIT_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class wait_op : public operation { public: // The error code to be passed to the completion handler. 
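// Illustrative sketch (not part of the bundled asio sources): when real
// variadic templates are unavailable, ASIO_VARIADIC_GENERATE(m) above stamps
// out one overload per arity (m(1) .. m(5)) using the numbered TPARAMS /
// PARAMS / ARGS macros. A minimal standalone use of the same pattern
// (MY_DEF_CALL, call_n and print2 are hypothetical names):
#include <iostream>

#define MY_TPARAMS_1 typename T1
#define MY_TPARAMS_2 typename T1, typename T2
#define MY_PARAMS_1  T1 x1
#define MY_PARAMS_2  T1 x1, T2 x2
#define MY_ARGS_1    x1
#define MY_ARGS_2    x1, x2

// Generate one forwarding overload for the given arity.
#define MY_DEF_CALL(n) \
  template <typename F, MY_TPARAMS_##n> \
  void call_n(F f, MY_PARAMS_##n) { f(MY_ARGS_##n); }

MY_DEF_CALL(1)
MY_DEF_CALL(2)

void print2(int a, const char* b) { std::cout << a << ' ' << b << '\n'; }

int main()
{
  call_n(print2, 42, "hello");   // dispatches to the arity-2 overload
  return 0;
}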
asio::error_code ec_; protected: wait_op(func_type func) : operation(func) { } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_WAIT_OP_HPP galera-3-25.3.20/asio/asio/detail/epoll_reactor.hpp0000644000015300001660000001757313042054732021646 0ustar jenkinsjenkins// // detail/epoll_reactor.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_EPOLL_REACTOR_HPP #define ASIO_DETAIL_EPOLL_REACTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_EPOLL) #include "asio/io_service.hpp" #include "asio/detail/atomic_count.hpp" #include "asio/detail/limits.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/object_pool.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/select_interrupter.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/timer_queue_base.hpp" #include "asio/detail/timer_queue_set.hpp" #include "asio/detail/wait_op.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class epoll_reactor : public asio::detail::service_base { public: enum op_types { read_op = 0, write_op = 1, connect_op = 1, except_op = 2, max_ops = 3 }; // Per-descriptor queues. class descriptor_state : operation { friend class epoll_reactor; friend class object_pool_access; descriptor_state* next_; descriptor_state* prev_; mutex mutex_; epoll_reactor* reactor_; int descriptor_; uint32_t registered_events_; op_queue op_queue_[max_ops]; bool shutdown_; ASIO_DECL descriptor_state(); void set_ready_events(uint32_t events) { task_result_ = events; } ASIO_DECL operation* perform_io(uint32_t events); ASIO_DECL static void do_complete( io_service_impl* owner, operation* base, const asio::error_code& ec, std::size_t bytes_transferred); }; // Per-descriptor data. typedef descriptor_state* per_descriptor_data; // Constructor. ASIO_DECL epoll_reactor(asio::io_service& io_service); // Destructor. ASIO_DECL ~epoll_reactor(); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Recreate internal descriptors following a fork. ASIO_DECL void fork_service( asio::io_service::fork_event fork_ev); // Initialise the task. ASIO_DECL void init_task(); // Register a socket with the reactor. Returns 0 on success, system error // code on failure. ASIO_DECL int register_descriptor(socket_type descriptor, per_descriptor_data& descriptor_data); // Register a descriptor with an associated single operation. Returns 0 on // success, system error code on failure. ASIO_DECL int register_internal_descriptor( int op_type, socket_type descriptor, per_descriptor_data& descriptor_data, reactor_op* op); // Move descriptor registration from one descriptor_data object to another. ASIO_DECL void move_descriptor(socket_type descriptor, per_descriptor_data& target_descriptor_data, per_descriptor_data& source_descriptor_data); // Post a reactor operation for immediate completion. void post_immediate_completion(reactor_op* op, bool is_continuation) { io_service_.post_immediate_completion(op, is_continuation); } // Start a new operation. 
The reactor operation will be performed when the // given descriptor is flagged as ready, or an error has occurred. ASIO_DECL void start_op(int op_type, socket_type descriptor, per_descriptor_data& descriptor_data, reactor_op* op, bool is_continuation, bool allow_speculative); // Cancel all operations associated with the given descriptor. The // handlers associated with the descriptor will be invoked with the // operation_aborted error. ASIO_DECL void cancel_ops(socket_type descriptor, per_descriptor_data& descriptor_data); // Cancel any operations that are running against the descriptor and remove // its registration from the reactor. ASIO_DECL void deregister_descriptor(socket_type descriptor, per_descriptor_data& descriptor_data, bool closing); // Remote the descriptor's registration from the reactor. ASIO_DECL void deregister_internal_descriptor( socket_type descriptor, per_descriptor_data& descriptor_data); // Add a new timer queue to the reactor. template void add_timer_queue(timer_queue& timer_queue); // Remove a timer queue from the reactor. template void remove_timer_queue(timer_queue& timer_queue); // Schedule a new operation in the given timer queue to expire at the // specified absolute time. template void schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op); // Cancel the timer operations associated with the given token. Returns the // number of operations that have been posted or dispatched. template std::size_t cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled = (std::numeric_limits::max)()); // Run epoll once until interrupted or events are ready to be dispatched. ASIO_DECL void run(bool block, op_queue& ops); // Interrupt the select loop. ASIO_DECL void interrupt(); private: // The hint to pass to epoll_create to size its data structures. enum { epoll_size = 20000 }; // Create the epoll file descriptor. Throws an exception if the descriptor // cannot be created. ASIO_DECL static int do_epoll_create(); // Create the timerfd file descriptor. Does not throw. ASIO_DECL static int do_timerfd_create(); // Allocate a new descriptor state object. ASIO_DECL descriptor_state* allocate_descriptor_state(); // Free an existing descriptor state object. ASIO_DECL void free_descriptor_state(descriptor_state* s); // Helper function to add a new timer queue. ASIO_DECL void do_add_timer_queue(timer_queue_base& queue); // Helper function to remove a timer queue. ASIO_DECL void do_remove_timer_queue(timer_queue_base& queue); // Called to recalculate and update the timeout. ASIO_DECL void update_timeout(); // Get the timeout value for the epoll_wait call. The timeout value is // returned as a number of milliseconds. A return value of -1 indicates // that epoll_wait should block indefinitely. ASIO_DECL int get_timeout(); #if defined(ASIO_HAS_TIMERFD) // Get the timeout value for the timer descriptor. The return value is the // flag argument to be used when calling timerfd_settime. ASIO_DECL int get_timeout(itimerspec& ts); #endif // defined(ASIO_HAS_TIMERFD) // The io_service implementation used to post completions. io_service_impl& io_service_; // Mutex to protect access to internal data. mutex mutex_; // The interrupter is used to break a blocking epoll_wait call. select_interrupter interrupter_; // The epoll file descriptor. int epoll_fd_; // The timer file descriptor. int timer_fd_; // The timer queues. 
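// Illustrative sketch (not part of the bundled asio sources): get_timeout()
// above reports the nearest timer expiry as a millisecond count for
// epoll_wait(), with -1 meaning "block indefinitely" because no timer is
// armed. A standalone version of that convention, assuming <chrono>
// (epoll_timeout_ms is a hypothetical name and ignores any additional
// clamping asio may apply):
#include <cassert>
#include <chrono>

// -1: no deadline; 0: deadline already passed; otherwise whole milliseconds,
// rounded up so the reactor never wakes before the deadline.
int epoll_timeout_ms(bool has_deadline,
                     std::chrono::steady_clock::time_point deadline,
                     std::chrono::steady_clock::time_point now)
{
  if (!has_deadline)
    return -1;
  if (deadline <= now)
    return 0;
  std::chrono::steady_clock::duration remaining = deadline - now;
  std::chrono::milliseconds ms =
    std::chrono::duration_cast<std::chrono::milliseconds>(remaining);
  if (ms < remaining)
    ++ms;   // round up to avoid an early wakeup
  return static_cast<int>(ms.count());
}

int main()
{
  using namespace std::chrono;
  steady_clock::time_point now = steady_clock::now();
  assert(epoll_timeout_ms(false, now, now) == -1);
  assert(epoll_timeout_ms(true, now + milliseconds(250), now) == 250);
  assert(epoll_timeout_ms(true, now - seconds(1), now) == 0);
  return 0;
}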
timer_queue_set timer_queues_; // Whether the service has been shut down. bool shutdown_; // Mutex to protect access to the registered descriptors. mutex registered_descriptors_mutex_; // Keep track of all registered descriptors. object_pool registered_descriptors_; // Helper class to do post-perform_io cleanup. struct perform_io_cleanup_on_block_exit; friend struct perform_io_cleanup_on_block_exit; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/detail/impl/epoll_reactor.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/epoll_reactor.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_EPOLL) #endif // ASIO_DETAIL_EPOLL_REACTOR_HPP galera-3-25.3.20/asio/asio/detail/winrt_async_manager.hpp0000644000015300001660000002026413042054732023035 0ustar jenkinsjenkins// // detail/winrt_async_manager.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_ASYNC_MANAGER_HPP #define ASIO_DETAIL_WINRT_ASYNC_MANAGER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include #include "asio/detail/atomic_count.hpp" #include "asio/detail/winrt_async_op.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class winrt_async_manager : public asio::detail::service_base { public: // Constructor. winrt_async_manager(asio::io_service& io_service) : asio::detail::service_base(io_service), io_service_(use_service(io_service)), outstanding_ops_(1) { } // Destructor. ~winrt_async_manager() { } // Destroy all user-defined handler objects owned by the service. void shutdown_service() { if (--outstanding_ops_ > 0) { // Block until last operation is complete. 
std::future f = promise_.get_future(); f.wait(); } } void sync(Windows::Foundation::IAsyncAction^ action, asio::error_code& ec) { using namespace Windows::Foundation; using Windows::Foundation::AsyncStatus; auto promise = std::make_shared>(); auto future = promise->get_future(); action->Completed = ref new AsyncActionCompletedHandler( [promise](IAsyncAction^ action, AsyncStatus status) { switch (status) { case AsyncStatus::Canceled: promise->set_value(asio::error::operation_aborted); break; case AsyncStatus::Error: case AsyncStatus::Completed: default: asio::error_code ec( action->ErrorCode.Value, asio::system_category()); promise->set_value(ec); break; } }); ec = future.get(); } template TResult sync(Windows::Foundation::IAsyncOperation^ operation, asio::error_code& ec) { using namespace Windows::Foundation; using Windows::Foundation::AsyncStatus; auto promise = std::make_shared>(); auto future = promise->get_future(); operation->Completed = ref new AsyncOperationCompletedHandler( [promise](IAsyncOperation^ operation, AsyncStatus status) { switch (status) { case AsyncStatus::Canceled: promise->set_value(asio::error::operation_aborted); break; case AsyncStatus::Error: case AsyncStatus::Completed: default: asio::error_code ec( operation->ErrorCode.Value, asio::system_category()); promise->set_value(ec); break; } }); ec = future.get(); return operation->GetResults(); } template TResult sync( Windows::Foundation::IAsyncOperationWithProgress< TResult, TProgress>^ operation, asio::error_code& ec) { using namespace Windows::Foundation; using Windows::Foundation::AsyncStatus; auto promise = std::make_shared>(); auto future = promise->get_future(); operation->Completed = ref new AsyncOperationWithProgressCompletedHandler( [promise](IAsyncOperationWithProgress^ operation, AsyncStatus status) { switch (status) { case AsyncStatus::Canceled: promise->set_value(asio::error::operation_aborted); break; case AsyncStatus::Started: break; case AsyncStatus::Error: case AsyncStatus::Completed: default: asio::error_code ec( operation->ErrorCode.Value, asio::system_category()); promise->set_value(ec); break; } }); ec = future.get(); return operation->GetResults(); } void async(Windows::Foundation::IAsyncAction^ action, winrt_async_op* handler) { using namespace Windows::Foundation; using Windows::Foundation::AsyncStatus; auto on_completed = ref new AsyncActionCompletedHandler( [this, handler](IAsyncAction^ action, AsyncStatus status) { switch (status) { case AsyncStatus::Canceled: handler->ec_ = asio::error::operation_aborted; break; case AsyncStatus::Started: return; case AsyncStatus::Completed: case AsyncStatus::Error: default: handler->ec_ = asio::error_code( action->ErrorCode.Value, asio::system_category()); break; } io_service_.post_deferred_completion(handler); if (--outstanding_ops_ == 0) promise_.set_value(); }); io_service_.work_started(); ++outstanding_ops_; action->Completed = on_completed; } template void async(Windows::Foundation::IAsyncOperation^ operation, winrt_async_op* handler) { using namespace Windows::Foundation; using Windows::Foundation::AsyncStatus; auto on_completed = ref new AsyncOperationCompletedHandler( [this, handler](IAsyncOperation^ operation, AsyncStatus status) { switch (status) { case AsyncStatus::Canceled: handler->ec_ = asio::error::operation_aborted; break; case AsyncStatus::Started: return; case AsyncStatus::Completed: handler->result_ = operation->GetResults(); // Fall through. 
case AsyncStatus::Error: default: handler->ec_ = asio::error_code( operation->ErrorCode.Value, asio::system_category()); break; } io_service_.post_deferred_completion(handler); if (--outstanding_ops_ == 0) promise_.set_value(); }); io_service_.work_started(); ++outstanding_ops_; operation->Completed = on_completed; } template void async( Windows::Foundation::IAsyncOperationWithProgress< TResult, TProgress>^ operation, winrt_async_op* handler) { using namespace Windows::Foundation; using Windows::Foundation::AsyncStatus; auto on_completed = ref new AsyncOperationWithProgressCompletedHandler( [this, handler](IAsyncOperationWithProgress< TResult, TProgress>^ operation, AsyncStatus status) { switch (status) { case AsyncStatus::Canceled: handler->ec_ = asio::error::operation_aborted; break; case AsyncStatus::Started: return; case AsyncStatus::Completed: handler->result_ = operation->GetResults(); // Fall through. case AsyncStatus::Error: default: handler->ec_ = asio::error_code( operation->ErrorCode.Value, asio::system_category()); break; } io_service_.post_deferred_completion(handler); if (--outstanding_ops_ == 0) promise_.set_value(); }); io_service_.work_started(); ++outstanding_ops_; operation->Completed = on_completed; } private: // The io_service implementation used to post completed handlers. io_service_impl& io_service_; // Count of outstanding operations. atomic_count outstanding_ops_; // Used to keep wait for outstanding operations to complete. std::promise promise_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_WINRT_ASYNC_MANAGER_HPP galera-3-25.3.20/asio/asio/detail/win_iocp_socket_service.hpp0000644000015300001660000004166513042054732023712 0ustar jenkinsjenkins// // detail/win_iocp_socket_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_SOCKET_SERVICE_HPP #define ASIO_DETAIL_WIN_IOCP_SOCKET_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/socket_base.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/reactor.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_holder.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/win_iocp_io_service.hpp" #include "asio/detail/win_iocp_null_buffers_op.hpp" #include "asio/detail/win_iocp_socket_accept_op.hpp" #include "asio/detail/win_iocp_socket_connect_op.hpp" #include "asio/detail/win_iocp_socket_recvfrom_op.hpp" #include "asio/detail/win_iocp_socket_send_op.hpp" #include "asio/detail/win_iocp_socket_service_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class win_iocp_socket_service : public win_iocp_socket_service_base { public: // The protocol type. typedef Protocol protocol_type; // The endpoint type. typedef typename Protocol::endpoint endpoint_type; // The native type of a socket. class native_handle_type { public: native_handle_type(socket_type s) : socket_(s), have_remote_endpoint_(false) { } native_handle_type(socket_type s, const endpoint_type& ep) : socket_(s), have_remote_endpoint_(true), remote_endpoint_(ep) { } void operator=(socket_type s) { socket_ = s; have_remote_endpoint_ = false; remote_endpoint_ = endpoint_type(); } operator socket_type() const { return socket_; } bool have_remote_endpoint() const { return have_remote_endpoint_; } endpoint_type remote_endpoint() const { return remote_endpoint_; } private: socket_type socket_; bool have_remote_endpoint_; endpoint_type remote_endpoint_; }; // The implementation type of the socket. struct implementation_type : win_iocp_socket_service_base::base_implementation_type { // Default constructor. implementation_type() : protocol_(endpoint_type().protocol()), have_remote_endpoint_(false), remote_endpoint_() { } // The protocol associated with the socket. protocol_type protocol_; // Whether we have a cached remote endpoint. bool have_remote_endpoint_; // A cached remote endpoint. endpoint_type remote_endpoint_; }; // Constructor. win_iocp_socket_service(asio::io_service& io_service) : win_iocp_socket_service_base(io_service) { } // Move-construct a new socket implementation. void move_construct(implementation_type& impl, implementation_type& other_impl) { this->base_move_construct(impl, other_impl); impl.protocol_ = other_impl.protocol_; other_impl.protocol_ = endpoint_type().protocol(); impl.have_remote_endpoint_ = other_impl.have_remote_endpoint_; other_impl.have_remote_endpoint_ = false; impl.remote_endpoint_ = other_impl.remote_endpoint_; other_impl.remote_endpoint_ = endpoint_type(); } // Move-assign from another socket implementation. 
void move_assign(implementation_type& impl, win_iocp_socket_service_base& other_service, implementation_type& other_impl) { this->base_move_assign(impl, other_service, other_impl); impl.protocol_ = other_impl.protocol_; other_impl.protocol_ = endpoint_type().protocol(); impl.have_remote_endpoint_ = other_impl.have_remote_endpoint_; other_impl.have_remote_endpoint_ = false; impl.remote_endpoint_ = other_impl.remote_endpoint_; other_impl.remote_endpoint_ = endpoint_type(); } // Move-construct a new socket implementation from another protocol type. template void converting_move_construct(implementation_type& impl, typename win_iocp_socket_service< Protocol1>::implementation_type& other_impl) { this->base_move_construct(impl, other_impl); impl.protocol_ = protocol_type(other_impl.protocol_); other_impl.protocol_ = typename Protocol1::endpoint().protocol(); impl.have_remote_endpoint_ = other_impl.have_remote_endpoint_; other_impl.have_remote_endpoint_ = false; impl.remote_endpoint_ = other_impl.remote_endpoint_; other_impl.remote_endpoint_ = typename Protocol1::endpoint(); } // Open a new socket implementation. asio::error_code open(implementation_type& impl, const protocol_type& protocol, asio::error_code& ec) { if (!do_open(impl, protocol.family(), protocol.type(), protocol.protocol(), ec)) { impl.protocol_ = protocol; impl.have_remote_endpoint_ = false; impl.remote_endpoint_ = endpoint_type(); } return ec; } // Assign a native socket to a socket implementation. asio::error_code assign(implementation_type& impl, const protocol_type& protocol, const native_handle_type& native_socket, asio::error_code& ec) { if (!do_assign(impl, protocol.type(), native_socket, ec)) { impl.protocol_ = protocol; impl.have_remote_endpoint_ = native_socket.have_remote_endpoint(); impl.remote_endpoint_ = native_socket.remote_endpoint(); } return ec; } // Get the native socket representation. native_handle_type native_handle(implementation_type& impl) { if (impl.have_remote_endpoint_) return native_handle_type(impl.socket_, impl.remote_endpoint_); return native_handle_type(impl.socket_); } // Bind the socket to the specified local endpoint. asio::error_code bind(implementation_type& impl, const endpoint_type& endpoint, asio::error_code& ec) { socket_ops::bind(impl.socket_, endpoint.data(), endpoint.size(), ec); return ec; } // Set a socket option. template asio::error_code set_option(implementation_type& impl, const Option& option, asio::error_code& ec) { socket_ops::setsockopt(impl.socket_, impl.state_, option.level(impl.protocol_), option.name(impl.protocol_), option.data(impl.protocol_), option.size(impl.protocol_), ec); return ec; } // Set a socket option. template asio::error_code get_option(const implementation_type& impl, Option& option, asio::error_code& ec) const { std::size_t size = option.size(impl.protocol_); socket_ops::getsockopt(impl.socket_, impl.state_, option.level(impl.protocol_), option.name(impl.protocol_), option.data(impl.protocol_), &size, ec); if (!ec) option.resize(impl.protocol_, size); return ec; } // Get the local endpoint. endpoint_type local_endpoint(const implementation_type& impl, asio::error_code& ec) const { endpoint_type endpoint; std::size_t addr_len = endpoint.capacity(); if (socket_ops::getsockname(impl.socket_, endpoint.data(), &addr_len, ec)) return endpoint_type(); endpoint.resize(addr_len); return endpoint; } // Get the remote endpoint. 
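// Illustrative sketch (not part of the bundled asio sources): the
// win_iocp_socket_service above is the Windows/IOCP backend behind the public
// asio socket classes; user code reaches open()/set_option()/local_endpoint()
// through asio::ip::tcp::socket. A minimal usage example against the public
// API (error-code overloads, so nothing here throws):
#include "asio.hpp"
#include <iostream>

int main()
{
  asio::io_service io_service;
  asio::ip::tcp::socket socket(io_service);

  asio::error_code ec;
  socket.open(asio::ip::tcp::v4(), ec);                        // open()
  if (!ec)
    socket.set_option(asio::ip::tcp::no_delay(true), ec);      // set_option()
  if (!ec)
  {
    asio::ip::tcp::endpoint local = socket.local_endpoint(ec); // local_endpoint()
    if (!ec)
      std::cout << "local endpoint: " << local << "\n";
  }
  if (ec)
    std::cout << "error: " << ec.message() << "\n";
  return 0;
}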
endpoint_type remote_endpoint(const implementation_type& impl, asio::error_code& ec) const { endpoint_type endpoint = impl.remote_endpoint_; std::size_t addr_len = endpoint.capacity(); if (socket_ops::getpeername(impl.socket_, endpoint.data(), &addr_len, impl.have_remote_endpoint_, ec)) return endpoint_type(); endpoint.resize(addr_len); return endpoint; } // Send a datagram to the specified endpoint. Returns the number of bytes // sent. template size_t send_to(implementation_type& impl, const ConstBufferSequence& buffers, const endpoint_type& destination, socket_base::message_flags flags, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); return socket_ops::sync_sendto(impl.socket_, impl.state_, bufs.buffers(), bufs.count(), flags, destination.data(), destination.size(), ec); } // Wait until data can be sent without blocking. size_t send_to(implementation_type& impl, const null_buffers&, const endpoint_type&, socket_base::message_flags, asio::error_code& ec) { // Wait for socket to become ready. socket_ops::poll_write(impl.socket_, impl.state_, ec); return 0; } // Start an asynchronous send. The data being sent must be valid for the // lifetime of the asynchronous operation. template void async_send_to(implementation_type& impl, const ConstBufferSequence& buffers, const endpoint_type& destination, socket_base::message_flags flags, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_socket_send_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.cancel_token_, buffers, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_send_to")); buffer_sequence_adapter bufs(buffers); start_send_to_op(impl, bufs.buffers(), bufs.count(), destination.data(), static_cast(destination.size()), flags, p.p); p.v = p.p = 0; } // Start an asynchronous wait until data can be sent without blocking. template void async_send_to(implementation_type& impl, const null_buffers&, const endpoint_type&, socket_base::message_flags, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.cancel_token_, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_send_to(null_buffers)")); start_reactor_op(impl, reactor::write_op, p.p); p.v = p.p = 0; } // Receive a datagram with the endpoint of the sender. Returns the number of // bytes received. template size_t receive_from(implementation_type& impl, const MutableBufferSequence& buffers, endpoint_type& sender_endpoint, socket_base::message_flags flags, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); std::size_t addr_len = sender_endpoint.capacity(); std::size_t bytes_recvd = socket_ops::sync_recvfrom( impl.socket_, impl.state_, bufs.buffers(), bufs.count(), flags, sender_endpoint.data(), &addr_len, ec); if (!ec) sender_endpoint.resize(addr_len); return bytes_recvd; } // Wait until data can be received without blocking. size_t receive_from(implementation_type& impl, const null_buffers&, endpoint_type& sender_endpoint, socket_base::message_flags, asio::error_code& ec) { // Wait for socket to become ready. socket_ops::poll_read(impl.socket_, impl.state_, ec); // Reset endpoint since it can be given no sensible value at this time. 
sender_endpoint = endpoint_type(); return 0; } // Start an asynchronous receive. The buffer for the data being received and // the sender_endpoint object must both be valid for the lifetime of the // asynchronous operation. template void async_receive_from(implementation_type& impl, const MutableBufferSequence& buffers, endpoint_type& sender_endp, socket_base::message_flags flags, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_socket_recvfrom_op< MutableBufferSequence, endpoint_type, Handler> op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(sender_endp, impl.cancel_token_, buffers, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_receive_from")); buffer_sequence_adapter bufs(buffers); start_receive_from_op(impl, bufs.buffers(), bufs.count(), sender_endp.data(), flags, &p.p->endpoint_size(), p.p); p.v = p.p = 0; } // Wait until data can be received without blocking. template void async_receive_from(implementation_type& impl, const null_buffers&, endpoint_type& sender_endpoint, socket_base::message_flags flags, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.cancel_token_, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_receive_from(null_buffers)")); // Reset endpoint since it can be given no sensible value at this time. sender_endpoint = endpoint_type(); start_null_buffers_receive_op(impl, flags, p.p); p.v = p.p = 0; } // Accept a new connection. template asio::error_code accept(implementation_type& impl, Socket& peer, endpoint_type* peer_endpoint, asio::error_code& ec) { // We cannot accept a socket that is already open. if (peer.is_open()) { ec = asio::error::already_open; return ec; } std::size_t addr_len = peer_endpoint ? peer_endpoint->capacity() : 0; socket_holder new_socket(socket_ops::sync_accept(impl.socket_, impl.state_, peer_endpoint ? peer_endpoint->data() : 0, peer_endpoint ? &addr_len : 0, ec)); // On success, assign new connection to peer socket object. if (new_socket.get() != invalid_socket) { if (peer_endpoint) peer_endpoint->resize(addr_len); if (!peer.assign(impl.protocol_, new_socket.get(), ec)) new_socket.release(); } return ec; } // Start an asynchronous accept. The peer and peer_endpoint objects // must be valid until the accept's handler is invoked. template void async_accept(implementation_type& impl, Socket& peer, endpoint_type* peer_endpoint, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_socket_accept_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; bool enable_connection_aborted = (impl.state_ & socket_ops::enable_connection_aborted) != 0; p.p = new (p.v) op(*this, impl.socket_, peer, impl.protocol_, peer_endpoint, enable_connection_aborted, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_accept")); start_accept_op(impl, peer.is_open(), p.p->new_socket(), impl.protocol_.family(), impl.protocol_.type(), impl.protocol_.protocol(), p.p->output_buffer(), p.p->address_length(), p.p); p.v = p.p = 0; } // Connect the socket to the specified endpoint. 
asio::error_code connect(implementation_type& impl, const endpoint_type& peer_endpoint, asio::error_code& ec) { socket_ops::sync_connect(impl.socket_, peer_endpoint.data(), peer_endpoint.size(), ec); return ec; } // Start an asynchronous connect. template void async_connect(implementation_type& impl, const endpoint_type& peer_endpoint, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_socket_connect_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.socket_, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_connect")); start_connect_op(impl, impl.protocol_.family(), impl.protocol_.type(), peer_endpoint.data(), static_cast(peer_endpoint.size()), p.p); p.v = p.p = 0; } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_SOCKET_SERVICE_HPP galera-3-25.3.20/asio/asio/detail/handler_cont_helpers.hpp0000644000015300001660000000240713042054732023164 0ustar jenkinsjenkins// // detail/handler_cont_helpers.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_HANDLER_CONT_HELPERS_HPP #define ASIO_DETAIL_HANDLER_CONT_HELPERS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/addressof.hpp" #include "asio/handler_continuation_hook.hpp" #include "asio/detail/push_options.hpp" // Calls to asio_handler_is_continuation must be made from a namespace that // does not contain overloads of this function. This namespace is defined here // for that purpose. namespace asio_handler_cont_helpers { template inline bool is_continuation(Context& context) { #if !defined(ASIO_HAS_HANDLER_HOOKS) return false; #else using asio::asio_handler_is_continuation; return asio_handler_is_continuation( asio::detail::addressof(context)); #endif } } // namespace asio_handler_cont_helpers #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_HANDLER_CONT_HELPERS_HPP galera-3-25.3.20/asio/asio/detail/addressof.hpp0000644000015300001660000000170713042054732020756 0ustar jenkinsjenkins// // detail/addressof.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_ADDRESSOF_HPP #define ASIO_DETAIL_ADDRESSOF_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_ADDRESSOF) # include #else // defined(ASIO_HAS_STD_ADDRESSOF) # include #endif // defined(ASIO_HAS_STD_ADDRESSOF) namespace asio { namespace detail { #if defined(ASIO_HAS_STD_ADDRESSOF) using std::addressof; #else // defined(ASIO_HAS_STD_ADDRESSOF) using boost::addressof; #endif // defined(ASIO_HAS_STD_ADDRESSOF) } // namespace detail } // namespace asio #endif // ASIO_DETAIL_ADDRESSOF_HPP galera-3-25.3.20/asio/asio/detail/posix_event.hpp0000644000015300001660000000517613042054732021353 0ustar jenkinsjenkins// // detail/posix_event.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_POSIX_EVENT_HPP #define ASIO_DETAIL_POSIX_EVENT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_PTHREADS) #include #include "asio/detail/assert.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class posix_event : private noncopyable { public: // Constructor. ASIO_DECL posix_event(); // Destructor. ~posix_event() { ::pthread_cond_destroy(&cond_); } // Signal the event. (Retained for backward compatibility.) template void signal(Lock& lock) { this->signal_all(lock); } // Signal all waiters. template void signal_all(Lock& lock) { ASIO_ASSERT(lock.locked()); (void)lock; state_ |= 1; ::pthread_cond_broadcast(&cond_); // Ignore EINVAL. } // Unlock the mutex and signal one waiter. template void unlock_and_signal_one(Lock& lock) { ASIO_ASSERT(lock.locked()); state_ |= 1; bool have_waiters = (state_ > 1); lock.unlock(); if (have_waiters) ::pthread_cond_signal(&cond_); // Ignore EINVAL. } // If there's a waiter, unlock the mutex and signal it. template bool maybe_unlock_and_signal_one(Lock& lock) { ASIO_ASSERT(lock.locked()); state_ |= 1; if (state_ > 1) { lock.unlock(); ::pthread_cond_signal(&cond_); // Ignore EINVAL. return true; } return false; } // Reset the event. template void clear(Lock& lock) { ASIO_ASSERT(lock.locked()); (void)lock; state_ &= ~std::size_t(1); } // Wait for the event to become signalled. template void wait(Lock& lock) { ASIO_ASSERT(lock.locked()); while ((state_ & 1) == 0) { state_ += 2; ::pthread_cond_wait(&cond_, &lock.mutex().mutex_); // Ignore EINVAL. state_ -= 2; } } private: ::pthread_cond_t cond_; std::size_t state_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/posix_event.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_PTHREADS) #endif // ASIO_DETAIL_POSIX_EVENT_HPP galera-3-25.3.20/asio/asio/detail/descriptor_write_op.hpp0000644000015300001660000000707713042054732023100 0ustar jenkinsjenkins// // detail/descriptor_write_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_DESCRIPTOR_WRITE_OP_HPP #define ASIO_DETAIL_DESCRIPTOR_WRITE_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/descriptor_ops.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class descriptor_write_op_base : public reactor_op { public: descriptor_write_op_base(int descriptor, const ConstBufferSequence& buffers, func_type complete_func) : reactor_op(&descriptor_write_op_base::do_perform, complete_func), descriptor_(descriptor), buffers_(buffers) { } static bool do_perform(reactor_op* base) { descriptor_write_op_base* o(static_cast(base)); buffer_sequence_adapter bufs(o->buffers_); return descriptor_ops::non_blocking_write(o->descriptor_, bufs.buffers(), bufs.count(), o->ec_, o->bytes_transferred_); } private: int descriptor_; ConstBufferSequence buffers_; }; template class descriptor_write_op : public descriptor_write_op_base { public: ASIO_DEFINE_HANDLER_PTR(descriptor_write_op); descriptor_write_op(int descriptor, const ConstBufferSequence& buffers, Handler& handler) : descriptor_write_op_base( descriptor, buffers, &descriptor_write_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. descriptor_write_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, o->bytes_transferred_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #endif // ASIO_DETAIL_DESCRIPTOR_WRITE_OP_HPP galera-3-25.3.20/asio/asio/detail/service_registry.hpp0000644000015300001660000001100313042054732022362 0ustar jenkinsjenkins// // detail/service_registry.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SERVICE_REGISTRY_HPP #define ASIO_DETAIL_SERVICE_REGISTRY_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/mutex.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/io_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class typeid_wrapper {}; class service_registry : private noncopyable { public: // Constructor. Adds the initial service. template service_registry(asio::io_service& o, Service* initial_service, Arg arg); // Destructor. ASIO_DECL ~service_registry(); // Notify all services of a fork event. ASIO_DECL void notify_fork(asio::io_service::fork_event fork_ev); // Get the first service object cast to the specified type. Called during // io_service construction and so performs no locking or type checking. template Service& first_service(); // Get the service object corresponding to the specified service type. Will // create a new service object automatically if no such object already // exists. Ownership of the service object is not transferred to the caller. template Service& use_service(); // Add a service object. Throws on error, in which case ownership of the // object is retained by the caller. template void add_service(Service* new_service); // Check whether a service object of the specified type already exists. template bool has_service() const; private: // Initialise a service's key based on its id. ASIO_DECL static void init_key( asio::io_service::service::key& key, const asio::io_service::id& id); #if !defined(ASIO_NO_TYPEID) // Initialise a service's key based on its id. template static void init_key(asio::io_service::service::key& key, const asio::detail::service_id& /*id*/); #endif // !defined(ASIO_NO_TYPEID) // Check if a service matches the given id. ASIO_DECL static bool keys_match( const asio::io_service::service::key& key1, const asio::io_service::service::key& key2); // The type of a factory function used for creating a service instance. typedef asio::io_service::service* (*factory_type)(asio::io_service&); // Factory function for creating a service instance. template static asio::io_service::service* create( asio::io_service& owner); // Destroy a service instance. ASIO_DECL static void destroy( asio::io_service::service* service); // Helper class to manage service pointers. struct auto_service_ptr; friend struct auto_service_ptr; struct auto_service_ptr { asio::io_service::service* ptr_; ~auto_service_ptr() { destroy(ptr_); } }; // Get the service object corresponding to the specified service key. Will // create a new service object automatically if no such object already // exists. Ownership of the service object is not transferred to the caller. ASIO_DECL asio::io_service::service* do_use_service( const asio::io_service::service::key& key, factory_type factory); // Add a service object. Throws on error, in which case ownership of the // object is retained by the caller. ASIO_DECL void do_add_service( const asio::io_service::service::key& key, asio::io_service::service* new_service); // Check whether a service object with the specified key already exists. ASIO_DECL bool do_has_service( const asio::io_service::service::key& key) const; // Mutex to protect access to internal data. 
mutable asio::detail::mutex mutex_; // The owner of this service registry and the services it contains. asio::io_service& owner_; // The first service in the list of contained services. asio::io_service::service* first_service_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/detail/impl/service_registry.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/service_registry.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_DETAIL_SERVICE_REGISTRY_HPP galera-3-25.3.20/asio/asio/detail/task_io_service_thread_info.hpp0000644000015300001660000000203413042054732024511 0ustar jenkinsjenkins// // detail/task_io_service_thread_info.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_TASK_IO_SERVICE_THREAD_INFO_HPP #define ASIO_DETAIL_TASK_IO_SERVICE_THREAD_INFO_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/op_queue.hpp" #include "asio/detail/thread_info_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class task_io_service; class task_io_service_operation; struct task_io_service_thread_info : public thread_info_base { op_queue private_op_queue; long private_outstanding_work; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_TASK_IO_SERVICE_THREAD_INFO_HPP galera-3-25.3.20/asio/asio/detail/hash_map.hpp0000644000015300001660000001777213042054732020575 0ustar jenkinsjenkins// // detail/hash_map.hpp // ~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_HASH_MAP_HPP #define ASIO_DETAIL_HASH_MAP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/detail/assert.hpp" #include "asio/detail/noncopyable.hpp" #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) # include "asio/detail/socket_types.hpp" #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { inline std::size_t calculate_hash_value(int i) { return static_cast(i); } inline std::size_t calculate_hash_value(void* p) { return reinterpret_cast(p) + (reinterpret_cast(p) >> 3); } #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) inline std::size_t calculate_hash_value(SOCKET s) { return static_cast(s); } #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Note: assumes K and V are POD types. template class hash_map : private noncopyable { public: // The type of a value in the map. typedef std::pair value_type; // The type of a non-const iterator over the hash map. typedef typename std::list::iterator iterator; // The type of a const iterator over the hash map. typedef typename std::list::const_iterator const_iterator; // Constructor. hash_map() : size_(0), buckets_(0), num_buckets_(0) { } // Destructor. ~hash_map() { delete[] buckets_; } // Get an iterator for the beginning of the map. 
iterator begin() { return values_.begin(); } // Get an iterator for the beginning of the map. const_iterator begin() const { return values_.begin(); } // Get an iterator for the end of the map. iterator end() { return values_.end(); } // Get an iterator for the end of the map. const_iterator end() const { return values_.end(); } // Check whether the map is empty. bool empty() const { return values_.empty(); } // Find an entry in the map. iterator find(const K& k) { if (num_buckets_) { size_t bucket = calculate_hash_value(k) % num_buckets_; iterator it = buckets_[bucket].first; if (it == values_.end()) return values_.end(); iterator end_it = buckets_[bucket].last; ++end_it; while (it != end_it) { if (it->first == k) return it; ++it; } } return values_.end(); } // Find an entry in the map. const_iterator find(const K& k) const { if (num_buckets_) { size_t bucket = calculate_hash_value(k) % num_buckets_; const_iterator it = buckets_[bucket].first; if (it == values_.end()) return it; const_iterator end_it = buckets_[bucket].last; ++end_it; while (it != end_it) { if (it->first == k) return it; ++it; } } return values_.end(); } // Insert a new entry into the map. std::pair insert(const value_type& v) { if (size_ + 1 >= num_buckets_) rehash(hash_size(size_ + 1)); size_t bucket = calculate_hash_value(v.first) % num_buckets_; iterator it = buckets_[bucket].first; if (it == values_.end()) { buckets_[bucket].first = buckets_[bucket].last = values_insert(values_.end(), v); ++size_; return std::pair(buckets_[bucket].last, true); } iterator end_it = buckets_[bucket].last; ++end_it; while (it != end_it) { if (it->first == v.first) return std::pair(it, false); ++it; } buckets_[bucket].last = values_insert(end_it, v); ++size_; return std::pair(buckets_[bucket].last, true); } // Erase an entry from the map. void erase(iterator it) { ASIO_ASSERT(it != values_.end()); ASIO_ASSERT(num_buckets_ != 0); size_t bucket = calculate_hash_value(it->first) % num_buckets_; bool is_first = (it == buckets_[bucket].first); bool is_last = (it == buckets_[bucket].last); if (is_first && is_last) buckets_[bucket].first = buckets_[bucket].last = values_.end(); else if (is_first) ++buckets_[bucket].first; else if (is_last) --buckets_[bucket].last; values_erase(it); --size_; } // Erase a key from the map. void erase(const K& k) { iterator it = find(k); if (it != values_.end()) erase(it); } // Remove all entries from the map. void clear() { // Clear the values. values_.clear(); size_ = 0; // Initialise all buckets to empty. iterator end_it = values_.end(); for (size_t i = 0; i < num_buckets_; ++i) buckets_[i].first = buckets_[i].last = end_it; } private: // Calculate the hash size for the specified number of elements. static std::size_t hash_size(std::size_t num_elems) { static std::size_t sizes[] = { #if defined(ASIO_HASH_MAP_BUCKETS) ASIO_HASH_MAP_BUCKETS #else // ASIO_HASH_MAP_BUCKETS 3, 13, 23, 53, 97, 193, 389, 769, 1543, 3079, 6151, 12289, 24593, 49157, 98317, 196613, 393241, 786433, 1572869, 3145739, 6291469, 12582917, 25165843 #endif // ASIO_HASH_MAP_BUCKETS }; const std::size_t nth_size = sizeof(sizes) / sizeof(std::size_t) - 1; for (std::size_t i = 0; i < nth_size; ++i) if (num_elems < sizes[i]) return sizes[i]; return sizes[nth_size]; } // Re-initialise the hash from the values already contained in the list. 
void rehash(std::size_t num_buckets) { if (num_buckets == num_buckets_) return; num_buckets_ = num_buckets; ASIO_ASSERT(num_buckets_ != 0); iterator end_iter = values_.end(); // Update number of buckets and initialise all buckets to empty. bucket_type* tmp = new bucket_type[num_buckets_]; delete[] buckets_; buckets_ = tmp; for (std::size_t i = 0; i < num_buckets_; ++i) buckets_[i].first = buckets_[i].last = end_iter; // Put all values back into the hash. iterator iter = values_.begin(); while (iter != end_iter) { std::size_t bucket = calculate_hash_value(iter->first) % num_buckets_; if (buckets_[bucket].last == end_iter) { buckets_[bucket].first = buckets_[bucket].last = iter++; } else if (++buckets_[bucket].last == iter) { ++iter; } else { values_.splice(buckets_[bucket].last, values_, iter++); --buckets_[bucket].last; } } } // Insert an element into the values list by splicing from the spares list, // if a spare is available, and otherwise by inserting a new element. iterator values_insert(iterator it, const value_type& v) { if (spares_.empty()) { return values_.insert(it, v); } else { spares_.front() = v; values_.splice(it, spares_, spares_.begin()); return --it; } } // Erase an element from the values list by splicing it to the spares list. void values_erase(iterator it) { *it = value_type(); spares_.splice(spares_.begin(), values_, it); } // The number of elements in the hash. std::size_t size_; // The list of all values in the hash map. std::list values_; // The list of spare nodes waiting to be recycled. Assumes that POD types only // are stored in the hash map. std::list spares_; // The type for a bucket in the hash table. struct bucket_type { iterator first; iterator last; }; // The buckets in the hash. bucket_type* buckets_; // The number of buckets in the hash. std::size_t num_buckets_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_HASH_MAP_HPP galera-3-25.3.20/asio/asio/detail/descriptor_ops.hpp0000644000015300001660000000613713042054732022045 0ustar jenkinsjenkins// // detail/descriptor_ops.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_DESCRIPTOR_OPS_HPP #define ASIO_DETAIL_DESCRIPTOR_OPS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) #include #include "asio/error_code.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { namespace descriptor_ops { // Descriptor state bits. enum { // The user wants a non-blocking descriptor. user_set_non_blocking = 1, // The descriptor has been set non-blocking. internal_non_blocking = 2, // Helper "state" used to determine whether the descriptor is non-blocking. non_blocking = user_set_non_blocking | internal_non_blocking, // The descriptor may have been dup()-ed. 
possible_dup = 4 }; typedef unsigned char state_type; template inline ReturnType error_wrapper(ReturnType return_value, asio::error_code& ec) { ec = asio::error_code(errno, asio::error::get_system_category()); return return_value; } ASIO_DECL int open(const char* path, int flags, asio::error_code& ec); ASIO_DECL int close(int d, state_type& state, asio::error_code& ec); ASIO_DECL bool set_user_non_blocking(int d, state_type& state, bool value, asio::error_code& ec); ASIO_DECL bool set_internal_non_blocking(int d, state_type& state, bool value, asio::error_code& ec); typedef iovec buf; ASIO_DECL std::size_t sync_read(int d, state_type state, buf* bufs, std::size_t count, bool all_empty, asio::error_code& ec); ASIO_DECL bool non_blocking_read(int d, buf* bufs, std::size_t count, asio::error_code& ec, std::size_t& bytes_transferred); ASIO_DECL std::size_t sync_write(int d, state_type state, const buf* bufs, std::size_t count, bool all_empty, asio::error_code& ec); ASIO_DECL bool non_blocking_write(int d, const buf* bufs, std::size_t count, asio::error_code& ec, std::size_t& bytes_transferred); ASIO_DECL int ioctl(int d, state_type& state, long cmd, ioctl_arg_type* arg, asio::error_code& ec); ASIO_DECL int fcntl(int d, int cmd, asio::error_code& ec); ASIO_DECL int fcntl(int d, int cmd, long arg, asio::error_code& ec); ASIO_DECL int poll_read(int d, state_type state, asio::error_code& ec); ASIO_DECL int poll_write(int d, state_type state, asio::error_code& ec); } // namespace descriptor_ops } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/descriptor_ops.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) #endif // ASIO_DETAIL_DESCRIPTOR_OPS_HPP galera-3-25.3.20/asio/asio/detail/impl/0000755000015300001660000000000013042054732017227 5ustar jenkinsjenkinsgalera-3-25.3.20/asio/asio/detail/impl/posix_mutex.ipp0000644000015300001660000000212313042054732022323 0ustar jenkinsjenkins// // detail/impl/posix_mutex.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_POSIX_MUTEX_IPP #define ASIO_DETAIL_IMPL_POSIX_MUTEX_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_PTHREADS) #include "asio/detail/posix_mutex.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { posix_mutex::posix_mutex() { int error = ::pthread_mutex_init(&mutex_, 0); asio::error_code ec(error, asio::error::get_system_category()); asio::detail::throw_error(ec, "mutex"); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_PTHREADS) #endif // ASIO_DETAIL_IMPL_POSIX_MUTEX_IPP galera-3-25.3.20/asio/asio/detail/impl/throw_error.ipp0000644000015300001660000000351213042054732022316 0ustar jenkinsjenkins// // detail/impl/throw_error.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_THROW_ERROR_IPP #define ASIO_DETAIL_IMPL_THROW_ERROR_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/system_error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { void do_throw_error(const asio::error_code& err) { asio::system_error e(err); asio::detail::throw_exception(e); } void do_throw_error(const asio::error_code& err, const char* location) { // boostify: non-boost code starts here #if defined(ASIO_MSVC) && defined(ASIO_HAS_STD_SYSTEM_ERROR) // Microsoft's implementation of std::system_error is non-conformant in that // it ignores the error code's message when a "what" string is supplied. We'll // work around this by explicitly formatting the "what" string. std::string what_msg = location; what_msg += ": "; what_msg += err.message(); asio::system_error e(err, what_msg); asio::detail::throw_exception(e); #else // defined(ASIO_MSVC) && defined(ASIO_HAS_STD_SYSTEM_ERROR) // boostify: non-boost code ends here asio::system_error e(err, location); asio::detail::throw_exception(e); // boostify: non-boost code starts here #endif // defined(ASIO_MSVC) && defined(ASIO_HAS_STD_SYSTEM_ERROR) // boostify: non-boost code ends here } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IMPL_THROW_ERROR_IPP galera-3-25.3.20/asio/asio/detail/impl/handler_tracking.ipp0000644000015300001660000002043413042054732023243 0ustar jenkinsjenkins// // detail/impl/handler_tracking.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_HANDLER_TRACKING_IPP #define ASIO_DETAIL_IMPL_HANDLER_TRACKING_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_ENABLE_HANDLER_TRACKING) #include #include #include "asio/detail/handler_tracking.hpp" #if defined(ASIO_HAS_BOOST_DATE_TIME) # include "asio/time_traits.hpp" #else // defined(ASIO_HAS_BOOST_DATE_TIME) # if defined(ASIO_HAS_STD_CHRONO) # include # elif defined(ASIO_HAS_BOOST_CHRONO) # include # endif # include "asio/detail/chrono_time_traits.hpp" # include "asio/wait_traits.hpp" #endif // defined(ASIO_HAS_BOOST_DATE_TIME) #if !defined(ASIO_WINDOWS) # include #endif // !defined(ASIO_WINDOWS) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct handler_tracking_timestamp { uint64_t seconds; uint64_t microseconds; handler_tracking_timestamp() { #if defined(ASIO_HAS_BOOST_DATE_TIME) boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1)); boost::posix_time::time_duration now = boost::posix_time::microsec_clock::universal_time() - epoch; #elif defined(ASIO_HAS_STD_CHRONO) typedef chrono_time_traits > traits_helper; traits_helper::posix_time_duration now( std::chrono::system_clock::now().time_since_epoch()); #elif defined(ASIO_HAS_BOOST_CHRONO) typedef chrono_time_traits > traits_helper; traits_helper::posix_time_duration now( boost::chrono::system_clock::now().time_since_epoch()); #endif seconds = static_cast(now.total_seconds()); microseconds = static_cast(now.total_microseconds() % 1000000); } }; struct handler_tracking::tracking_state { static_mutex mutex_; uint64_t next_id_; tss_ptr* current_completion_; }; handler_tracking::tracking_state* handler_tracking::get_state() { static tracking_state state = { ASIO_STATIC_MUTEX_INIT, 1, 0 }; return &state; } void handler_tracking::init() { static tracking_state* state = get_state(); state->mutex_.init(); static_mutex::scoped_lock lock(state->mutex_); if (state->current_completion_ == 0) state->current_completion_ = new tss_ptr; } void handler_tracking::creation(handler_tracking::tracked_handler* h, const char* object_type, void* object, const char* op_name) { static tracking_state* state = get_state(); static_mutex::scoped_lock lock(state->mutex_); h->id_ = state->next_id_++; lock.unlock(); handler_tracking_timestamp timestamp; uint64_t current_id = 0; if (completion* current_completion = *state->current_completion_) current_id = current_completion->id_; write_line( #if defined(ASIO_WINDOWS) "@asio|%I64u.%06I64u|%I64u*%I64u|%.20s@%p.%.50s\n", #else // defined(ASIO_WINDOWS) "@asio|%llu.%06llu|%llu*%llu|%.20s@%p.%.50s\n", #endif // defined(ASIO_WINDOWS) timestamp.seconds, timestamp.microseconds, current_id, h->id_, object_type, object, op_name); } handler_tracking::completion::completion(handler_tracking::tracked_handler* h) : id_(h->id_), invoked_(false), next_(*get_state()->current_completion_) { *get_state()->current_completion_ = this; } handler_tracking::completion::~completion() { if (id_) { handler_tracking_timestamp timestamp; write_line( #if defined(ASIO_WINDOWS) "@asio|%I64u.%06I64u|%c%I64u|\n", #else // defined(ASIO_WINDOWS) "@asio|%llu.%06llu|%c%llu|\n", #endif // defined(ASIO_WINDOWS) timestamp.seconds, timestamp.microseconds, invoked_ ? '!' 
: '~', id_); } *get_state()->current_completion_ = next_; } void handler_tracking::completion::invocation_begin() { handler_tracking_timestamp timestamp; write_line( #if defined(ASIO_WINDOWS) "@asio|%I64u.%06I64u|>%I64u|\n", #else // defined(ASIO_WINDOWS) "@asio|%llu.%06llu|>%llu|\n", #endif // defined(ASIO_WINDOWS) timestamp.seconds, timestamp.microseconds, id_); invoked_ = true; } void handler_tracking::completion::invocation_begin( const asio::error_code& ec) { handler_tracking_timestamp timestamp; write_line( #if defined(ASIO_WINDOWS) "@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d\n", #else // defined(ASIO_WINDOWS) "@asio|%llu.%06llu|>%llu|ec=%.20s:%d\n", #endif // defined(ASIO_WINDOWS) timestamp.seconds, timestamp.microseconds, id_, ec.category().name(), ec.value()); invoked_ = true; } void handler_tracking::completion::invocation_begin( const asio::error_code& ec, std::size_t bytes_transferred) { handler_tracking_timestamp timestamp; write_line( #if defined(ASIO_WINDOWS) "@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d,bytes_transferred=%I64u\n", #else // defined(ASIO_WINDOWS) "@asio|%llu.%06llu|>%llu|ec=%.20s:%d,bytes_transferred=%llu\n", #endif // defined(ASIO_WINDOWS) timestamp.seconds, timestamp.microseconds, id_, ec.category().name(), ec.value(), static_cast(bytes_transferred)); invoked_ = true; } void handler_tracking::completion::invocation_begin( const asio::error_code& ec, int signal_number) { handler_tracking_timestamp timestamp; write_line( #if defined(ASIO_WINDOWS) "@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d,signal_number=%d\n", #else // defined(ASIO_WINDOWS) "@asio|%llu.%06llu|>%llu|ec=%.20s:%d,signal_number=%d\n", #endif // defined(ASIO_WINDOWS) timestamp.seconds, timestamp.microseconds, id_, ec.category().name(), ec.value(), signal_number); invoked_ = true; } void handler_tracking::completion::invocation_begin( const asio::error_code& ec, const char* arg) { handler_tracking_timestamp timestamp; write_line( #if defined(ASIO_WINDOWS) "@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d,%.50s\n", #else // defined(ASIO_WINDOWS) "@asio|%llu.%06llu|>%llu|ec=%.20s:%d,%.50s\n", #endif // defined(ASIO_WINDOWS) timestamp.seconds, timestamp.microseconds, id_, ec.category().name(), ec.value(), arg); invoked_ = true; } void handler_tracking::completion::invocation_end() { if (id_) { handler_tracking_timestamp timestamp; write_line( #if defined(ASIO_WINDOWS) "@asio|%I64u.%06I64u|<%I64u|\n", #else // defined(ASIO_WINDOWS) "@asio|%llu.%06llu|<%llu|\n", #endif // defined(ASIO_WINDOWS) timestamp.seconds, timestamp.microseconds, id_); id_ = 0; } } void handler_tracking::operation(const char* object_type, void* object, const char* op_name) { static tracking_state* state = get_state(); handler_tracking_timestamp timestamp; unsigned long long current_id = 0; if (completion* current_completion = *state->current_completion_) current_id = current_completion->id_; write_line( #if defined(ASIO_WINDOWS) "@asio|%I64u.%06I64u|%I64u|%.20s@%p.%.50s\n", #else // defined(ASIO_WINDOWS) "@asio|%llu.%06llu|%llu|%.20s@%p.%.50s\n", #endif // defined(ASIO_WINDOWS) timestamp.seconds, timestamp.microseconds, current_id, object_type, object, op_name); } void handler_tracking::write_line(const char* format, ...) { using namespace std; // For sprintf (or equivalent). 
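// What follows renders one handler-tracking trace record.  Each record is a
// single line sent to standard error with the general shape
//
//   @asio|<seconds>.<microseconds>|<operation and handler ids>|<description>
//
// (compare the format strings passed to write_line() by the callers above).
// The line is formatted into a fixed 256-byte buffer using vsprintf_s or
// vsprintf and then written with WriteFile() on Windows or write() elsewhere.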
va_list args; va_start(args, format); char line[256] = ""; #if defined(ASIO_HAS_SECURE_RTL) int length = vsprintf_s(line, sizeof(line), format, args); #else // defined(ASIO_HAS_SECURE_RTL) int length = vsprintf(line, format, args); #endif // defined(ASIO_HAS_SECURE_RTL) va_end(args); #if defined(ASIO_WINDOWS) HANDLE stderr_handle = ::GetStdHandle(STD_ERROR_HANDLE); DWORD bytes_written = 0; ::WriteFile(stderr_handle, line, length, &bytes_written, 0); #else // defined(ASIO_WINDOWS) ::write(STDERR_FILENO, line, length); #endif // defined(ASIO_WINDOWS) } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_ENABLE_HANDLER_TRACKING) #endif // ASIO_DETAIL_IMPL_HANDLER_TRACKING_IPP galera-3-25.3.20/asio/asio/detail/impl/win_event.ipp0000644000015300001660000000276713042054732021753 0ustar jenkinsjenkins// // detail/win_event.ipp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_EVENT_IPP #define ASIO_DETAIL_IMPL_WIN_EVENT_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) #include "asio/detail/throw_error.hpp" #include "asio/detail/win_event.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { win_event::win_event() : state_(0) { events_[0] = ::CreateEvent(0, true, false, 0); if (!events_[0]) { DWORD last_error = ::GetLastError(); asio::error_code ec(last_error, asio::error::get_system_category()); asio::detail::throw_error(ec, "event"); } events_[1] = ::CreateEvent(0, false, false, 0); if (!events_[1]) { DWORD last_error = ::GetLastError(); ::CloseHandle(events_[0]); asio::error_code ec(last_error, asio::error::get_system_category()); asio::detail::throw_error(ec, "event"); } } win_event::~win_event() { ::CloseHandle(events_[0]); ::CloseHandle(events_[1]); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS) #endif // ASIO_DETAIL_IMPL_WIN_EVENT_IPP galera-3-25.3.20/asio/asio/detail/impl/socket_ops.ipp0000644000015300001660000027333313042054732022125 0ustar jenkinsjenkins// // detail/impl/socket_ops.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SOCKET_OPS_IPP #define ASIO_DETAIL_SOCKET_OPS_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include #include #include #include #include "asio/detail/assert.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/error.hpp" #if defined(ASIO_WINDOWS_RUNTIME) # include # include # include #endif // defined(ASIO_WINDOWS_RUNTIME) #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) \ || defined(__MACH__) && defined(__APPLE__) # if defined(ASIO_HAS_PTHREADS) # include # endif // defined(ASIO_HAS_PTHREADS) #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) // || defined(__MACH__) && defined(__APPLE__) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { namespace socket_ops { #if !defined(ASIO_WINDOWS_RUNTIME) #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) struct msghdr { int msg_namelen; }; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) #if defined(__hpux) // HP-UX doesn't declare these functions extern "C", so they are declared again // here to avoid linker errors about undefined symbols. extern "C" char* if_indextoname(unsigned int, char*); extern "C" unsigned int if_nametoindex(const char*); #endif // defined(__hpux) #endif // !defined(ASIO_WINDOWS_RUNTIME) inline void clear_last_error() { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) WSASetLastError(0); #else errno = 0; #endif } #if !defined(ASIO_WINDOWS_RUNTIME) template inline ReturnType error_wrapper(ReturnType return_value, asio::error_code& ec) { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) ec = asio::error_code(WSAGetLastError(), asio::error::get_system_category()); #else ec = asio::error_code(errno, asio::error::get_system_category()); #endif return return_value; } template inline socket_type call_accept(SockLenType msghdr::*, socket_type s, socket_addr_type* addr, std::size_t* addrlen) { SockLenType tmp_addrlen = addrlen ? (SockLenType)*addrlen : 0; socket_type result = ::accept(s, addr, addrlen ? &tmp_addrlen : 0); if (addrlen) *addrlen = (std::size_t)tmp_addrlen; return result; } socket_type accept(socket_type s, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return invalid_socket; } clear_last_error(); socket_type new_s = error_wrapper(call_accept( &msghdr::msg_namelen, s, addr, addrlen), ec); if (new_s == invalid_socket) return new_s; #if defined(__MACH__) && defined(__APPLE__) || defined(__FreeBSD__) int optval = 1; int result = error_wrapper(::setsockopt(new_s, SOL_SOCKET, SO_NOSIGPIPE, &optval, sizeof(optval)), ec); if (result != 0) { ::close(new_s); return invalid_socket; } #endif ec = asio::error_code(); return new_s; } socket_type sync_accept(socket_type s, state_type state, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec) { // Accept a socket. for (;;) { // Try to complete the operation without blocking. socket_type new_socket = socket_ops::accept(s, addr, addrlen, ec); // Check if operation succeeded. if (new_socket != invalid_socket) return new_socket; // Operation failed. if (ec == asio::error::would_block || ec == asio::error::try_again) { if (state & user_set_non_blocking) return invalid_socket; // Fall through to retry operation. 
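// Summary of the retry policy implemented by this loop: EWOULDBLOCK/EAGAIN
// is reported to the caller only when the user explicitly put the socket
// into non-blocking mode, and ECONNABORTED (plus EPROTO, where defined) is
// reported only when the enable_connection_aborted option is set; otherwise
// those errors cause the loop to block in poll_read() at the bottom and the
// accept() is retried.  Any other error is returned to the caller
// immediately.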
} else if (ec == asio::error::connection_aborted) { if (state & enable_connection_aborted) return invalid_socket; // Fall through to retry operation. } #if defined(EPROTO) else if (ec.value() == EPROTO) { if (state & enable_connection_aborted) return invalid_socket; // Fall through to retry operation. } #endif // defined(EPROTO) else return invalid_socket; // Wait for socket to become ready. if (socket_ops::poll_read(s, 0, ec) < 0) return invalid_socket; } } #if defined(ASIO_HAS_IOCP) void complete_iocp_accept(socket_type s, void* output_buffer, DWORD address_length, socket_addr_type* addr, std::size_t* addrlen, socket_type new_socket, asio::error_code& ec) { // Map non-portable errors to their portable counterparts. if (ec.value() == ERROR_NETNAME_DELETED) ec = asio::error::connection_aborted; if (!ec) { // Get the address of the peer. if (addr && addrlen) { LPSOCKADDR local_addr = 0; int local_addr_length = 0; LPSOCKADDR remote_addr = 0; int remote_addr_length = 0; GetAcceptExSockaddrs(output_buffer, 0, address_length, address_length, &local_addr, &local_addr_length, &remote_addr, &remote_addr_length); if (static_cast(remote_addr_length) > *addrlen) { ec = asio::error::invalid_argument; } else { using namespace std; // For memcpy. memcpy(addr, remote_addr, remote_addr_length); *addrlen = static_cast(remote_addr_length); } } // Need to set the SO_UPDATE_ACCEPT_CONTEXT option so that getsockname // and getpeername will work on the accepted socket. SOCKET update_ctx_param = s; socket_ops::state_type state = 0; socket_ops::setsockopt(new_socket, state, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT, &update_ctx_param, sizeof(SOCKET), ec); } } #else // defined(ASIO_HAS_IOCP) bool non_blocking_accept(socket_type s, state_type state, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec, socket_type& new_socket) { for (;;) { // Accept the waiting connection. new_socket = socket_ops::accept(s, addr, addrlen, ec); // Check if operation succeeded. if (new_socket != invalid_socket) return true; // Retry operation if interrupted by signal. if (ec == asio::error::interrupted) continue; // Operation failed. if (ec == asio::error::would_block || ec == asio::error::try_again) { if (state & user_set_non_blocking) return true; // Fall through to retry operation. } else if (ec == asio::error::connection_aborted) { if (state & enable_connection_aborted) return true; // Fall through to retry operation. } #if defined(EPROTO) else if (ec.value() == EPROTO) { if (state & enable_connection_aborted) return true; // Fall through to retry operation. } #endif // defined(EPROTO) else return true; return false; } } #endif // defined(ASIO_HAS_IOCP) template inline int call_bind(SockLenType msghdr::*, socket_type s, const socket_addr_type* addr, std::size_t addrlen) { return ::bind(s, addr, (SockLenType)addrlen); } int bind(socket_type s, const socket_addr_type* addr, std::size_t addrlen, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return socket_error_retval; } clear_last_error(); int result = error_wrapper(call_bind( &msghdr::msg_namelen, s, addr, addrlen), ec); if (result == 0) ec = asio::error_code(); return result; } int close(socket_type s, state_type& state, bool destruction, asio::error_code& ec) { int result = 0; if (s != invalid_socket) { // We don't want the destructor to block, so set the socket to linger in // the background. If the user doesn't like this behaviour then they need // to explicitly close the socket. 
if (destruction && (state & user_set_linger)) { ::linger opt; opt.l_onoff = 0; opt.l_linger = 0; asio::error_code ignored_ec; socket_ops::setsockopt(s, state, SOL_SOCKET, SO_LINGER, &opt, sizeof(opt), ignored_ec); } clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) result = error_wrapper(::closesocket(s), ec); #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) result = error_wrapper(::close(s), ec); #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) if (result != 0 && (ec == asio::error::would_block || ec == asio::error::try_again)) { // According to UNIX Network Programming Vol. 1, it is possible for // close() to fail with EWOULDBLOCK under certain circumstances. What // isn't clear is the state of the descriptor after this error. The one // current OS where this behaviour is seen, Windows, says that the socket // remains open. Therefore we'll put the descriptor back into blocking // mode and have another attempt at closing it. #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) ioctl_arg_type arg = 0; ::ioctlsocket(s, FIONBIO, &arg); #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) # if defined(__SYMBIAN32__) int flags = ::fcntl(s, F_GETFL, 0); if (flags >= 0) ::fcntl(s, F_SETFL, flags & ~O_NONBLOCK); # else // defined(__SYMBIAN32__) ioctl_arg_type arg = 0; ::ioctl(s, FIONBIO, &arg); # endif // defined(__SYMBIAN32__) #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) state &= ~non_blocking; clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) result = error_wrapper(::closesocket(s), ec); #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) result = error_wrapper(::close(s), ec); #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) } } if (result == 0) ec = asio::error_code(); return result; } bool set_user_non_blocking(socket_type s, state_type& state, bool value, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return false; } clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) ioctl_arg_type arg = (value ? 1 : 0); int result = error_wrapper(::ioctlsocket(s, FIONBIO, &arg), ec); #elif defined(__SYMBIAN32__) int result = error_wrapper(::fcntl(s, F_GETFL, 0), ec); if (result >= 0) { clear_last_error(); int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK)); result = error_wrapper(::fcntl(s, F_SETFL, flag), ec); } #else ioctl_arg_type arg = (value ? 1 : 0); int result = error_wrapper(::ioctl(s, FIONBIO, &arg), ec); #endif if (result >= 0) { ec = asio::error_code(); if (value) state |= user_set_non_blocking; else { // Clearing the user-set non-blocking mode always overrides any // internally-set non-blocking flag. Any subsequent asynchronous // operations will need to re-enable non-blocking I/O. state &= ~(user_set_non_blocking | internal_non_blocking); } return true; } return false; } bool set_internal_non_blocking(socket_type s, state_type& state, bool value, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return false; } if (!value && (state & user_set_non_blocking)) { // It does not make sense to clear the internal non-blocking flag if the // user still wants non-blocking behaviour. Return an error and let the // caller figure out whether to update the user-set non-blocking flag. ec = asio::error::invalid_argument; return false; } clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) ioctl_arg_type arg = (value ? 
1 : 0); int result = error_wrapper(::ioctlsocket(s, FIONBIO, &arg), ec); #elif defined(__SYMBIAN32__) int result = error_wrapper(::fcntl(s, F_GETFL, 0), ec); if (result >= 0) { clear_last_error(); int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK)); result = error_wrapper(::fcntl(s, F_SETFL, flag), ec); } #else ioctl_arg_type arg = (value ? 1 : 0); int result = error_wrapper(::ioctl(s, FIONBIO, &arg), ec); #endif if (result >= 0) { ec = asio::error_code(); if (value) state |= internal_non_blocking; else state &= ~internal_non_blocking; return true; } return false; } int shutdown(socket_type s, int what, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return socket_error_retval; } clear_last_error(); int result = error_wrapper(::shutdown(s, what), ec); if (result == 0) ec = asio::error_code(); return result; } template inline int call_connect(SockLenType msghdr::*, socket_type s, const socket_addr_type* addr, std::size_t addrlen) { return ::connect(s, addr, (SockLenType)addrlen); } int connect(socket_type s, const socket_addr_type* addr, std::size_t addrlen, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return socket_error_retval; } clear_last_error(); int result = error_wrapper(call_connect( &msghdr::msg_namelen, s, addr, addrlen), ec); if (result == 0) ec = asio::error_code(); #if defined(__linux__) else if (ec == asio::error::try_again) ec = asio::error::no_buffer_space; #endif // defined(__linux__) return result; } void sync_connect(socket_type s, const socket_addr_type* addr, std::size_t addrlen, asio::error_code& ec) { // Perform the connect operation. socket_ops::connect(s, addr, addrlen, ec); if (ec != asio::error::in_progress && ec != asio::error::would_block) { // The connect operation finished immediately. return; } // Wait for socket to become ready. if (socket_ops::poll_connect(s, ec) < 0) return; // Get the error code from the connect operation. int connect_error = 0; size_t connect_error_len = sizeof(connect_error); if (socket_ops::getsockopt(s, 0, SOL_SOCKET, SO_ERROR, &connect_error, &connect_error_len, ec) == socket_error_retval) return; // Return the result of the connect operation. ec = asio::error_code(connect_error, asio::error::get_system_category()); } #if defined(ASIO_HAS_IOCP) void complete_iocp_connect(socket_type s, asio::error_code& ec) { // Map non-portable errors to their portable counterparts. switch (ec.value()) { case ERROR_CONNECTION_REFUSED: ec = asio::error::connection_refused; break; case ERROR_NETWORK_UNREACHABLE: ec = asio::error::network_unreachable; break; case ERROR_HOST_UNREACHABLE: ec = asio::error::host_unreachable; break; case ERROR_SEM_TIMEOUT: ec = asio::error::timed_out; break; default: break; } if (!ec) { // Need to set the SO_UPDATE_CONNECT_CONTEXT option so that getsockname // and getpeername will work on the connected socket. socket_ops::state_type state = 0; const int so_update_connect_context = 0x7010; socket_ops::setsockopt(s, state, SOL_SOCKET, so_update_connect_context, 0, 0, ec); } } #endif // defined(ASIO_HAS_IOCP) bool non_blocking_connect(socket_type s, asio::error_code& ec) { // Check if the connect operation has finished. This is required since we may // get spurious readiness notifications from the reactor. 
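// The check below is a zero-timeout readiness query: select() on the write
// and exception sets under Windows/Cygwin/Symbian, poll() for POLLOUT
// elsewhere.  If the socket reports ready, the real outcome of the connect
// is then read back with getsockopt(SO_ERROR), since a completed
// non-blocking connect() delivers success or failure through SO_ERROR
// rather than through the readiness notification itself.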
#if defined(ASIO_WINDOWS) \ || defined(__CYGWIN__) \ || defined(__SYMBIAN32__) fd_set write_fds; FD_ZERO(&write_fds); FD_SET(s, &write_fds); fd_set except_fds; FD_ZERO(&except_fds); FD_SET(s, &except_fds); timeval zero_timeout; zero_timeout.tv_sec = 0; zero_timeout.tv_usec = 0; int ready = ::select(s + 1, 0, &write_fds, &except_fds, &zero_timeout); #else // defined(ASIO_WINDOWS) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) pollfd fds; fds.fd = s; fds.events = POLLOUT; fds.revents = 0; int ready = ::poll(&fds, 1, 0); #endif // defined(ASIO_WINDOWS) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) if (ready == 0) { // The asynchronous connect operation is still in progress. return false; } // Get the error code from the connect operation. int connect_error = 0; size_t connect_error_len = sizeof(connect_error); if (socket_ops::getsockopt(s, 0, SOL_SOCKET, SO_ERROR, &connect_error, &connect_error_len, ec) == 0) { if (connect_error) { ec = asio::error_code(connect_error, asio::error::get_system_category()); } else ec = asio::error_code(); } return true; } int socketpair(int af, int type, int protocol, socket_type sv[2], asio::error_code& ec) { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) (void)(af); (void)(type); (void)(protocol); (void)(sv); ec = asio::error::operation_not_supported; return socket_error_retval; #else clear_last_error(); int result = error_wrapper(::socketpair(af, type, protocol, sv), ec); if (result == 0) ec = asio::error_code(); return result; #endif } bool sockatmark(socket_type s, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return false; } #if defined(SIOCATMARK) ioctl_arg_type value = 0; # if defined(ASIO_WINDOWS) || defined(__CYGWIN__) int result = error_wrapper(::ioctlsocket(s, SIOCATMARK, &value), ec); # else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) int result = error_wrapper(::ioctl(s, SIOCATMARK, &value), ec); # endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) if (result == 0) ec = asio::error_code(); # if defined(ENOTTY) if (ec.value() == ENOTTY) ec = asio::error::not_socket; # endif // defined(ENOTTY) #else // defined(SIOCATMARK) int value = error_wrapper(::sockatmark(s), ec); if (value != -1) ec = asio::error_code(); #endif // defined(SIOCATMARK) return ec ? false : value != 0; } size_t available(socket_type s, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return 0; } ioctl_arg_type value = 0; #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) int result = error_wrapper(::ioctlsocket(s, FIONREAD, &value), ec); #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) int result = error_wrapper(::ioctl(s, FIONREAD, &value), ec); #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) if (result == 0) ec = asio::error_code(); #if defined(ENOTTY) if (ec.value() == ENOTTY) ec = asio::error::not_socket; #endif // defined(ENOTTY) return ec ? 
static_cast(0) : static_cast(value); } int listen(socket_type s, int backlog, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return socket_error_retval; } clear_last_error(); int result = error_wrapper(::listen(s, backlog), ec); if (result == 0) ec = asio::error_code(); return result; } inline void init_buf_iov_base(void*& base, void* addr) { base = addr; } template inline void init_buf_iov_base(T& base, void* addr) { base = static_cast(addr); } #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) typedef WSABUF buf; #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) typedef iovec buf; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) void init_buf(buf& b, void* data, size_t size) { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) b.buf = static_cast(data); b.len = static_cast(size); #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) init_buf_iov_base(b.iov_base, data); b.iov_len = size; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) } void init_buf(buf& b, const void* data, size_t size) { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) b.buf = static_cast(const_cast(data)); b.len = static_cast(size); #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) init_buf_iov_base(b.iov_base, const_cast(data)); b.iov_len = size; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) } inline void init_msghdr_msg_name(void*& name, socket_addr_type* addr) { name = addr; } inline void init_msghdr_msg_name(void*& name, const socket_addr_type* addr) { name = const_cast(addr); } template inline void init_msghdr_msg_name(T& name, socket_addr_type* addr) { name = reinterpret_cast(addr); } template inline void init_msghdr_msg_name(T& name, const socket_addr_type* addr) { name = reinterpret_cast(const_cast(addr)); } signed_size_type recv(socket_type s, buf* bufs, size_t count, int flags, asio::error_code& ec) { clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Receive some data. DWORD recv_buf_count = static_cast(count); DWORD bytes_transferred = 0; DWORD recv_flags = flags; int result = error_wrapper(::WSARecv(s, bufs, recv_buf_count, &bytes_transferred, &recv_flags, 0, 0), ec); if (ec.value() == ERROR_NETNAME_DELETED) ec = asio::error::connection_reset; else if (ec.value() == ERROR_PORT_UNREACHABLE) ec = asio::error::connection_refused; if (result != 0) return socket_error_retval; ec = asio::error_code(); return bytes_transferred; #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) msghdr msg = msghdr(); msg.msg_iov = bufs; msg.msg_iovlen = static_cast(count); signed_size_type result = error_wrapper(::recvmsg(s, &msg, flags), ec); if (result >= 0) ec = asio::error_code(); return result; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) } size_t sync_recv(socket_type s, state_type state, buf* bufs, size_t count, int flags, bool all_empty, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return 0; } // A request to read 0 bytes on a stream is a no-op. if (all_empty && (state & stream_oriented)) { ec = asio::error_code(); return 0; } // Read some data. for (;;) { // Try to complete the operation without blocking. signed_size_type bytes = socket_ops::recv(s, bufs, count, flags, ec); // Check if operation succeeded. if (bytes > 0) return bytes; // Check for EOF. if ((state & stream_oriented) && bytes == 0) { ec = asio::error::eof; return 0; } // Operation failed. 
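// Illustrative sketch (POSIX only): the scatter-gather receive built by
// init_buf()/recv() above -- several user buffers are described by an iovec
// array and filled by a single recvmsg() call.  recv_into_two_buffers() is a
// hypothetical helper, not an asio interface.
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t recv_into_two_buffers(int fd, void* hdr, size_t hdr_len,
                                     void* body, size_t body_len)
{
    iovec iov[2];
    iov[0].iov_base = hdr;  iov[0].iov_len = hdr_len;
    iov[1].iov_base = body; iov[1].iov_len = body_len;

    msghdr msg = msghdr();          // zero-initialise, as the wrappers above do
    msg.msg_iov = iov;
    msg.msg_iovlen = 2;
    return ::recvmsg(fd, &msg, 0);
}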
if ((state & user_set_non_blocking) || (ec != asio::error::would_block && ec != asio::error::try_again)) return 0; // Wait for socket to become ready. if (socket_ops::poll_read(s, 0, ec) < 0) return 0; } } #if defined(ASIO_HAS_IOCP) void complete_iocp_recv(state_type state, const weak_cancel_token_type& cancel_token, bool all_empty, asio::error_code& ec, size_t bytes_transferred) { // Map non-portable errors to their portable counterparts. if (ec.value() == ERROR_NETNAME_DELETED) { if (cancel_token.expired()) ec = asio::error::operation_aborted; else ec = asio::error::connection_reset; } else if (ec.value() == ERROR_PORT_UNREACHABLE) { ec = asio::error::connection_refused; } // Check for connection closed. else if (!ec && bytes_transferred == 0 && (state & stream_oriented) != 0 && !all_empty) { ec = asio::error::eof; } } #else // defined(ASIO_HAS_IOCP) bool non_blocking_recv(socket_type s, buf* bufs, size_t count, int flags, bool is_stream, asio::error_code& ec, size_t& bytes_transferred) { for (;;) { // Read some data. signed_size_type bytes = socket_ops::recv(s, bufs, count, flags, ec); // Check for end of stream. if (is_stream && bytes == 0) { ec = asio::error::eof; return true; } // Retry operation if interrupted by signal. if (ec == asio::error::interrupted) continue; // Check if we need to run the operation again. if (ec == asio::error::would_block || ec == asio::error::try_again) return false; // Operation is complete. if (bytes >= 0) { ec = asio::error_code(); bytes_transferred = bytes; } else bytes_transferred = 0; return true; } } #endif // defined(ASIO_HAS_IOCP) signed_size_type recvfrom(socket_type s, buf* bufs, size_t count, int flags, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec) { clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Receive some data. DWORD recv_buf_count = static_cast(count); DWORD bytes_transferred = 0; DWORD recv_flags = flags; int tmp_addrlen = (int)*addrlen; int result = error_wrapper(::WSARecvFrom(s, bufs, recv_buf_count, &bytes_transferred, &recv_flags, addr, &tmp_addrlen, 0, 0), ec); *addrlen = (std::size_t)tmp_addrlen; if (ec.value() == ERROR_NETNAME_DELETED) ec = asio::error::connection_reset; else if (ec.value() == ERROR_PORT_UNREACHABLE) ec = asio::error::connection_refused; if (result != 0) return socket_error_retval; ec = asio::error_code(); return bytes_transferred; #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) msghdr msg = msghdr(); init_msghdr_msg_name(msg.msg_name, addr); msg.msg_namelen = static_cast(*addrlen); msg.msg_iov = bufs; msg.msg_iovlen = static_cast(count); signed_size_type result = error_wrapper(::recvmsg(s, &msg, flags), ec); *addrlen = msg.msg_namelen; if (result >= 0) ec = asio::error_code(); return result; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) } size_t sync_recvfrom(socket_type s, state_type state, buf* bufs, size_t count, int flags, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return 0; } // Read some data. for (;;) { // Try to complete the operation without blocking. signed_size_type bytes = socket_ops::recvfrom( s, bufs, count, flags, addr, addrlen, ec); // Check if operation succeeded. if (bytes >= 0) return bytes; // Operation failed. if ((state & user_set_non_blocking) || (ec != asio::error::would_block && ec != asio::error::try_again)) return 0; // Wait for socket to become ready. 
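// Illustrative sketch (POSIX only): the retry policy shared by the
// non_blocking_* helpers above -- restart the call when it is interrupted by
// a signal (EINTR) and report "would block" so the reactor can wait for the
// next readiness event.  nonblocking_recv_some() is a hypothetical helper.
#include <sys/types.h>
#include <sys/socket.h>
#include <cerrno>

// Returns bytes read, 0 on orderly shutdown, or -1 with errno == EAGAIN /
// EWOULDBLOCK when the caller should retry after the next readiness event.
static ssize_t nonblocking_recv_some(int fd, void* buf, size_t len)
{
    for (;;)
    {
        ssize_t n = ::recv(fd, buf, len, 0);
        if (n >= 0) return n;               // success (0 means EOF on a stream)
        if (errno == EINTR) continue;       // interrupted by a signal: retry now
        return -1;                          // EAGAIN/EWOULDBLOCK or a real error
    }
}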
if (socket_ops::poll_read(s, 0, ec) < 0) return 0; } } #if defined(ASIO_HAS_IOCP) void complete_iocp_recvfrom( const weak_cancel_token_type& cancel_token, asio::error_code& ec) { // Map non-portable errors to their portable counterparts. if (ec.value() == ERROR_NETNAME_DELETED) { if (cancel_token.expired()) ec = asio::error::operation_aborted; else ec = asio::error::connection_reset; } else if (ec.value() == ERROR_PORT_UNREACHABLE) { ec = asio::error::connection_refused; } } #else // defined(ASIO_HAS_IOCP) bool non_blocking_recvfrom(socket_type s, buf* bufs, size_t count, int flags, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec, size_t& bytes_transferred) { for (;;) { // Read some data. signed_size_type bytes = socket_ops::recvfrom( s, bufs, count, flags, addr, addrlen, ec); // Retry operation if interrupted by signal. if (ec == asio::error::interrupted) continue; // Check if we need to run the operation again. if (ec == asio::error::would_block || ec == asio::error::try_again) return false; // Operation is complete. if (bytes >= 0) { ec = asio::error_code(); bytes_transferred = bytes; } else bytes_transferred = 0; return true; } } #endif // defined(ASIO_HAS_IOCP) signed_size_type recvmsg(socket_type s, buf* bufs, size_t count, int in_flags, int& out_flags, asio::error_code& ec) { clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) out_flags = 0; return socket_ops::recv(s, bufs, count, in_flags, ec); #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) msghdr msg = msghdr(); msg.msg_iov = bufs; msg.msg_iovlen = static_cast(count); signed_size_type result = error_wrapper(::recvmsg(s, &msg, in_flags), ec); if (result >= 0) { ec = asio::error_code(); out_flags = msg.msg_flags; } else out_flags = 0; return result; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) } size_t sync_recvmsg(socket_type s, state_type state, buf* bufs, size_t count, int in_flags, int& out_flags, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return 0; } // Read some data. for (;;) { // Try to complete the operation without blocking. signed_size_type bytes = socket_ops::recvmsg( s, bufs, count, in_flags, out_flags, ec); // Check if operation succeeded. if (bytes >= 0) return bytes; // Operation failed. if ((state & user_set_non_blocking) || (ec != asio::error::would_block && ec != asio::error::try_again)) return 0; // Wait for socket to become ready. if (socket_ops::poll_read(s, 0, ec) < 0) return 0; } } #if defined(ASIO_HAS_IOCP) void complete_iocp_recvmsg( const weak_cancel_token_type& cancel_token, asio::error_code& ec) { // Map non-portable errors to their portable counterparts. if (ec.value() == ERROR_NETNAME_DELETED) { if (cancel_token.expired()) ec = asio::error::operation_aborted; else ec = asio::error::connection_reset; } else if (ec.value() == ERROR_PORT_UNREACHABLE) { ec = asio::error::connection_refused; } } #else // defined(ASIO_HAS_IOCP) bool non_blocking_recvmsg(socket_type s, buf* bufs, size_t count, int in_flags, int& out_flags, asio::error_code& ec, size_t& bytes_transferred) { for (;;) { // Read some data. signed_size_type bytes = socket_ops::recvmsg( s, bufs, count, in_flags, out_flags, ec); // Retry operation if interrupted by signal. if (ec == asio::error::interrupted) continue; // Check if we need to run the operation again. if (ec == asio::error::would_block || ec == asio::error::try_again) return false; // Operation is complete. 
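// Illustrative sketch (POSIX only): why recvmsg()'s output flags are
// propagated by the recvmsg wrappers above -- after the call, msg_flags
// reports conditions such as a truncated datagram (MSG_TRUNC).
// recv_datagram() is a hypothetical helper; it returns -1 on error and sets
// 'truncated' when the supplied buffer was too small for the datagram.
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t recv_datagram(int fd, void* buf, size_t len, bool& truncated)
{
    iovec iov = { buf, len };
    msghdr msg = msghdr();
    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;

    ssize_t n = ::recvmsg(fd, &msg, 0);
    truncated = (n >= 0) && (msg.msg_flags & MSG_TRUNC) != 0;
    return n;
}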
if (bytes >= 0) { ec = asio::error_code(); bytes_transferred = bytes; } else bytes_transferred = 0; return true; } } #endif // defined(ASIO_HAS_IOCP) signed_size_type send(socket_type s, const buf* bufs, size_t count, int flags, asio::error_code& ec) { clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Send the data. DWORD send_buf_count = static_cast(count); DWORD bytes_transferred = 0; DWORD send_flags = flags; int result = error_wrapper(::WSASend(s, const_cast(bufs), send_buf_count, &bytes_transferred, send_flags, 0, 0), ec); if (ec.value() == ERROR_NETNAME_DELETED) ec = asio::error::connection_reset; else if (ec.value() == ERROR_PORT_UNREACHABLE) ec = asio::error::connection_refused; if (result != 0) return socket_error_retval; ec = asio::error_code(); return bytes_transferred; #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) msghdr msg = msghdr(); msg.msg_iov = const_cast(bufs); msg.msg_iovlen = static_cast(count); #if defined(__linux__) flags |= MSG_NOSIGNAL; #endif // defined(__linux__) signed_size_type result = error_wrapper(::sendmsg(s, &msg, flags), ec); if (result >= 0) ec = asio::error_code(); return result; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) } size_t sync_send(socket_type s, state_type state, const buf* bufs, size_t count, int flags, bool all_empty, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return 0; } // A request to write 0 bytes to a stream is a no-op. if (all_empty && (state & stream_oriented)) { ec = asio::error_code(); return 0; } // Read some data. for (;;) { // Try to complete the operation without blocking. signed_size_type bytes = socket_ops::send(s, bufs, count, flags, ec); // Check if operation succeeded. if (bytes >= 0) return bytes; // Operation failed. if ((state & user_set_non_blocking) || (ec != asio::error::would_block && ec != asio::error::try_again)) return 0; // Wait for socket to become ready. if (socket_ops::poll_write(s, 0, ec) < 0) return 0; } } #if defined(ASIO_HAS_IOCP) void complete_iocp_send( const weak_cancel_token_type& cancel_token, asio::error_code& ec) { // Map non-portable errors to their portable counterparts. if (ec.value() == ERROR_NETNAME_DELETED) { if (cancel_token.expired()) ec = asio::error::operation_aborted; else ec = asio::error::connection_reset; } else if (ec.value() == ERROR_PORT_UNREACHABLE) { ec = asio::error::connection_refused; } } #else // defined(ASIO_HAS_IOCP) bool non_blocking_send(socket_type s, const buf* bufs, size_t count, int flags, asio::error_code& ec, size_t& bytes_transferred) { for (;;) { // Write some data. signed_size_type bytes = socket_ops::send(s, bufs, count, flags, ec); // Retry operation if interrupted by signal. if (ec == asio::error::interrupted) continue; // Check if we need to run the operation again. if (ec == asio::error::would_block || ec == asio::error::try_again) return false; // Operation is complete. if (bytes >= 0) { ec = asio::error_code(); bytes_transferred = bytes; } else bytes_transferred = 0; return true; } } #endif // defined(ASIO_HAS_IOCP) signed_size_type sendto(socket_type s, const buf* bufs, size_t count, int flags, const socket_addr_type* addr, std::size_t addrlen, asio::error_code& ec) { clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Send the data. 
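// Illustrative sketch (Linux): the send() path above ORs MSG_NOSIGNAL into the
// flags so that writing to a peer-closed connection fails with EPIPE instead
// of raising SIGPIPE.  send_no_sigpipe() is a hypothetical helper; on systems
// without MSG_NOSIGNAL the SO_NOSIGPIPE option set in the socket() wrapper
// further below serves the same purpose.
#include <sys/types.h>
#include <sys/socket.h>

static ssize_t send_no_sigpipe(int fd, const void* data, size_t len)
{
#if defined(MSG_NOSIGNAL)
    return ::send(fd, data, len, MSG_NOSIGNAL);
#else
    return ::send(fd, data, len, 0);   // rely on SO_NOSIGPIPE / signal handling
#endif
}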
DWORD send_buf_count = static_cast(count); DWORD bytes_transferred = 0; int result = error_wrapper(::WSASendTo(s, const_cast(bufs), send_buf_count, &bytes_transferred, flags, addr, static_cast(addrlen), 0, 0), ec); if (ec.value() == ERROR_NETNAME_DELETED) ec = asio::error::connection_reset; else if (ec.value() == ERROR_PORT_UNREACHABLE) ec = asio::error::connection_refused; if (result != 0) return socket_error_retval; ec = asio::error_code(); return bytes_transferred; #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) msghdr msg = msghdr(); init_msghdr_msg_name(msg.msg_name, addr); msg.msg_namelen = static_cast(addrlen); msg.msg_iov = const_cast(bufs); msg.msg_iovlen = static_cast(count); #if defined(__linux__) flags |= MSG_NOSIGNAL; #endif // defined(__linux__) signed_size_type result = error_wrapper(::sendmsg(s, &msg, flags), ec); if (result >= 0) ec = asio::error_code(); return result; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) } size_t sync_sendto(socket_type s, state_type state, const buf* bufs, size_t count, int flags, const socket_addr_type* addr, std::size_t addrlen, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return 0; } // Write some data. for (;;) { // Try to complete the operation without blocking. signed_size_type bytes = socket_ops::sendto( s, bufs, count, flags, addr, addrlen, ec); // Check if operation succeeded. if (bytes >= 0) return bytes; // Operation failed. if ((state & user_set_non_blocking) || (ec != asio::error::would_block && ec != asio::error::try_again)) return 0; // Wait for socket to become ready. if (socket_ops::poll_write(s, 0, ec) < 0) return 0; } } #if !defined(ASIO_HAS_IOCP) bool non_blocking_sendto(socket_type s, const buf* bufs, size_t count, int flags, const socket_addr_type* addr, std::size_t addrlen, asio::error_code& ec, size_t& bytes_transferred) { for (;;) { // Write some data. signed_size_type bytes = socket_ops::sendto( s, bufs, count, flags, addr, addrlen, ec); // Retry operation if interrupted by signal. if (ec == asio::error::interrupted) continue; // Check if we need to run the operation again. if (ec == asio::error::would_block || ec == asio::error::try_again) return false; // Operation is complete. if (bytes >= 0) { ec = asio::error_code(); bytes_transferred = bytes; } else bytes_transferred = 0; return true; } } #endif // !defined(ASIO_HAS_IOCP) socket_type socket(int af, int type, int protocol, asio::error_code& ec) { clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) socket_type s = error_wrapper(::WSASocketW(af, type, protocol, 0, 0, WSA_FLAG_OVERLAPPED), ec); if (s == invalid_socket) return s; if (af == ASIO_OS_DEF(AF_INET6)) { // Try to enable the POSIX default behaviour of having IPV6_V6ONLY set to // false. This will only succeed on Windows Vista and later versions of // Windows, where a dual-stack IPv4/v6 implementation is available. 
DWORD optval = 0; ::setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY, reinterpret_cast(&optval), sizeof(optval)); } ec = asio::error_code(); return s; #elif defined(__MACH__) && defined(__APPLE__) || defined(__FreeBSD__) socket_type s = error_wrapper(::socket(af, type, protocol), ec); if (s == invalid_socket) return s; int optval = 1; int result = error_wrapper(::setsockopt(s, SOL_SOCKET, SO_NOSIGPIPE, &optval, sizeof(optval)), ec); if (result != 0) { ::close(s); return invalid_socket; } return s; #else int s = error_wrapper(::socket(af, type, protocol), ec); if (s >= 0) ec = asio::error_code(); return s; #endif } template inline int call_setsockopt(SockLenType msghdr::*, socket_type s, int level, int optname, const void* optval, std::size_t optlen) { return ::setsockopt(s, level, optname, (const char*)optval, (SockLenType)optlen); } int setsockopt(socket_type s, state_type& state, int level, int optname, const void* optval, std::size_t optlen, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return socket_error_retval; } if (level == custom_socket_option_level && optname == always_fail_option) { ec = asio::error::invalid_argument; return socket_error_retval; } if (level == custom_socket_option_level && optname == enable_connection_aborted_option) { if (optlen != sizeof(int)) { ec = asio::error::invalid_argument; return socket_error_retval; } if (*static_cast(optval)) state |= enable_connection_aborted; else state &= ~enable_connection_aborted; ec = asio::error_code(); return 0; } if (level == SOL_SOCKET && optname == SO_LINGER) state |= user_set_linger; #if defined(__BORLANDC__) // Mysteriously, using the getsockopt and setsockopt functions directly with // Borland C++ results in incorrect values being set and read. The bug can be // worked around by using function addresses resolved with GetProcAddress. if (HMODULE winsock_module = ::GetModuleHandleA("ws2_32")) { typedef int (WSAAPI *sso_t)(SOCKET, int, int, const char*, int); if (sso_t sso = (sso_t)::GetProcAddress(winsock_module, "setsockopt")) { clear_last_error(); return error_wrapper(sso(s, level, optname, reinterpret_cast(optval), static_cast(optlen)), ec); } } ec = asio::error::fault; return socket_error_retval; #else // defined(__BORLANDC__) clear_last_error(); int result = error_wrapper(call_setsockopt(&msghdr::msg_namelen, s, level, optname, optval, optlen), ec); if (result == 0) { ec = asio::error_code(); #if defined(__MACH__) && defined(__APPLE__) \ || defined(__NetBSD__) || defined(__FreeBSD__) || defined(__OpenBSD__) // To implement portable behaviour for SO_REUSEADDR with UDP sockets we // need to also set SO_REUSEPORT on BSD-based platforms. 
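// Illustrative sketch (POSIX only): the dual-stack behaviour requested above
// for Windows Vista and later -- clear IPV6_V6ONLY on an AF_INET6 socket so
// that it also accepts IPv4-mapped connections.  open_dual_stack_tcp_socket()
// is a hypothetical helper; some systems ignore or forbid changing the option.
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

static int open_dual_stack_tcp_socket()
{
    int fd = ::socket(AF_INET6, SOCK_STREAM, 0);
    if (fd < 0) return -1;

    int off = 0;                                    // 0 = accept IPv4 and IPv6
    if (::setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &off, sizeof(off)) != 0)
    {
        ::close(fd);
        return -1;
    }
    return fd;
}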
if ((state & datagram_oriented) && level == SOL_SOCKET && optname == SO_REUSEADDR) { call_setsockopt(&msghdr::msg_namelen, s, SOL_SOCKET, SO_REUSEPORT, optval, optlen); } #endif } return result; #endif // defined(__BORLANDC__) } template inline int call_getsockopt(SockLenType msghdr::*, socket_type s, int level, int optname, void* optval, std::size_t* optlen) { SockLenType tmp_optlen = (SockLenType)*optlen; int result = ::getsockopt(s, level, optname, (char*)optval, &tmp_optlen); *optlen = (std::size_t)tmp_optlen; return result; } int getsockopt(socket_type s, state_type state, int level, int optname, void* optval, size_t* optlen, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return socket_error_retval; } if (level == custom_socket_option_level && optname == always_fail_option) { ec = asio::error::invalid_argument; return socket_error_retval; } if (level == custom_socket_option_level && optname == enable_connection_aborted_option) { if (*optlen != sizeof(int)) { ec = asio::error::invalid_argument; return socket_error_retval; } *static_cast(optval) = (state & enable_connection_aborted) ? 1 : 0; ec = asio::error_code(); return 0; } #if defined(__BORLANDC__) // Mysteriously, using the getsockopt and setsockopt functions directly with // Borland C++ results in incorrect values being set and read. The bug can be // worked around by using function addresses resolved with GetProcAddress. if (HMODULE winsock_module = ::GetModuleHandleA("ws2_32")) { typedef int (WSAAPI *gso_t)(SOCKET, int, int, char*, int*); if (gso_t gso = (gso_t)::GetProcAddress(winsock_module, "getsockopt")) { clear_last_error(); int tmp_optlen = static_cast(*optlen); int result = error_wrapper(gso(s, level, optname, reinterpret_cast(optval), &tmp_optlen), ec); *optlen = static_cast(tmp_optlen); if (result != 0 && level == IPPROTO_IPV6 && optname == IPV6_V6ONLY && ec.value() == WSAENOPROTOOPT && *optlen == sizeof(DWORD)) { // Dual-stack IPv4/v6 sockets, and the IPV6_V6ONLY socket option, are // only supported on Windows Vista and later. To simplify program logic // we will fake success of getting this option and specify that the // value is non-zero (i.e. true). This corresponds to the behavior of // IPv6 sockets on Windows platforms pre-Vista. *static_cast(optval) = 1; ec = asio::error_code(); } return result; } } ec = asio::error::fault; return socket_error_retval; #elif defined(ASIO_WINDOWS) || defined(__CYGWIN__) clear_last_error(); int result = error_wrapper(call_getsockopt(&msghdr::msg_namelen, s, level, optname, optval, optlen), ec); if (result != 0 && level == IPPROTO_IPV6 && optname == IPV6_V6ONLY && ec.value() == WSAENOPROTOOPT && *optlen == sizeof(DWORD)) { // Dual-stack IPv4/v6 sockets, and the IPV6_V6ONLY socket option, are only // supported on Windows Vista and later. To simplify program logic we will // fake success of getting this option and specify that the value is // non-zero (i.e. true). This corresponds to the behavior of IPv6 sockets // on Windows platforms pre-Vista. 
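// Illustrative sketch (POSIX only): the portability note handled above -- on
// BSD-derived systems a UDP socket needs SO_REUSEPORT in addition to
// SO_REUSEADDR before several sockets may bind the same address/port (for
// example, multiple multicast receivers).  set_reuse() is a hypothetical
// helper.
#include <sys/socket.h>

static int set_reuse(int fd)
{
    int on = 1;
    if (::setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)) != 0)
        return -1;
#if defined(SO_REUSEPORT)
    ::setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &on, sizeof(on));  // best effort
#endif
    return 0;
}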
*static_cast(optval) = 1; ec = asio::error_code(); } if (result == 0) ec = asio::error_code(); return result; #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) clear_last_error(); int result = error_wrapper(call_getsockopt(&msghdr::msg_namelen, s, level, optname, optval, optlen), ec); #if defined(__linux__) if (result == 0 && level == SOL_SOCKET && *optlen == sizeof(int) && (optname == SO_SNDBUF || optname == SO_RCVBUF)) { // On Linux, setting SO_SNDBUF or SO_RCVBUF to N actually causes the kernel // to set the buffer size to N*2. Linux puts additional stuff into the // buffers so that only about half is actually available to the application. // The retrieved value is divided by 2 here to make it appear as though the // correct value has been set. *static_cast(optval) /= 2; } #endif // defined(__linux__) if (result == 0) ec = asio::error_code(); return result; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) } template inline int call_getpeername(SockLenType msghdr::*, socket_type s, socket_addr_type* addr, std::size_t* addrlen) { SockLenType tmp_addrlen = (SockLenType)*addrlen; int result = ::getpeername(s, addr, &tmp_addrlen); *addrlen = (std::size_t)tmp_addrlen; return result; } int getpeername(socket_type s, socket_addr_type* addr, std::size_t* addrlen, bool cached, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return socket_error_retval; } #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) if (cached) { // Check if socket is still connected. DWORD connect_time = 0; size_t connect_time_len = sizeof(connect_time); if (socket_ops::getsockopt(s, 0, SOL_SOCKET, SO_CONNECT_TIME, &connect_time, &connect_time_len, ec) == socket_error_retval) { return socket_error_retval; } if (connect_time == 0xFFFFFFFF) { ec = asio::error::not_connected; return socket_error_retval; } // The cached value is still valid. 
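// Illustrative sketch (Linux): the buffer-size quirk handled above -- the
// kernel doubles the value passed to SO_RCVBUF/SO_SNDBUF for its own
// bookkeeping, so reading the option back returns roughly twice the requested
// size.  requested_rcvbuf() is a hypothetical helper mirroring the division
// by two performed by the getsockopt() wrapper above.
#include <sys/socket.h>

static int requested_rcvbuf(int fd)
{
    int value = 0;
    socklen_t len = sizeof(value);
    if (::getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &value, &len) != 0)
        return -1;
#if defined(__linux__)
    value /= 2;     // undo the kernel's doubling, as the wrapper above does
#endif
    return value;
}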
ec = asio::error_code(); return 0; } #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) (void)cached; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) clear_last_error(); int result = error_wrapper(call_getpeername( &msghdr::msg_namelen, s, addr, addrlen), ec); if (result == 0) ec = asio::error_code(); return result; } template inline int call_getsockname(SockLenType msghdr::*, socket_type s, socket_addr_type* addr, std::size_t* addrlen) { SockLenType tmp_addrlen = (SockLenType)*addrlen; int result = ::getsockname(s, addr, &tmp_addrlen); *addrlen = (std::size_t)tmp_addrlen; return result; } int getsockname(socket_type s, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return socket_error_retval; } clear_last_error(); int result = error_wrapper(call_getsockname( &msghdr::msg_namelen, s, addr, addrlen), ec); if (result == 0) ec = asio::error_code(); return result; } int ioctl(socket_type s, state_type& state, int cmd, ioctl_arg_type* arg, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return socket_error_retval; } clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) int result = error_wrapper(::ioctlsocket(s, cmd, arg), ec); #elif defined(__MACH__) && defined(__APPLE__) \ || defined(__NetBSD__) || defined(__FreeBSD__) || defined(__OpenBSD__) int result = error_wrapper(::ioctl(s, static_cast(cmd), arg), ec); #else int result = error_wrapper(::ioctl(s, cmd, arg), ec); #endif if (result >= 0) { ec = asio::error_code(); // When updating the non-blocking mode we always perform the ioctl syscall, // even if the flags would otherwise indicate that the socket is already in // the correct state. This ensures that the underlying socket is put into // the state that has been requested by the user. If the ioctl syscall was // successful then we need to update the flags to match. if (cmd == static_cast(FIONBIO)) { if (*arg) { state |= user_set_non_blocking; } else { // Clearing the non-blocking mode always overrides any internally-set // non-blocking flag. Any subsequent asynchronous operations will need // to re-enable non-blocking I/O. state &= ~(user_set_non_blocking | internal_non_blocking); } } } return result; } int select(int nfds, fd_set* readfds, fd_set* writefds, fd_set* exceptfds, timeval* timeout, asio::error_code& ec) { clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) if (!readfds && !writefds && !exceptfds && timeout) { DWORD milliseconds = timeout->tv_sec * 1000 + timeout->tv_usec / 1000; if (milliseconds == 0) milliseconds = 1; // Force context switch. ::Sleep(milliseconds); ec = asio::error_code(); return 0; } // The select() call allows timeout values measured in microseconds, but the // system clock (as wrapped by boost::posix_time::microsec_clock) typically // has a resolution of 10 milliseconds. This can lead to a spinning select // reactor, meaning increased CPU usage, when waiting for the earliest // scheduled timeout if it's less than 10 milliseconds away. To avoid a tight // spin we'll use a minimum timeout of 1 millisecond. if (timeout && timeout->tv_sec == 0 && timeout->tv_usec > 0 && timeout->tv_usec < 1000) timeout->tv_usec = 1000; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) #if defined(__hpux) && defined(__SELECT) timespec ts; ts.tv_sec = timeout ? timeout->tv_sec : 0; ts.tv_nsec = timeout ? timeout->tv_usec * 1000 : 0; return error_wrapper(::pselect(nfds, readfds, writefds, exceptfds, timeout ? 
&ts : 0, 0), ec); #else int result = error_wrapper(::select(nfds, readfds, writefds, exceptfds, timeout), ec); if (result >= 0) ec = asio::error_code(); return result; #endif } int poll_read(socket_type s, state_type state, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return socket_error_retval; } #if defined(ASIO_WINDOWS) \ || defined(__CYGWIN__) \ || defined(__SYMBIAN32__) fd_set fds; FD_ZERO(&fds); FD_SET(s, &fds); timeval zero_timeout; zero_timeout.tv_sec = 0; zero_timeout.tv_usec = 0; timeval* timeout = (state & user_set_non_blocking) ? &zero_timeout : 0; clear_last_error(); int result = error_wrapper(::select(s + 1, &fds, 0, 0, timeout), ec); #else // defined(ASIO_WINDOWS) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) pollfd fds; fds.fd = s; fds.events = POLLIN; fds.revents = 0; int timeout = (state & user_set_non_blocking) ? 0 : -1; clear_last_error(); int result = error_wrapper(::poll(&fds, 1, timeout), ec); #endif // defined(ASIO_WINDOWS) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) if (result == 0) ec = (state & user_set_non_blocking) ? asio::error::would_block : asio::error_code(); else if (result > 0) ec = asio::error_code(); return result; } int poll_write(socket_type s, state_type state, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return socket_error_retval; } #if defined(ASIO_WINDOWS) \ || defined(__CYGWIN__) \ || defined(__SYMBIAN32__) fd_set fds; FD_ZERO(&fds); FD_SET(s, &fds); timeval zero_timeout; zero_timeout.tv_sec = 0; zero_timeout.tv_usec = 0; timeval* timeout = (state & user_set_non_blocking) ? &zero_timeout : 0; clear_last_error(); int result = error_wrapper(::select(s + 1, 0, &fds, 0, timeout), ec); #else // defined(ASIO_WINDOWS) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) pollfd fds; fds.fd = s; fds.events = POLLOUT; fds.revents = 0; int timeout = (state & user_set_non_blocking) ? 0 : -1; clear_last_error(); int result = error_wrapper(::poll(&fds, 1, timeout), ec); #endif // defined(ASIO_WINDOWS) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) if (result == 0) ec = (state & user_set_non_blocking) ? asio::error::would_block : asio::error_code(); else if (result > 0) ec = asio::error_code(); return result; } int poll_connect(socket_type s, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return socket_error_retval; } #if defined(ASIO_WINDOWS) \ || defined(__CYGWIN__) \ || defined(__SYMBIAN32__) fd_set write_fds; FD_ZERO(&write_fds); FD_SET(s, &write_fds); fd_set except_fds; FD_ZERO(&except_fds); FD_SET(s, &except_fds); clear_last_error(); int result = error_wrapper(::select( s + 1, 0, &write_fds, &except_fds, 0), ec); if (result >= 0) ec = asio::error_code(); return result; #else // defined(ASIO_WINDOWS) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) pollfd fds; fds.fd = s; fds.events = POLLOUT; fds.revents = 0; clear_last_error(); int result = error_wrapper(::poll(&fds, 1, -1), ec); if (result >= 0) ec = asio::error_code(); return result; #endif // defined(ASIO_WINDOWS) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) } #endif // !defined(ASIO_WINDOWS_RUNTIME) const char* inet_ntop(int af, const void* src, char* dest, size_t length, unsigned long scope_id, asio::error_code& ec) { clear_last_error(); #if defined(ASIO_WINDOWS_RUNTIME) using namespace std; // For sprintf. 
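// Illustrative sketch (POSIX only): the readiness wait used by poll_read()
// and poll_write() above -- block indefinitely for a blocking socket, but use
// a zero timeout for a user-set non-blocking socket so that "not ready" can
// be reported as would_block instead of stalling the caller.  wait_readable()
// is a hypothetical helper.
#include <poll.h>
#include <cerrno>

// Returns 1 = readable, 0 = would block (non-blocking socket), -1 = error.
static int wait_readable(int fd, bool user_non_blocking)
{
    pollfd pfd = { fd, POLLIN, 0 };
    int timeout = user_non_blocking ? 0 : -1;
    int rc = ::poll(&pfd, 1, timeout);
    if (rc < 0) return -1;
    if (rc == 0) { errno = EWOULDBLOCK; return 0; }
    return 1;
}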
const unsigned char* bytes = static_cast(src); if (af == ASIO_OS_DEF(AF_INET)) { sprintf_s(dest, length, "%u.%u.%u.%u", bytes[0], bytes[1], bytes[2], bytes[3]); return dest; } else if (af == ASIO_OS_DEF(AF_INET6)) { size_t n = 0, b = 0, z = 0; while (n < length && b < 16) { if (bytes[b] == 0 && bytes[b + 1] == 0 && z == 0) { do b += 2; while (b < 16 && bytes[b] == 0 && bytes[b + 1] == 0); n += sprintf_s(dest + n, length - n, ":%s", b < 16 ? "" : ":"), ++z; } else { n += sprintf_s(dest + n, length - n, "%s%x", b ? ":" : "", (static_cast(bytes[b]) << 8) | bytes[b + 1]); b += 2; } } if (scope_id) n += sprintf_s(dest + n, length - n, "%%%lu", scope_id); return dest; } else { ec = asio::error::address_family_not_supported; return 0; } #elif defined(ASIO_WINDOWS) || defined(__CYGWIN__) using namespace std; // For memcpy. if (af != ASIO_OS_DEF(AF_INET) && af != ASIO_OS_DEF(AF_INET6)) { ec = asio::error::address_family_not_supported; return 0; } union { socket_addr_type base; sockaddr_storage_type storage; sockaddr_in4_type v4; sockaddr_in6_type v6; } address; DWORD address_length; if (af == ASIO_OS_DEF(AF_INET)) { address_length = sizeof(sockaddr_in4_type); address.v4.sin_family = ASIO_OS_DEF(AF_INET); address.v4.sin_port = 0; memcpy(&address.v4.sin_addr, src, sizeof(in4_addr_type)); } else // AF_INET6 { address_length = sizeof(sockaddr_in6_type); address.v6.sin6_family = ASIO_OS_DEF(AF_INET6); address.v6.sin6_port = 0; address.v6.sin6_flowinfo = 0; address.v6.sin6_scope_id = scope_id; memcpy(&address.v6.sin6_addr, src, sizeof(in6_addr_type)); } DWORD string_length = static_cast(length); #if defined(BOOST_NO_ANSI_APIS) || (defined(_MSC_VER) && (_MSC_VER >= 1800)) LPWSTR string_buffer = (LPWSTR)_alloca(length * sizeof(WCHAR)); int result = error_wrapper(::WSAAddressToStringW(&address.base, address_length, 0, string_buffer, &string_length), ec); ::WideCharToMultiByte(CP_ACP, 0, string_buffer, -1, dest, static_cast(length), 0, 0); #else int result = error_wrapper(::WSAAddressToStringA( &address.base, address_length, 0, dest, &string_length), ec); #endif // Windows may set error code on success. if (result != socket_error_retval) ec = asio::error_code(); // Windows may not set an error code on failure. else if (result == socket_error_retval && !ec) ec = asio::error::invalid_argument; return result == socket_error_retval ? 0 : dest; #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) const char* result = error_wrapper(::inet_ntop( af, src, dest, static_cast(length)), ec); if (result == 0 && !ec) ec = asio::error::invalid_argument; if (result != 0 && af == ASIO_OS_DEF(AF_INET6) && scope_id != 0) { using namespace std; // For strcat and sprintf. char if_name[IF_NAMESIZE + 1] = "%"; const in6_addr_type* ipv6_address = static_cast(src); bool is_link_local = ((ipv6_address->s6_addr[0] == 0xfe) && ((ipv6_address->s6_addr[1] & 0xc0) == 0x80)); bool is_multicast_link_local = ((ipv6_address->s6_addr[0] == 0xff) && ((ipv6_address->s6_addr[1] & 0x0f) == 0x02)); if ((!is_link_local && !is_multicast_link_local) || if_indextoname(static_cast(scope_id), if_name + 1) == 0) sprintf(if_name + 1, "%lu", scope_id); strcat(dest, if_name); } return result; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) } int inet_pton(int af, const char* src, void* dest, unsigned long* scope_id, asio::error_code& ec) { clear_last_error(); #if defined(ASIO_WINDOWS_RUNTIME) using namespace std; // For sscanf. 
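// Illustrative sketch (POSIX only): printing a scoped IPv6 address as the
// inet_ntop() wrapper above does -- convert the binary address with
// inet_ntop() and, for a link-local address, append "%<interface>" using
// if_indextoname(), falling back to the numeric scope id.  format_v6() is a
// hypothetical helper.
#include <arpa/inet.h>
#include <netinet/in.h>
#include <net/if.h>
#include <cstdio>
#include <cstring>

static const char* format_v6(const in6_addr& addr, unsigned scope_id,
                             char* out, size_t out_len)
{
    if (::inet_ntop(AF_INET6, &addr, out, out_len) == 0) return 0;
    if (scope_id != 0)
    {
        char name[IF_NAMESIZE + 2] = "%";
        if (::if_indextoname(scope_id, name + 1) == 0)
            std::snprintf(name + 1, sizeof(name) - 1, "%u", scope_id);
        std::strncat(out, name, out_len - std::strlen(out) - 1);
    }
    return out;
}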
unsigned char* bytes = static_cast(dest); if (af == ASIO_OS_DEF(AF_INET)) { unsigned int b0, b1, b2, b3; if (sscanf_s(src, "%u.%u.%u.%u", &b0, &b1, &b2, &b3) != 4) { ec = asio::error::invalid_argument; return -1; } if (b0 > 255 || b1 > 255 || b2 > 255 || b3 > 255) { ec = asio::error::invalid_argument; return -1; } bytes[0] = static_cast(b0); bytes[1] = static_cast(b1); bytes[2] = static_cast(b2); bytes[3] = static_cast(b3); ec = asio::error_code(); return 1; } else if (af == ASIO_OS_DEF(AF_INET6)) { unsigned char* bytes = static_cast(dest); std::memset(bytes, 0, 16); unsigned char back_bytes[16] = { 0 }; int num_front_bytes = 0, num_back_bytes = 0; const char* p = src; enum { fword, fcolon, bword, scope, done } state = fword; unsigned long current_word = 0; while (state != done) { if (current_word > 0xFFFF) { ec = asio::error::invalid_argument; return -1; } switch (state) { case fword: if (*p >= '0' && *p <= '9') current_word = current_word * 16 + *p++ - '0'; else if (*p >= 'a' && *p <= 'f') current_word = current_word * 16 + *p++ - 'a' + 10; else if (*p >= 'A' && *p <= 'F') current_word = current_word * 16 + *p++ - 'A' + 10; else { if (num_front_bytes == 16) { ec = asio::error::invalid_argument; return -1; } bytes[num_front_bytes++] = (current_word >> 8) & 0xFF; bytes[num_front_bytes++] = current_word & 0xFF; current_word = 0; if (*p == ':') state = fcolon, ++p; else if (*p == '%') state = scope, ++p; else if (*p == 0) state = done; else { ec = asio::error::invalid_argument; return -1; } } break; case fcolon: if (*p == ':') state = bword, ++p; else state = fword; break; case bword: if (*p >= '0' && *p <= '9') current_word = current_word * 16 + *p++ - '0'; else if (*p >= 'a' && *p <= 'f') current_word = current_word * 16 + *p++ - 'a' + 10; else if (*p >= 'A' && *p <= 'F') current_word = current_word * 16 + *p++ - 'A' + 10; else { if (num_front_bytes + num_back_bytes == 16) { ec = asio::error::invalid_argument; return -1; } back_bytes[num_back_bytes++] = (current_word >> 8) & 0xFF; back_bytes[num_back_bytes++] = current_word & 0xFF; current_word = 0; if (*p == ':') state = bword, ++p; else if (*p == '%') state = scope, ++p; else if (*p == 0) state = done; else { ec = asio::error::invalid_argument; return -1; } } break; case scope: if (*p >= '0' && *p <= '9') current_word = current_word * 10 + *p++ - '0'; else if (*p == 0) *scope_id = current_word, state = done; else { ec = asio::error::invalid_argument; return -1; } break; default: break; } } for (int i = 0; i < num_back_bytes; ++i) bytes[16 - num_back_bytes + i] = back_bytes[i]; ec = asio::error_code(); return 1; } else { ec = asio::error::address_family_not_supported; return -1; } #elif defined(ASIO_WINDOWS) || defined(__CYGWIN__) using namespace std; // For memcpy and strcmp. 
if (af != ASIO_OS_DEF(AF_INET) && af != ASIO_OS_DEF(AF_INET6)) { ec = asio::error::address_family_not_supported; return -1; } union { socket_addr_type base; sockaddr_storage_type storage; sockaddr_in4_type v4; sockaddr_in6_type v6; } address; int address_length = sizeof(sockaddr_storage_type); #if defined(BOOST_NO_ANSI_APIS) || (defined(_MSC_VER) && (_MSC_VER >= 1800)) int num_wide_chars = static_cast(strlen(src)) + 1; LPWSTR wide_buffer = (LPWSTR)_alloca(num_wide_chars * sizeof(WCHAR)); ::MultiByteToWideChar(CP_ACP, 0, src, -1, wide_buffer, num_wide_chars); int result = error_wrapper(::WSAStringToAddressW( wide_buffer, af, 0, &address.base, &address_length), ec); #else int result = error_wrapper(::WSAStringToAddressA( const_cast(src), af, 0, &address.base, &address_length), ec); #endif if (af == ASIO_OS_DEF(AF_INET)) { if (result != socket_error_retval) { memcpy(dest, &address.v4.sin_addr, sizeof(in4_addr_type)); ec = asio::error_code(); } else if (strcmp(src, "255.255.255.255") == 0) { static_cast(dest)->s_addr = INADDR_NONE; ec = asio::error_code(); } } else // AF_INET6 { if (result != socket_error_retval) { memcpy(dest, &address.v6.sin6_addr, sizeof(in6_addr_type)); if (scope_id) *scope_id = address.v6.sin6_scope_id; ec = asio::error_code(); } } // Windows may not set an error code on failure. if (result == socket_error_retval && !ec) ec = asio::error::invalid_argument; if (result != socket_error_retval) ec = asio::error_code(); return result == socket_error_retval ? -1 : 1; #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) using namespace std; // For strchr, memcpy and atoi. // On some platforms, inet_pton fails if an address string contains a scope // id. Detect and remove the scope id before passing the string to inet_pton. const bool is_v6 = (af == ASIO_OS_DEF(AF_INET6)); const char* if_name = is_v6 ? strchr(src, '%') : 0; char src_buf[max_addr_v6_str_len + 1]; const char* src_ptr = src; if (if_name != 0) { if (if_name - src > max_addr_v6_str_len) { ec = asio::error::invalid_argument; return 0; } memcpy(src_buf, src, if_name - src); src_buf[if_name - src] = 0; src_ptr = src_buf; } int result = error_wrapper(::inet_pton(af, src_ptr, dest), ec); if (result <= 0 && !ec) ec = asio::error::invalid_argument; if (result > 0 && is_v6 && scope_id) { using namespace std; // For strchr and atoi. 
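// Illustrative sketch (POSIX only): parsing a scoped IPv6 literal such as
// "fe80::1%eth0" -- strip the "%scope" suffix before calling inet_pton()
// (which rejects it on some platforms) and resolve the scope with
// if_nametoindex(), falling back to a numeric id, as the surrounding
// inet_pton() wrapper does.  parse_v6() is a hypothetical helper.
#include <arpa/inet.h>
#include <netinet/in.h>
#include <net/if.h>
#include <cstdlib>
#include <cstring>

static int parse_v6(const char* text, in6_addr& addr, unsigned& scope_id)
{
    char buf[INET6_ADDRSTRLEN + 1];
    const char* percent = std::strchr(text, '%');
    const char* addr_part = text;

    if (percent != 0)
    {
        size_t len = static_cast<size_t>(percent - text);
        if (len >= sizeof(buf)) return 0;           // too long to be valid
        std::memcpy(buf, text, len);
        buf[len] = '\0';
        addr_part = buf;
    }

    if (::inet_pton(AF_INET6, addr_part, &addr) != 1) return 0;

    scope_id = 0;
    if (percent != 0)
    {
        scope_id = ::if_nametoindex(percent + 1);   // interface name form
        if (scope_id == 0)
            scope_id = static_cast<unsigned>(std::atoi(percent + 1));
    }
    return 1;
}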
*scope_id = 0; if (if_name != 0) { in6_addr_type* ipv6_address = static_cast(dest); bool is_link_local = ((ipv6_address->s6_addr[0] == 0xfe) && ((ipv6_address->s6_addr[1] & 0xc0) == 0x80)); bool is_multicast_link_local = ((ipv6_address->s6_addr[0] == 0xff) && ((ipv6_address->s6_addr[1] & 0x0f) == 0x02)); if (is_link_local || is_multicast_link_local) *scope_id = if_nametoindex(if_name + 1); if (*scope_id == 0) *scope_id = atoi(if_name + 1); } } return result; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) } int gethostname(char* name, int namelen, asio::error_code& ec) { clear_last_error(); #if defined(ASIO_WINDOWS_RUNTIME) try { using namespace Windows::Foundation::Collections; using namespace Windows::Networking; using namespace Windows::Networking::Connectivity; IVectorView^ hostnames = NetworkInformation::GetHostNames(); for (unsigned i = 0; i < hostnames->Size; ++i) { HostName^ hostname = hostnames->GetAt(i); if (hostname->Type == HostNameType::DomainName) { std::wstring_convert> converter; std::string raw_name = converter.to_bytes(hostname->RawName->Data()); if (namelen > 0 && raw_name.size() < static_cast(namelen)) { strcpy_s(name, namelen, raw_name.c_str()); return 0; } } } return -1; } catch (Platform::Exception^ e) { ec = asio::error_code(e->HResult, asio::system_category()); return -1; } #else // defined(ASIO_WINDOWS_RUNTIME) int result = error_wrapper(::gethostname(name, namelen), ec); # if defined(ASIO_WINDOWS) if (result == 0) ec = asio::error_code(); # endif // defined(ASIO_WINDOWS) return result; #endif // defined(ASIO_WINDOWS_RUNTIME) } #if !defined(ASIO_WINDOWS_RUNTIME) #if !defined(ASIO_HAS_GETADDRINFO) // The following functions are only needed for emulation of getaddrinfo and // getnameinfo. inline asio::error_code translate_netdb_error(int error) { switch (error) { case 0: return asio::error_code(); case HOST_NOT_FOUND: return asio::error::host_not_found; case TRY_AGAIN: return asio::error::host_not_found_try_again; case NO_RECOVERY: return asio::error::no_recovery; case NO_DATA: return asio::error::no_data; default: ASIO_ASSERT(false); return asio::error::invalid_argument; } } inline hostent* gethostbyaddr(const char* addr, int length, int af, hostent* result, char* buffer, int buflength, asio::error_code& ec) { clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) (void)(buffer); (void)(buflength); hostent* retval = error_wrapper(::gethostbyaddr(addr, length, af), ec); if (!retval) return 0; ec = asio::error_code(); *result = *retval; return retval; #elif defined(__sun) || defined(__QNX__) int error = 0; hostent* retval = error_wrapper(::gethostbyaddr_r(addr, length, af, result, buffer, buflength, &error), ec); if (error) ec = translate_netdb_error(error); return retval; #elif defined(__MACH__) && defined(__APPLE__) (void)(buffer); (void)(buflength); int error = 0; hostent* retval = error_wrapper(::getipnodebyaddr( addr, length, af, &error), ec); if (error) ec = translate_netdb_error(error); if (!retval) return 0; *result = *retval; return retval; #else hostent* retval = 0; int error = 0; error_wrapper(::gethostbyaddr_r(addr, length, af, result, buffer, buflength, &retval, &error), ec); if (error) ec = translate_netdb_error(error); return retval; #endif } inline hostent* gethostbyname(const char* name, int af, struct hostent* result, char* buffer, int buflength, int ai_flags, asio::error_code& ec) { clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) (void)(buffer); (void)(buflength); (void)(ai_flags); if (af != 
ASIO_OS_DEF(AF_INET)) { ec = asio::error::address_family_not_supported; return 0; } hostent* retval = error_wrapper(::gethostbyname(name), ec); if (!retval) return 0; ec = asio::error_code(); *result = *retval; return result; #elif defined(__sun) || defined(__QNX__) (void)(ai_flags); if (af != ASIO_OS_DEF(AF_INET)) { ec = asio::error::address_family_not_supported; return 0; } int error = 0; hostent* retval = error_wrapper(::gethostbyname_r(name, result, buffer, buflength, &error), ec); if (error) ec = translate_netdb_error(error); return retval; #elif defined(__MACH__) && defined(__APPLE__) (void)(buffer); (void)(buflength); int error = 0; hostent* retval = error_wrapper(::getipnodebyname( name, af, ai_flags, &error), ec); if (error) ec = translate_netdb_error(error); if (!retval) return 0; *result = *retval; return retval; #else (void)(ai_flags); if (af != ASIO_OS_DEF(AF_INET)) { ec = asio::error::address_family_not_supported; return 0; } hostent* retval = 0; int error = 0; error_wrapper(::gethostbyname_r(name, result, buffer, buflength, &retval, &error), ec); if (error) ec = translate_netdb_error(error); return retval; #endif } inline void freehostent(hostent* h) { #if defined(__MACH__) && defined(__APPLE__) if (h) ::freehostent(h); #else (void)(h); #endif } // Emulation of getaddrinfo based on implementation in: // Stevens, W. R., UNIX Network Programming Vol. 1, 2nd Ed., Prentice-Hall 1998. struct gai_search { const char* host; int family; }; inline int gai_nsearch(const char* host, const addrinfo_type* hints, gai_search (&search)[2]) { int search_count = 0; if (host == 0 || host[0] == '\0') { if (hints->ai_flags & AI_PASSIVE) { // No host and AI_PASSIVE implies wildcard bind. switch (hints->ai_family) { case ASIO_OS_DEF(AF_INET): search[search_count].host = "0.0.0.0"; search[search_count].family = ASIO_OS_DEF(AF_INET); ++search_count; break; case ASIO_OS_DEF(AF_INET6): search[search_count].host = "0::0"; search[search_count].family = ASIO_OS_DEF(AF_INET6); ++search_count; break; case ASIO_OS_DEF(AF_UNSPEC): search[search_count].host = "0::0"; search[search_count].family = ASIO_OS_DEF(AF_INET6); ++search_count; search[search_count].host = "0.0.0.0"; search[search_count].family = ASIO_OS_DEF(AF_INET); ++search_count; break; default: break; } } else { // No host and not AI_PASSIVE means connect to local host. switch (hints->ai_family) { case ASIO_OS_DEF(AF_INET): search[search_count].host = "localhost"; search[search_count].family = ASIO_OS_DEF(AF_INET); ++search_count; break; case ASIO_OS_DEF(AF_INET6): search[search_count].host = "localhost"; search[search_count].family = ASIO_OS_DEF(AF_INET6); ++search_count; break; case ASIO_OS_DEF(AF_UNSPEC): search[search_count].host = "localhost"; search[search_count].family = ASIO_OS_DEF(AF_INET6); ++search_count; search[search_count].host = "localhost"; search[search_count].family = ASIO_OS_DEF(AF_INET); ++search_count; break; default: break; } } } else { // Host is specified. 
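// Illustrative sketch (POSIX only): the AI_PASSIVE distinction emulated by
// gai_nsearch() above -- with no host name, AI_PASSIVE asks for the wildcard
// address (suitable for bind/listen), while its absence means the loopback
// address (suitable for connect).  resolve_port() is a hypothetical helper.
#include <sys/socket.h>
#include <netdb.h>
#include <cstring>

static addrinfo* resolve_port(const char* service, bool for_listening)
{
    addrinfo hints;
    std::memset(&hints, 0, sizeof(hints));
    hints.ai_family   = AF_UNSPEC;                       // IPv4 or IPv6
    hints.ai_socktype = SOCK_STREAM;
    hints.ai_flags    = for_listening ? AI_PASSIVE : 0;  // wildcard vs loopback

    addrinfo* result = 0;
    if (::getaddrinfo(0, service, &hints, &result) != 0)
        return 0;
    return result;   // caller must call freeaddrinfo(result)
}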
switch (hints->ai_family) { case ASIO_OS_DEF(AF_INET): search[search_count].host = host; search[search_count].family = ASIO_OS_DEF(AF_INET); ++search_count; break; case ASIO_OS_DEF(AF_INET6): search[search_count].host = host; search[search_count].family = ASIO_OS_DEF(AF_INET6); ++search_count; break; case ASIO_OS_DEF(AF_UNSPEC): search[search_count].host = host; search[search_count].family = ASIO_OS_DEF(AF_INET6); ++search_count; search[search_count].host = host; search[search_count].family = ASIO_OS_DEF(AF_INET); ++search_count; break; default: break; } } return search_count; } template inline T* gai_alloc(std::size_t size = sizeof(T)) { using namespace std; T* p = static_cast(::operator new(size, std::nothrow)); if (p) memset(p, 0, size); return p; } inline void gai_free(void* p) { ::operator delete(p); } inline void gai_strcpy(char* target, const char* source, std::size_t max_size) { using namespace std; #if defined(ASIO_HAS_SECURE_RTL) strcpy_s(target, max_size, source); #else // defined(ASIO_HAS_SECURE_RTL) *target = 0; strncat(target, source, max_size); #endif // defined(ASIO_HAS_SECURE_RTL) } enum { gai_clone_flag = 1 << 30 }; inline int gai_aistruct(addrinfo_type*** next, const addrinfo_type* hints, const void* addr, int family) { using namespace std; addrinfo_type* ai = gai_alloc(); if (ai == 0) return EAI_MEMORY; ai->ai_next = 0; **next = ai; *next = &ai->ai_next; ai->ai_canonname = 0; ai->ai_socktype = hints->ai_socktype; if (ai->ai_socktype == 0) ai->ai_flags |= gai_clone_flag; ai->ai_protocol = hints->ai_protocol; ai->ai_family = family; switch (ai->ai_family) { case ASIO_OS_DEF(AF_INET): { sockaddr_in4_type* sinptr = gai_alloc(); if (sinptr == 0) return EAI_MEMORY; sinptr->sin_family = ASIO_OS_DEF(AF_INET); memcpy(&sinptr->sin_addr, addr, sizeof(in4_addr_type)); ai->ai_addr = reinterpret_cast(sinptr); ai->ai_addrlen = sizeof(sockaddr_in4_type); break; } case ASIO_OS_DEF(AF_INET6): { sockaddr_in6_type* sin6ptr = gai_alloc(); if (sin6ptr == 0) return EAI_MEMORY; sin6ptr->sin6_family = ASIO_OS_DEF(AF_INET6); memcpy(&sin6ptr->sin6_addr, addr, sizeof(in6_addr_type)); ai->ai_addr = reinterpret_cast(sin6ptr); ai->ai_addrlen = sizeof(sockaddr_in6_type); break; } default: break; } return 0; } inline addrinfo_type* gai_clone(addrinfo_type* ai) { using namespace std; addrinfo_type* new_ai = gai_alloc(); if (new_ai == 0) return new_ai; new_ai->ai_next = ai->ai_next; ai->ai_next = new_ai; new_ai->ai_flags = 0; new_ai->ai_family = ai->ai_family; new_ai->ai_socktype = ai->ai_socktype; new_ai->ai_protocol = ai->ai_protocol; new_ai->ai_canonname = 0; new_ai->ai_addrlen = ai->ai_addrlen; new_ai->ai_addr = gai_alloc(ai->ai_addrlen); memcpy(new_ai->ai_addr, ai->ai_addr, ai->ai_addrlen); return new_ai; } inline int gai_port(addrinfo_type* aihead, int port, int socktype) { int num_found = 0; for (addrinfo_type* ai = aihead; ai; ai = ai->ai_next) { if (ai->ai_flags & gai_clone_flag) { if (ai->ai_socktype != 0) { ai = gai_clone(ai); if (ai == 0) return -1; // ai now points to newly cloned entry. } } else if (ai->ai_socktype != socktype) { // Ignore if mismatch on socket type. 
continue; } ai->ai_socktype = socktype; switch (ai->ai_family) { case ASIO_OS_DEF(AF_INET): { sockaddr_in4_type* sinptr = reinterpret_cast(ai->ai_addr); sinptr->sin_port = port; ++num_found; break; } case ASIO_OS_DEF(AF_INET6): { sockaddr_in6_type* sin6ptr = reinterpret_cast(ai->ai_addr); sin6ptr->sin6_port = port; ++num_found; break; } default: break; } } return num_found; } inline int gai_serv(addrinfo_type* aihead, const addrinfo_type* hints, const char* serv) { using namespace std; int num_found = 0; if ( #if defined(AI_NUMERICSERV) (hints->ai_flags & AI_NUMERICSERV) || #endif isdigit(static_cast(serv[0]))) { int port = htons(atoi(serv)); if (hints->ai_socktype) { // Caller specifies socket type. int rc = gai_port(aihead, port, hints->ai_socktype); if (rc < 0) return EAI_MEMORY; num_found += rc; } else { // Caller does not specify socket type. int rc = gai_port(aihead, port, SOCK_STREAM); if (rc < 0) return EAI_MEMORY; num_found += rc; rc = gai_port(aihead, port, SOCK_DGRAM); if (rc < 0) return EAI_MEMORY; num_found += rc; } } else { // Try service name with TCP first, then UDP. if (hints->ai_socktype == 0 || hints->ai_socktype == SOCK_STREAM) { servent* sptr = getservbyname(serv, "tcp"); if (sptr != 0) { int rc = gai_port(aihead, sptr->s_port, SOCK_STREAM); if (rc < 0) return EAI_MEMORY; num_found += rc; } } if (hints->ai_socktype == 0 || hints->ai_socktype == SOCK_DGRAM) { servent* sptr = getservbyname(serv, "udp"); if (sptr != 0) { int rc = gai_port(aihead, sptr->s_port, SOCK_DGRAM); if (rc < 0) return EAI_MEMORY; num_found += rc; } } } if (num_found == 0) { if (hints->ai_socktype == 0) { // All calls to getservbyname() failed. return EAI_NONAME; } else { // Service not supported for socket type. return EAI_SERVICE; } } return 0; } inline int gai_echeck(const char* host, const char* service, int flags, int family, int socktype, int protocol) { (void)(flags); (void)(protocol); // Host or service must be specified. if (host == 0 || host[0] == '\0') if (service == 0 || service[0] == '\0') return EAI_NONAME; // Check combination of family and socket type. switch (family) { case ASIO_OS_DEF(AF_UNSPEC): break; case ASIO_OS_DEF(AF_INET): case ASIO_OS_DEF(AF_INET6): if (service != 0 && service[0] != '\0') if (socktype != 0 && socktype != SOCK_STREAM && socktype != SOCK_DGRAM) return EAI_SOCKTYPE; break; default: return EAI_FAMILY; } return 0; } inline void freeaddrinfo_emulation(addrinfo_type* aihead) { addrinfo_type* ai = aihead; while (ai) { gai_free(ai->ai_addr); gai_free(ai->ai_canonname); addrinfo_type* ainext = ai->ai_next; gai_free(ai); ai = ainext; } } inline int getaddrinfo_emulation(const char* host, const char* service, const addrinfo_type* hintsp, addrinfo_type** result) { // Set up linked list of addrinfo structures. addrinfo_type* aihead = 0; addrinfo_type** ainext = &aihead; char* canon = 0; // Supply default hints if not specified by caller. addrinfo_type hints = addrinfo_type(); hints.ai_family = ASIO_OS_DEF(AF_UNSPEC); if (hintsp) hints = *hintsp; // If the resolution is not specifically for AF_INET6, remove the AI_V4MAPPED // and AI_ALL flags. #if defined(AI_V4MAPPED) if (hints.ai_family != ASIO_OS_DEF(AF_INET6)) hints.ai_flags &= ~AI_V4MAPPED; #endif #if defined(AI_ALL) if (hints.ai_family != ASIO_OS_DEF(AF_INET6)) hints.ai_flags &= ~AI_ALL; #endif // Basic error checking. 
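// Illustrative sketch (POSIX only): the service lookup performed by
// gai_serv() above -- a numeric service is converted with atoi()/htons(),
// otherwise getservbyname() is consulted, trying "tcp" before "udp" when the
// caller did not fix the socket type.  service_to_port() is a hypothetical
// helper returning the port in network byte order, or 0 if unknown.
#include <netdb.h>
#include <arpa/inet.h>
#include <cctype>
#include <cstdlib>

static unsigned short service_to_port(const char* serv)
{
    if (std::isdigit(static_cast<unsigned char>(serv[0])))
        return htons(static_cast<unsigned short>(std::atoi(serv)));

    if (servent* s = ::getservbyname(serv, "tcp"))
        return static_cast<unsigned short>(s->s_port);   // already network order
    if (servent* s = ::getservbyname(serv, "udp"))
        return static_cast<unsigned short>(s->s_port);
    return 0;
}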
int rc = gai_echeck(host, service, hints.ai_flags, hints.ai_family, hints.ai_socktype, hints.ai_protocol); if (rc != 0) { freeaddrinfo_emulation(aihead); return rc; } gai_search search[2]; int search_count = gai_nsearch(host, &hints, search); for (gai_search* sptr = search; sptr < search + search_count; ++sptr) { // Check for IPv4 dotted decimal string. in4_addr_type inaddr; asio::error_code ec; if (socket_ops::inet_pton(ASIO_OS_DEF(AF_INET), sptr->host, &inaddr, 0, ec) == 1) { if (hints.ai_family != ASIO_OS_DEF(AF_UNSPEC) && hints.ai_family != ASIO_OS_DEF(AF_INET)) { freeaddrinfo_emulation(aihead); gai_free(canon); return EAI_FAMILY; } if (sptr->family == ASIO_OS_DEF(AF_INET)) { rc = gai_aistruct(&ainext, &hints, &inaddr, ASIO_OS_DEF(AF_INET)); if (rc != 0) { freeaddrinfo_emulation(aihead); gai_free(canon); return rc; } } continue; } // Check for IPv6 hex string. in6_addr_type in6addr; if (socket_ops::inet_pton(ASIO_OS_DEF(AF_INET6), sptr->host, &in6addr, 0, ec) == 1) { if (hints.ai_family != ASIO_OS_DEF(AF_UNSPEC) && hints.ai_family != ASIO_OS_DEF(AF_INET6)) { freeaddrinfo_emulation(aihead); gai_free(canon); return EAI_FAMILY; } if (sptr->family == ASIO_OS_DEF(AF_INET6)) { rc = gai_aistruct(&ainext, &hints, &in6addr, ASIO_OS_DEF(AF_INET6)); if (rc != 0) { freeaddrinfo_emulation(aihead); gai_free(canon); return rc; } } continue; } // Look up hostname. hostent hent; char hbuf[8192] = ""; hostent* hptr = socket_ops::gethostbyname(sptr->host, sptr->family, &hent, hbuf, sizeof(hbuf), hints.ai_flags, ec); if (hptr == 0) { if (search_count == 2) { // Failure is OK if there are multiple searches. continue; } freeaddrinfo_emulation(aihead); gai_free(canon); if (ec == asio::error::host_not_found) return EAI_NONAME; if (ec == asio::error::host_not_found_try_again) return EAI_AGAIN; if (ec == asio::error::no_recovery) return EAI_FAIL; if (ec == asio::error::no_data) return EAI_NONAME; return EAI_NONAME; } // Check for address family mismatch if one was specified. if (hints.ai_family != ASIO_OS_DEF(AF_UNSPEC) && hints.ai_family != hptr->h_addrtype) { freeaddrinfo_emulation(aihead); gai_free(canon); socket_ops::freehostent(hptr); return EAI_FAMILY; } // Save canonical name first time. if (host != 0 && host[0] != '\0' && hptr->h_name && hptr->h_name[0] && (hints.ai_flags & AI_CANONNAME) && canon == 0) { std::size_t canon_len = strlen(hptr->h_name) + 1; canon = gai_alloc(canon_len); if (canon == 0) { freeaddrinfo_emulation(aihead); socket_ops::freehostent(hptr); return EAI_MEMORY; } gai_strcpy(canon, hptr->h_name, canon_len); } // Create an addrinfo structure for each returned address. for (char** ap = hptr->h_addr_list; *ap; ++ap) { rc = gai_aistruct(&ainext, &hints, *ap, hptr->h_addrtype); if (rc != 0) { freeaddrinfo_emulation(aihead); gai_free(canon); socket_ops::freehostent(hptr); return EAI_FAMILY; } } socket_ops::freehostent(hptr); } // Check if we found anything. if (aihead == 0) { gai_free(canon); return EAI_NONAME; } // Return canonical name in first entry. if (host != 0 && host[0] != '\0' && (hints.ai_flags & AI_CANONNAME)) { if (canon) { aihead->ai_canonname = canon; canon = 0; } else { std::size_t canonname_len = strlen(search[0].host) + 1; aihead->ai_canonname = gai_alloc(canonname_len); if (aihead->ai_canonname == 0) { freeaddrinfo_emulation(aihead); return EAI_MEMORY; } gai_strcpy(aihead->ai_canonname, search[0].host, canonname_len); } } gai_free(canon); // Process the service name. 
if (service != 0 && service[0] != '\0') { rc = gai_serv(aihead, &hints, service); if (rc != 0) { freeaddrinfo_emulation(aihead); return rc; } } // Return result to caller. *result = aihead; return 0; } inline asio::error_code getnameinfo_emulation( const socket_addr_type* sa, std::size_t salen, char* host, std::size_t hostlen, char* serv, std::size_t servlen, int flags, asio::error_code& ec) { using namespace std; const char* addr; size_t addr_len; unsigned short port; switch (sa->sa_family) { case ASIO_OS_DEF(AF_INET): if (salen != sizeof(sockaddr_in4_type)) { return ec = asio::error::invalid_argument; } addr = reinterpret_cast( &reinterpret_cast(sa)->sin_addr); addr_len = sizeof(in4_addr_type); port = reinterpret_cast(sa)->sin_port; break; case ASIO_OS_DEF(AF_INET6): if (salen != sizeof(sockaddr_in6_type)) { return ec = asio::error::invalid_argument; } addr = reinterpret_cast( &reinterpret_cast(sa)->sin6_addr); addr_len = sizeof(in6_addr_type); port = reinterpret_cast(sa)->sin6_port; break; default: return ec = asio::error::address_family_not_supported; } if (host && hostlen > 0) { if (flags & NI_NUMERICHOST) { if (socket_ops::inet_ntop(sa->sa_family, addr, host, hostlen, 0, ec) == 0) { return ec; } } else { hostent hent; char hbuf[8192] = ""; hostent* hptr = socket_ops::gethostbyaddr(addr, static_cast(addr_len), sa->sa_family, &hent, hbuf, sizeof(hbuf), ec); if (hptr && hptr->h_name && hptr->h_name[0] != '\0') { if (flags & NI_NOFQDN) { char* dot = strchr(hptr->h_name, '.'); if (dot) { *dot = 0; } } gai_strcpy(host, hptr->h_name, hostlen); socket_ops::freehostent(hptr); } else { socket_ops::freehostent(hptr); if (flags & NI_NAMEREQD) { return ec = asio::error::host_not_found; } if (socket_ops::inet_ntop(sa->sa_family, addr, host, hostlen, 0, ec) == 0) { return ec; } } } } if (serv && servlen > 0) { if (flags & NI_NUMERICSERV) { if (servlen < 6) { return ec = asio::error::no_buffer_space; } #if defined(ASIO_HAS_SECURE_RTL) sprintf_s(serv, servlen, "%u", ntohs(port)); #else // defined(ASIO_HAS_SECURE_RTL) sprintf(serv, "%u", ntohs(port)); #endif // defined(ASIO_HAS_SECURE_RTL) } else { #if defined(ASIO_HAS_PTHREADS) static ::pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; ::pthread_mutex_lock(&mutex); #endif // defined(ASIO_HAS_PTHREADS) servent* sptr = ::getservbyport(port, (flags & NI_DGRAM) ? 
"udp" : 0); if (sptr && sptr->s_name && sptr->s_name[0] != '\0') { gai_strcpy(serv, sptr->s_name, servlen); } else { if (servlen < 6) { return ec = asio::error::no_buffer_space; } #if defined(ASIO_HAS_SECURE_RTL) sprintf_s(serv, servlen, "%u", ntohs(port)); #else // defined(ASIO_HAS_SECURE_RTL) sprintf(serv, "%u", ntohs(port)); #endif // defined(ASIO_HAS_SECURE_RTL) } #if defined(ASIO_HAS_PTHREADS) ::pthread_mutex_unlock(&mutex); #endif // defined(ASIO_HAS_PTHREADS) } } ec = asio::error_code(); return ec; } #endif // !defined(ASIO_HAS_GETADDRINFO) inline asio::error_code translate_addrinfo_error(int error) { switch (error) { case 0: return asio::error_code(); case EAI_AGAIN: return asio::error::host_not_found_try_again; case EAI_BADFLAGS: return asio::error::invalid_argument; case EAI_FAIL: return asio::error::no_recovery; case EAI_FAMILY: return asio::error::address_family_not_supported; case EAI_MEMORY: return asio::error::no_memory; case EAI_NONAME: #if defined(EAI_ADDRFAMILY) case EAI_ADDRFAMILY: #endif #if defined(EAI_NODATA) && (EAI_NODATA != EAI_NONAME) case EAI_NODATA: #endif return asio::error::host_not_found; case EAI_SERVICE: return asio::error::service_not_found; case EAI_SOCKTYPE: return asio::error::socket_type_not_supported; default: // Possibly the non-portable EAI_SYSTEM. #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) return asio::error_code( WSAGetLastError(), asio::error::get_system_category()); #else return asio::error_code( errno, asio::error::get_system_category()); #endif } } asio::error_code getaddrinfo(const char* host, const char* service, const addrinfo_type& hints, addrinfo_type** result, asio::error_code& ec) { host = (host && *host) ? host : 0; service = (service && *service) ? service : 0; clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) # if defined(ASIO_HAS_GETADDRINFO) // Building for Windows XP, Windows Server 2003, or later. int error = ::getaddrinfo(host, service, &hints, result); return ec = translate_addrinfo_error(error); # else // Building for Windows 2000 or earlier. typedef int (WSAAPI *gai_t)(const char*, const char*, const addrinfo_type*, addrinfo_type**); if (HMODULE winsock_module = ::GetModuleHandleA("ws2_32")) { if (gai_t gai = (gai_t)::GetProcAddress(winsock_module, "getaddrinfo")) { int error = gai(host, service, &hints, result); return ec = translate_addrinfo_error(error); } } int error = getaddrinfo_emulation(host, service, &hints, result); return ec = translate_addrinfo_error(error); # endif #elif !defined(ASIO_HAS_GETADDRINFO) int error = getaddrinfo_emulation(host, service, &hints, result); return ec = translate_addrinfo_error(error); #else int error = ::getaddrinfo(host, service, &hints, result); return ec = translate_addrinfo_error(error); #endif } asio::error_code background_getaddrinfo( const weak_cancel_token_type& cancel_token, const char* host, const char* service, const addrinfo_type& hints, addrinfo_type** result, asio::error_code& ec) { if (cancel_token.expired()) ec = asio::error::operation_aborted; else socket_ops::getaddrinfo(host, service, hints, result, ec); return ec; } void freeaddrinfo(addrinfo_type* ai) { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) # if defined(ASIO_HAS_GETADDRINFO) // Building for Windows XP, Windows Server 2003, or later. ::freeaddrinfo(ai); # else // Building for Windows 2000 or earlier. 
typedef int (WSAAPI *fai_t)(addrinfo_type*); if (HMODULE winsock_module = ::GetModuleHandleA("ws2_32")) { if (fai_t fai = (fai_t)::GetProcAddress(winsock_module, "freeaddrinfo")) { fai(ai); return; } } freeaddrinfo_emulation(ai); # endif #elif !defined(ASIO_HAS_GETADDRINFO) freeaddrinfo_emulation(ai); #else ::freeaddrinfo(ai); #endif } asio::error_code getnameinfo(const socket_addr_type* addr, std::size_t addrlen, char* host, std::size_t hostlen, char* serv, std::size_t servlen, int flags, asio::error_code& ec) { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) # if defined(ASIO_HAS_GETADDRINFO) // Building for Windows XP, Windows Server 2003, or later. clear_last_error(); int error = ::getnameinfo(addr, static_cast(addrlen), host, static_cast(hostlen), serv, static_cast(servlen), flags); return ec = translate_addrinfo_error(error); # else // Building for Windows 2000 or earlier. typedef int (WSAAPI *gni_t)(const socket_addr_type*, int, char*, DWORD, char*, DWORD, int); if (HMODULE winsock_module = ::GetModuleHandleA("ws2_32")) { if (gni_t gni = (gni_t)::GetProcAddress(winsock_module, "getnameinfo")) { clear_last_error(); int error = gni(addr, static_cast(addrlen), host, static_cast(hostlen), serv, static_cast(servlen), flags); return ec = translate_addrinfo_error(error); } } clear_last_error(); return getnameinfo_emulation(addr, addrlen, host, hostlen, serv, servlen, flags, ec); # endif #elif !defined(ASIO_HAS_GETADDRINFO) using namespace std; // For memcpy. sockaddr_storage_type tmp_addr; memcpy(&tmp_addr, addr, addrlen); tmp_addr.ss_len = addrlen; addr = reinterpret_cast(&tmp_addr); clear_last_error(); return getnameinfo_emulation(addr, addrlen, host, hostlen, serv, servlen, flags, ec); #else clear_last_error(); int error = ::getnameinfo(addr, addrlen, host, hostlen, serv, servlen, flags); return ec = translate_addrinfo_error(error); #endif } asio::error_code sync_getnameinfo( const socket_addr_type* addr, std::size_t addrlen, char* host, std::size_t hostlen, char* serv, std::size_t servlen, int sock_type, asio::error_code& ec) { // First try resolving with the service name. If that fails try resolving // but allow the service to be returned as a number. int flags = (sock_type == SOCK_DGRAM) ? NI_DGRAM : 0; socket_ops::getnameinfo(addr, addrlen, host, hostlen, serv, servlen, flags, ec); if (ec) { socket_ops::getnameinfo(addr, addrlen, host, hostlen, serv, servlen, flags | NI_NUMERICSERV, ec); } return ec; } asio::error_code background_getnameinfo( const weak_cancel_token_type& cancel_token, const socket_addr_type* addr, std::size_t addrlen, char* host, std::size_t hostlen, char* serv, std::size_t servlen, int sock_type, asio::error_code& ec) { if (cancel_token.expired()) { ec = asio::error::operation_aborted; } else { // First try resolving with the service name. If that fails try resolving // but allow the service to be returned as a number. int flags = (sock_type == SOCK_DGRAM) ? 
NI_DGRAM : 0; socket_ops::getnameinfo(addr, addrlen, host, hostlen, serv, servlen, flags, ec); if (ec) { socket_ops::getnameinfo(addr, addrlen, host, hostlen, serv, servlen, flags | NI_NUMERICSERV, ec); } } return ec; } #endif // !defined(ASIO_WINDOWS_RUNTIME) u_long_type network_to_host_long(u_long_type value) { #if defined(ASIO_WINDOWS_RUNTIME) unsigned char* value_p = reinterpret_cast(&value); u_long_type result = (static_cast(value_p[0]) << 24) | (static_cast(value_p[1]) << 16) | (static_cast(value_p[2]) << 8) | static_cast(value_p[3]); return result; #else // defined(ASIO_WINDOWS_RUNTIME) return ntohl(value); #endif // defined(ASIO_WINDOWS_RUNTIME) } u_long_type host_to_network_long(u_long_type value) { #if defined(ASIO_WINDOWS_RUNTIME) u_long_type result; unsigned char* result_p = reinterpret_cast(&result); result_p[0] = static_cast((value >> 24) & 0xFF); result_p[1] = static_cast((value >> 16) & 0xFF); result_p[2] = static_cast((value >> 8) & 0xFF); result_p[3] = static_cast(value & 0xFF); return result; #else // defined(ASIO_WINDOWS_RUNTIME) return htonl(value); #endif // defined(ASIO_WINDOWS_RUNTIME) } u_short_type network_to_host_short(u_short_type value) { #if defined(ASIO_WINDOWS_RUNTIME) unsigned char* value_p = reinterpret_cast(&value); u_short_type result = (static_cast(value_p[0]) << 8) | static_cast(value_p[1]); return result; #else // defined(ASIO_WINDOWS_RUNTIME) return ntohs(value); #endif // defined(ASIO_WINDOWS_RUNTIME) } u_short_type host_to_network_short(u_short_type value) { #if defined(ASIO_WINDOWS_RUNTIME) u_short_type result; unsigned char* result_p = reinterpret_cast(&result); result_p[0] = static_cast((value >> 8) & 0xFF); result_p[1] = static_cast(value & 0xFF); return result; #else // defined(ASIO_WINDOWS_RUNTIME) return htons(value); #endif // defined(ASIO_WINDOWS_RUNTIME) } } // namespace socket_ops } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_SOCKET_OPS_IPP galera-3-25.3.20/asio/asio/detail/impl/resolver_service_base.ipp0000644000015300001660000000613713042054732024323 0ustar jenkinsjenkins// // detail/impl/resolver_service_base.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_RESOLVER_SERVICE_BASE_IPP #define ASIO_DETAIL_IMPL_RESOLVER_SERVICE_BASE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/resolver_service_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class resolver_service_base::work_io_service_runner { public: work_io_service_runner(asio::io_service& io_service) : io_service_(io_service) {} void operator()() { io_service_.run(); } private: asio::io_service& io_service_; }; resolver_service_base::resolver_service_base( asio::io_service& io_service) : io_service_impl_(asio::use_service(io_service)), work_io_service_(new asio::io_service), work_io_service_impl_(asio::use_service< io_service_impl>(*work_io_service_)), work_(new asio::io_service::work(*work_io_service_)), work_thread_(0) { } resolver_service_base::~resolver_service_base() { shutdown_service(); } void resolver_service_base::shutdown_service() { work_.reset(); if (work_io_service_.get()) { work_io_service_->stop(); if (work_thread_.get()) { work_thread_->join(); work_thread_.reset(); } work_io_service_.reset(); } } void resolver_service_base::fork_service( asio::io_service::fork_event fork_ev) { if (work_thread_.get()) { if (fork_ev == asio::io_service::fork_prepare) { work_io_service_->stop(); work_thread_->join(); } else { work_io_service_->reset(); work_thread_.reset(new asio::detail::thread( work_io_service_runner(*work_io_service_))); } } } void resolver_service_base::construct( resolver_service_base::implementation_type& impl) { impl.reset(static_cast(0), socket_ops::noop_deleter()); } void resolver_service_base::destroy( resolver_service_base::implementation_type& impl) { ASIO_HANDLER_OPERATION(("resolver", &impl, "cancel")); impl.reset(); } void resolver_service_base::cancel( resolver_service_base::implementation_type& impl) { ASIO_HANDLER_OPERATION(("resolver", &impl, "cancel")); impl.reset(static_cast(0), socket_ops::noop_deleter()); } void resolver_service_base::start_resolve_op(operation* op) { start_work_thread(); io_service_impl_.work_started(); work_io_service_impl_.post_immediate_completion(op, false); } void resolver_service_base::start_work_thread() { asio::detail::mutex::scoped_lock lock(mutex_); if (!work_thread_.get()) { work_thread_.reset(new asio::detail::thread( work_io_service_runner(*work_io_service_))); } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IMPL_RESOLVER_SERVICE_BASE_IPP galera-3-25.3.20/asio/asio/detail/impl/select_reactor.hpp0000644000015300001660000000453313042054732022743 0ustar jenkinsjenkins// // detail/impl/select_reactor.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_SELECT_REACTOR_HPP #define ASIO_DETAIL_IMPL_SELECT_REACTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) \ || (!defined(ASIO_HAS_DEV_POLL) \ && !defined(ASIO_HAS_EPOLL) \ && !defined(ASIO_HAS_KQUEUE) \ && !defined(ASIO_WINDOWS_RUNTIME)) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template void select_reactor::add_timer_queue(timer_queue& queue) { do_add_timer_queue(queue); } // Remove a timer queue from the reactor. template void select_reactor::remove_timer_queue(timer_queue& queue) { do_remove_timer_queue(queue); } template void select_reactor::schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op) { asio::detail::mutex::scoped_lock lock(mutex_); if (shutdown_) { io_service_.post_immediate_completion(op, false); return; } bool earliest = queue.enqueue_timer(time, timer, op); io_service_.work_started(); if (earliest) interrupter_.interrupt(); } template std::size_t select_reactor::cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled) { asio::detail::mutex::scoped_lock lock(mutex_); op_queue ops; std::size_t n = queue.cancel_timer(timer, ops, max_cancelled); lock.unlock(); io_service_.post_deferred_completions(ops); return n; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) // || (!defined(ASIO_HAS_DEV_POLL) // && !defined(ASIO_HAS_EPOLL) // && !defined(ASIO_HAS_KQUEUE) // && !defined(ASIO_WINDOWS_RUNTIME)) #endif // ASIO_DETAIL_IMPL_SELECT_REACTOR_HPP galera-3-25.3.20/asio/asio/detail/impl/task_io_service.ipp0000644000015300001660000002603113042054732023114 0ustar jenkinsjenkins// // detail/impl/task_io_service.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_TASK_IO_SERVICE_IPP #define ASIO_DETAIL_IMPL_TASK_IO_SERVICE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_IOCP) #include "asio/detail/event.hpp" #include "asio/detail/limits.hpp" #include "asio/detail/reactor.hpp" #include "asio/detail/task_io_service.hpp" #include "asio/detail/task_io_service_thread_info.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct task_io_service::task_cleanup { ~task_cleanup() { if (this_thread_->private_outstanding_work > 0) { asio::detail::increment( task_io_service_->outstanding_work_, this_thread_->private_outstanding_work); } this_thread_->private_outstanding_work = 0; // Enqueue the completed operations and reinsert the task at the end of // the operation queue. 
lock_->lock(); task_io_service_->task_interrupted_ = true; task_io_service_->op_queue_.push(this_thread_->private_op_queue); task_io_service_->op_queue_.push(&task_io_service_->task_operation_); } task_io_service* task_io_service_; mutex::scoped_lock* lock_; thread_info* this_thread_; }; struct task_io_service::work_cleanup { ~work_cleanup() { if (this_thread_->private_outstanding_work > 1) { asio::detail::increment( task_io_service_->outstanding_work_, this_thread_->private_outstanding_work - 1); } else if (this_thread_->private_outstanding_work < 1) { task_io_service_->work_finished(); } this_thread_->private_outstanding_work = 0; #if defined(ASIO_HAS_THREADS) if (!this_thread_->private_op_queue.empty()) { lock_->lock(); task_io_service_->op_queue_.push(this_thread_->private_op_queue); } #endif // defined(ASIO_HAS_THREADS) } task_io_service* task_io_service_; mutex::scoped_lock* lock_; thread_info* this_thread_; }; task_io_service::task_io_service( asio::io_service& io_service, std::size_t concurrency_hint) : asio::detail::service_base(io_service), one_thread_(concurrency_hint == 1), mutex_(), task_(0), task_interrupted_(true), outstanding_work_(0), stopped_(false), shutdown_(false) { ASIO_HANDLER_TRACKING_INIT; } void task_io_service::shutdown_service() { mutex::scoped_lock lock(mutex_); shutdown_ = true; lock.unlock(); // Destroy handler objects. while (!op_queue_.empty()) { operation* o = op_queue_.front(); op_queue_.pop(); if (o != &task_operation_) o->destroy(); } // Reset to initial state. task_ = 0; } void task_io_service::init_task() { mutex::scoped_lock lock(mutex_); if (!shutdown_ && !task_) { task_ = &use_service(this->get_io_service()); op_queue_.push(&task_operation_); wake_one_thread_and_unlock(lock); } } std::size_t task_io_service::run(asio::error_code& ec) { ec = asio::error_code(); if (outstanding_work_ == 0) { stop(); return 0; } thread_info this_thread; this_thread.private_outstanding_work = 0; thread_call_stack::context ctx(this, this_thread); mutex::scoped_lock lock(mutex_); std::size_t n = 0; for (; do_run_one(lock, this_thread, ec); lock.lock()) if (n != (std::numeric_limits::max)()) ++n; return n; } std::size_t task_io_service::run_one(asio::error_code& ec) { ec = asio::error_code(); if (outstanding_work_ == 0) { stop(); return 0; } thread_info this_thread; this_thread.private_outstanding_work = 0; thread_call_stack::context ctx(this, this_thread); mutex::scoped_lock lock(mutex_); return do_run_one(lock, this_thread, ec); } std::size_t task_io_service::poll(asio::error_code& ec) { ec = asio::error_code(); if (outstanding_work_ == 0) { stop(); return 0; } thread_info this_thread; this_thread.private_outstanding_work = 0; thread_call_stack::context ctx(this, this_thread); mutex::scoped_lock lock(mutex_); #if defined(ASIO_HAS_THREADS) // We want to support nested calls to poll() and poll_one(), so any handlers // that are already on a thread-private queue need to be put on to the main // queue now. 
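// In user terms the hand-off below covers the case where a handler that is
// itself running under poll() drives the same io_service again.  A commented
// sketch of that shape (handler type and names are illustrative only, written
// in C++03 style to match the surrounding code):
//
//   struct nested_poll
//   {
//     asio::io_service* ios;
//     void operator()() { ios->poll_one(); }   // re-enter from a handler
//   };
//
//   asio::io_service ios;
//   nested_poll h = { &ios };
//   ios.post(h);
//   ios.poll();   // the nested poll_one() can still drain privately queued work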
if (one_thread_) if (thread_info* outer_thread_info = ctx.next_by_key()) op_queue_.push(outer_thread_info->private_op_queue); #endif // defined(ASIO_HAS_THREADS) std::size_t n = 0; for (; do_poll_one(lock, this_thread, ec); lock.lock()) if (n != (std::numeric_limits::max)()) ++n; return n; } std::size_t task_io_service::poll_one(asio::error_code& ec) { ec = asio::error_code(); if (outstanding_work_ == 0) { stop(); return 0; } thread_info this_thread; this_thread.private_outstanding_work = 0; thread_call_stack::context ctx(this, this_thread); mutex::scoped_lock lock(mutex_); #if defined(ASIO_HAS_THREADS) // We want to support nested calls to poll() and poll_one(), so any handlers // that are already on a thread-private queue need to be put on to the main // queue now. if (one_thread_) if (thread_info* outer_thread_info = ctx.next_by_key()) op_queue_.push(outer_thread_info->private_op_queue); #endif // defined(ASIO_HAS_THREADS) return do_poll_one(lock, this_thread, ec); } void task_io_service::stop() { mutex::scoped_lock lock(mutex_); stop_all_threads(lock); } bool task_io_service::stopped() const { mutex::scoped_lock lock(mutex_); return stopped_; } void task_io_service::reset() { mutex::scoped_lock lock(mutex_); stopped_ = false; } void task_io_service::post_immediate_completion( task_io_service::operation* op, bool is_continuation) { #if defined(ASIO_HAS_THREADS) if (one_thread_ || is_continuation) { if (thread_info* this_thread = thread_call_stack::contains(this)) { ++this_thread->private_outstanding_work; this_thread->private_op_queue.push(op); return; } } #else // defined(ASIO_HAS_THREADS) (void)is_continuation; #endif // defined(ASIO_HAS_THREADS) work_started(); mutex::scoped_lock lock(mutex_); op_queue_.push(op); wake_one_thread_and_unlock(lock); } void task_io_service::post_deferred_completion(task_io_service::operation* op) { #if defined(ASIO_HAS_THREADS) if (one_thread_) { if (thread_info* this_thread = thread_call_stack::contains(this)) { this_thread->private_op_queue.push(op); return; } } #endif // defined(ASIO_HAS_THREADS) mutex::scoped_lock lock(mutex_); op_queue_.push(op); wake_one_thread_and_unlock(lock); } void task_io_service::post_deferred_completions( op_queue& ops) { if (!ops.empty()) { #if defined(ASIO_HAS_THREADS) if (one_thread_) { if (thread_info* this_thread = thread_call_stack::contains(this)) { this_thread->private_op_queue.push(ops); return; } } #endif // defined(ASIO_HAS_THREADS) mutex::scoped_lock lock(mutex_); op_queue_.push(ops); wake_one_thread_and_unlock(lock); } } void task_io_service::do_dispatch( task_io_service::operation* op) { work_started(); mutex::scoped_lock lock(mutex_); op_queue_.push(op); wake_one_thread_and_unlock(lock); } void task_io_service::abandon_operations( op_queue& ops) { op_queue ops2; ops2.push(ops); } std::size_t task_io_service::do_run_one(mutex::scoped_lock& lock, task_io_service::thread_info& this_thread, const asio::error_code& ec) { while (!stopped_) { if (!op_queue_.empty()) { // Prepare to execute first handler from queue. operation* o = op_queue_.front(); op_queue_.pop(); bool more_handlers = (!op_queue_.empty()); if (o == &task_operation_) { task_interrupted_ = more_handlers; if (more_handlers && !one_thread_) wakeup_event_.unlock_and_signal_one(lock); else lock.unlock(); task_cleanup on_exit = { this, &lock, &this_thread }; (void)on_exit; // Run the task. May throw an exception. Only block if the operation // queue is empty and we're not polling, otherwise we want to return // as soon as possible. 
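// The task/handler split below is what the public run() loop is built on: a
// thread stays inside do_run_one() for as long as the outstanding work count
// is non-zero.  Commented sketch of the usual keep-alive idiom, mirroring
// resolver_service_base earlier in this archive (std::auto_ptr is used only
// because the surrounding code targets C++03):
//
//   asio::io_service ios;
//   std::auto_ptr<asio::io_service::work> work(
//       new asio::io_service::work(ios));   // keeps the work count above zero
//   ios.run();   // blocks handling posted work until another thread
//                // calls work.reset() (or ios.stop())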
task_->run(!more_handlers, this_thread.private_op_queue); } else { std::size_t task_result = o->task_result_; if (more_handlers && !one_thread_) wake_one_thread_and_unlock(lock); else lock.unlock(); // Ensure the count of outstanding work is decremented on block exit. work_cleanup on_exit = { this, &lock, &this_thread }; (void)on_exit; // Complete the operation. May throw an exception. Deletes the object. o->complete(*this, ec, task_result); return 1; } } else { wakeup_event_.clear(lock); wakeup_event_.wait(lock); } } return 0; } std::size_t task_io_service::do_poll_one(mutex::scoped_lock& lock, task_io_service::thread_info& this_thread, const asio::error_code& ec) { if (stopped_) return 0; operation* o = op_queue_.front(); if (o == &task_operation_) { op_queue_.pop(); lock.unlock(); { task_cleanup c = { this, &lock, &this_thread }; (void)c; // Run the task. May throw an exception. Only block if the operation // queue is empty and we're not polling, otherwise we want to return // as soon as possible. task_->run(false, this_thread.private_op_queue); } o = op_queue_.front(); if (o == &task_operation_) { wakeup_event_.maybe_unlock_and_signal_one(lock); return 0; } } if (o == 0) return 0; op_queue_.pop(); bool more_handlers = (!op_queue_.empty()); std::size_t task_result = o->task_result_; if (more_handlers && !one_thread_) wake_one_thread_and_unlock(lock); else lock.unlock(); // Ensure the count of outstanding work is decremented on block exit. work_cleanup on_exit = { this, &lock, &this_thread }; (void)on_exit; // Complete the operation. May throw an exception. Deletes the object. o->complete(*this, ec, task_result); return 1; } void task_io_service::stop_all_threads( mutex::scoped_lock& lock) { stopped_ = true; wakeup_event_.signal_all(lock); if (!task_interrupted_ && task_) { task_interrupted_ = true; task_->interrupt(); } } void task_io_service::wake_one_thread_and_unlock( mutex::scoped_lock& lock) { if (!wakeup_event_.maybe_unlock_and_signal_one(lock)) { if (!task_interrupted_ && task_) { task_interrupted_ = true; task_->interrupt(); } lock.unlock(); } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_IMPL_TASK_IO_SERVICE_IPP galera-3-25.3.20/asio/asio/detail/impl/winrt_ssocket_service_base.ipp0000644000015300001660000003763313042054732025365 0ustar jenkinsjenkins// // detail/impl/winrt_ssocket_service_base.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WINRT_SSOCKET_SERVICE_BASE_IPP #define ASIO_DETAIL_IMPL_WINRT_SSOCKET_SERVICE_BASE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include #include "asio/detail/winrt_ssocket_service_base.hpp" #include "asio/detail/winrt_async_op.hpp" #include "asio/detail/winrt_utils.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { winrt_ssocket_service_base::winrt_ssocket_service_base( asio::io_service& io_service) : io_service_(use_service(io_service)), async_manager_(use_service(io_service)), mutex_(), impl_list_(0) { } void winrt_ssocket_service_base::shutdown_service() { // Close all implementations, causing all operations to complete. 
asio::detail::mutex::scoped_lock lock(mutex_); base_implementation_type* impl = impl_list_; while (impl) { asio::error_code ignored_ec; close(*impl, ignored_ec); impl = impl->next_; } } void winrt_ssocket_service_base::construct( winrt_ssocket_service_base::base_implementation_type& impl) { // Insert implementation into linked list of all implementations. asio::detail::mutex::scoped_lock lock(mutex_); impl.next_ = impl_list_; impl.prev_ = 0; if (impl_list_) impl_list_->prev_ = &impl; impl_list_ = &impl; } void winrt_ssocket_service_base::base_move_construct( winrt_ssocket_service_base::base_implementation_type& impl, winrt_ssocket_service_base::base_implementation_type& other_impl) { impl.socket_ = other_impl.socket_; other_impl.socket_ = nullptr; // Insert implementation into linked list of all implementations. asio::detail::mutex::scoped_lock lock(mutex_); impl.next_ = impl_list_; impl.prev_ = 0; if (impl_list_) impl_list_->prev_ = &impl; impl_list_ = &impl; } void winrt_ssocket_service_base::base_move_assign( winrt_ssocket_service_base::base_implementation_type& impl, winrt_ssocket_service_base& other_service, winrt_ssocket_service_base::base_implementation_type& other_impl) { asio::error_code ignored_ec; close(impl, ignored_ec); if (this != &other_service) { // Remove implementation from linked list of all implementations. asio::detail::mutex::scoped_lock lock(mutex_); if (impl_list_ == &impl) impl_list_ = impl.next_; if (impl.prev_) impl.prev_->next_ = impl.next_; if (impl.next_) impl.next_->prev_= impl.prev_; impl.next_ = 0; impl.prev_ = 0; } impl.socket_ = other_impl.socket_; other_impl.socket_ = nullptr; if (this != &other_service) { // Insert implementation into linked list of all implementations. asio::detail::mutex::scoped_lock lock(other_service.mutex_); impl.next_ = other_service.impl_list_; impl.prev_ = 0; if (other_service.impl_list_) other_service.impl_list_->prev_ = &impl; other_service.impl_list_ = &impl; } } void winrt_ssocket_service_base::destroy( winrt_ssocket_service_base::base_implementation_type& impl) { asio::error_code ignored_ec; close(impl, ignored_ec); // Remove implementation from linked list of all implementations. asio::detail::mutex::scoped_lock lock(mutex_); if (impl_list_ == &impl) impl_list_ = impl.next_; if (impl.prev_) impl.prev_->next_ = impl.next_; if (impl.next_) impl.next_->prev_= impl.prev_; impl.next_ = 0; impl.prev_ = 0; } asio::error_code winrt_ssocket_service_base::close( winrt_ssocket_service_base::base_implementation_type& impl, asio::error_code& ec) { if (impl.socket_) { delete impl.socket_; impl.socket_ = nullptr; } ec = asio::error_code(); return ec; } std::size_t winrt_ssocket_service_base::do_get_endpoint( const base_implementation_type& impl, bool local, void* addr, std::size_t addr_len, asio::error_code& ec) const { if (!is_open(impl)) { ec = asio::error::bad_descriptor; return addr_len; } try { std::string addr_string = winrt_utils::string(local ? impl.socket_->Information->LocalAddress->CanonicalName : impl.socket_->Information->RemoteAddress->CanonicalName); unsigned short port = winrt_utils::integer(local ? 
impl.socket_->Information->LocalPort : impl.socket_->Information->RemotePort); unsigned long scope = 0; switch (reinterpret_cast(addr)->sa_family) { case ASIO_OS_DEF(AF_INET): if (addr_len < sizeof(sockaddr_in4_type)) { ec = asio::error::invalid_argument; return addr_len; } else { socket_ops::inet_pton(ASIO_OS_DEF(AF_INET), addr_string.c_str(), &reinterpret_cast(addr)->sin_addr, &scope, ec); reinterpret_cast(addr)->sin_port = socket_ops::host_to_network_short(port); ec = asio::error_code(); return sizeof(sockaddr_in4_type); } case ASIO_OS_DEF(AF_INET6): if (addr_len < sizeof(sockaddr_in6_type)) { ec = asio::error::invalid_argument; return addr_len; } else { socket_ops::inet_pton(ASIO_OS_DEF(AF_INET6), addr_string.c_str(), &reinterpret_cast(addr)->sin6_addr, &scope, ec); reinterpret_cast(addr)->sin6_port = socket_ops::host_to_network_short(port); ec = asio::error_code(); return sizeof(sockaddr_in6_type); } default: ec = asio::error::address_family_not_supported; return addr_len; } } catch (Platform::Exception^ e) { ec = asio::error_code(e->HResult, asio::system_category()); return addr_len; } } asio::error_code winrt_ssocket_service_base::do_set_option( winrt_ssocket_service_base::base_implementation_type& impl, int level, int optname, const void* optval, std::size_t optlen, asio::error_code& ec) { if (!is_open(impl)) { ec = asio::error::bad_descriptor; return ec; } try { if (level == ASIO_OS_DEF(SOL_SOCKET) && optname == ASIO_OS_DEF(SO_KEEPALIVE)) { if (optlen == sizeof(int)) { int value = 0; std::memcpy(&value, optval, optlen); impl.socket_->Control->KeepAlive = !!value; ec = asio::error_code(); } else { ec = asio::error::invalid_argument; } } else if (level == ASIO_OS_DEF(IPPROTO_TCP) && optname == ASIO_OS_DEF(TCP_NODELAY)) { if (optlen == sizeof(int)) { int value = 0; std::memcpy(&value, optval, optlen); impl.socket_->Control->NoDelay = !!value; ec = asio::error_code(); } else { ec = asio::error::invalid_argument; } } else { ec = asio::error::invalid_argument; } } catch (Platform::Exception^ e) { ec = asio::error_code(e->HResult, asio::system_category()); } return ec; } void winrt_ssocket_service_base::do_get_option( const winrt_ssocket_service_base::base_implementation_type& impl, int level, int optname, void* optval, std::size_t* optlen, asio::error_code& ec) const { if (!is_open(impl)) { ec = asio::error::bad_descriptor; return; } try { if (level == ASIO_OS_DEF(SOL_SOCKET) && optname == ASIO_OS_DEF(SO_KEEPALIVE)) { if (*optlen >= sizeof(int)) { int value = impl.socket_->Control->KeepAlive ? 1 : 0; std::memcpy(optval, &value, sizeof(int)); *optlen = sizeof(int); ec = asio::error_code(); } else { ec = asio::error::invalid_argument; } } else if (level == ASIO_OS_DEF(IPPROTO_TCP) && optname == ASIO_OS_DEF(TCP_NODELAY)) { if (*optlen >= sizeof(int)) { int value = impl.socket_->Control->NoDelay ? 
1 : 0; std::memcpy(optval, &value, sizeof(int)); *optlen = sizeof(int); ec = asio::error_code(); } else { ec = asio::error::invalid_argument; } } else { ec = asio::error::invalid_argument; } } catch (Platform::Exception^ e) { ec = asio::error_code(e->HResult, asio::system_category()); } } asio::error_code winrt_ssocket_service_base::do_connect( winrt_ssocket_service_base::base_implementation_type& impl, const void* addr, asio::error_code& ec) { if (!is_open(impl)) { ec = asio::error::bad_descriptor; return ec; } char addr_string[max_addr_v6_str_len]; unsigned short port; switch (reinterpret_cast(addr)->sa_family) { case ASIO_OS_DEF(AF_INET): socket_ops::inet_ntop(ASIO_OS_DEF(AF_INET), &reinterpret_cast(addr)->sin_addr, addr_string, sizeof(addr_string), 0, ec); port = socket_ops::network_to_host_short( reinterpret_cast(addr)->sin_port); break; case ASIO_OS_DEF(AF_INET6): socket_ops::inet_ntop(ASIO_OS_DEF(AF_INET6), &reinterpret_cast(addr)->sin6_addr, addr_string, sizeof(addr_string), 0, ec); port = socket_ops::network_to_host_short( reinterpret_cast(addr)->sin6_port); break; default: ec = asio::error::address_family_not_supported; return ec; } if (!ec) try { async_manager_.sync(impl.socket_->ConnectAsync( ref new Windows::Networking::HostName( winrt_utils::string(addr_string)), winrt_utils::string(port)), ec); } catch (Platform::Exception^ e) { ec = asio::error_code(e->HResult, asio::system_category()); } return ec; } void winrt_ssocket_service_base::start_connect_op( winrt_ssocket_service_base::base_implementation_type& impl, const void* addr, winrt_async_op* op, bool is_continuation) { if (!is_open(impl)) { op->ec_ = asio::error::bad_descriptor; io_service_.post_immediate_completion(op, is_continuation); return; } char addr_string[max_addr_v6_str_len]; unsigned short port = 0; switch (reinterpret_cast(addr)->sa_family) { case ASIO_OS_DEF(AF_INET): socket_ops::inet_ntop(ASIO_OS_DEF(AF_INET), &reinterpret_cast(addr)->sin_addr, addr_string, sizeof(addr_string), 0, op->ec_); port = socket_ops::network_to_host_short( reinterpret_cast(addr)->sin_port); break; case ASIO_OS_DEF(AF_INET6): socket_ops::inet_ntop(ASIO_OS_DEF(AF_INET6), &reinterpret_cast(addr)->sin6_addr, addr_string, sizeof(addr_string), 0, op->ec_); port = socket_ops::network_to_host_short( reinterpret_cast(addr)->sin6_port); break; default: op->ec_ = asio::error::address_family_not_supported; break; } if (op->ec_) { io_service_.post_immediate_completion(op, is_continuation); return; } try { async_manager_.async(impl.socket_->ConnectAsync( ref new Windows::Networking::HostName( winrt_utils::string(addr_string)), winrt_utils::string(port)), op); } catch (Platform::Exception^ e) { op->ec_ = asio::error_code( e->HResult, asio::system_category()); io_service_.post_immediate_completion(op, is_continuation); } } std::size_t winrt_ssocket_service_base::do_send( winrt_ssocket_service_base::base_implementation_type& impl, const asio::const_buffer& data, socket_base::message_flags flags, asio::error_code& ec) { if (flags) { ec = asio::error::operation_not_supported; return 0; } if (!is_open(impl)) { ec = asio::error::bad_descriptor; return 0; } try { buffer_sequence_adapter bufs(asio::buffer(data)); if (bufs.all_empty()) { ec = asio::error_code(); return 0; } return async_manager_.sync( impl.socket_->OutputStream->WriteAsync(bufs.buffers()[0]), ec); } catch (Platform::Exception^ e) { ec = asio::error_code(e->HResult, asio::system_category()); return 0; } } void winrt_ssocket_service_base::start_send_op( 
winrt_ssocket_service_base::base_implementation_type& impl, const asio::const_buffer& data, socket_base::message_flags flags, winrt_async_op* op, bool is_continuation) { if (flags) { op->ec_ = asio::error::operation_not_supported; io_service_.post_immediate_completion(op, is_continuation); return; } if (!is_open(impl)) { op->ec_ = asio::error::bad_descriptor; io_service_.post_immediate_completion(op, is_continuation); return; } try { buffer_sequence_adapter bufs(asio::buffer(data)); if (bufs.all_empty()) { io_service_.post_immediate_completion(op, is_continuation); return; } async_manager_.async( impl.socket_->OutputStream->WriteAsync(bufs.buffers()[0]), op); } catch (Platform::Exception^ e) { op->ec_ = asio::error_code(e->HResult, asio::system_category()); io_service_.post_immediate_completion(op, is_continuation); } } std::size_t winrt_ssocket_service_base::do_receive( winrt_ssocket_service_base::base_implementation_type& impl, const asio::mutable_buffer& data, socket_base::message_flags flags, asio::error_code& ec) { if (flags) { ec = asio::error::operation_not_supported; return 0; } if (!is_open(impl)) { ec = asio::error::bad_descriptor; return 0; } try { buffer_sequence_adapter bufs(asio::buffer(data)); if (bufs.all_empty()) { ec = asio::error_code(); return 0; } async_manager_.sync( impl.socket_->InputStream->ReadAsync( bufs.buffers()[0], bufs.buffers()[0]->Capacity, Windows::Storage::Streams::InputStreamOptions::Partial), ec); std::size_t bytes_transferred = bufs.buffers()[0]->Length; if (bytes_transferred == 0 && !ec) { ec = asio::error::eof; } return bytes_transferred; } catch (Platform::Exception^ e) { ec = asio::error_code(e->HResult, asio::system_category()); return 0; } } void winrt_ssocket_service_base::start_receive_op( winrt_ssocket_service_base::base_implementation_type& impl, const asio::mutable_buffer& data, socket_base::message_flags flags, winrt_async_op* op, bool is_continuation) { if (flags) { op->ec_ = asio::error::operation_not_supported; io_service_.post_immediate_completion(op, is_continuation); return; } if (!is_open(impl)) { op->ec_ = asio::error::bad_descriptor; io_service_.post_immediate_completion(op, is_continuation); return; } try { buffer_sequence_adapter bufs(asio::buffer(data)); if (bufs.all_empty()) { io_service_.post_immediate_completion(op, is_continuation); return; } async_manager_.async( impl.socket_->InputStream->ReadAsync( bufs.buffers()[0], bufs.buffers()[0]->Capacity, Windows::Storage::Streams::InputStreamOptions::Partial), op); } catch (Platform::Exception^ e) { op->ec_ = asio::error_code(e->HResult, asio::system_category()); io_service_.post_immediate_completion(op, is_continuation); } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_IMPL_WINRT_SSOCKET_SERVICE_BASE_IPP galera-3-25.3.20/asio/asio/detail/impl/task_io_service.hpp0000644000015300001660000000421513042054732023113 0ustar jenkinsjenkins// // detail/impl/task_io_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_TASK_IO_SERVICE_HPP #define ASIO_DETAIL_IMPL_TASK_IO_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/addressof.hpp" #include "asio/detail/completion_handler.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_cont_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template void task_io_service::dispatch(Handler& handler) { if (thread_call_stack::contains(this)) { fenced_block b(fenced_block::full); asio_handler_invoke_helpers::invoke(handler, handler); } else { // Allocate and construct an operation to wrap the handler. typedef completion_handler op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "io_service", this, "dispatch")); do_dispatch(p.p); p.v = p.p = 0; } } template void task_io_service::post(Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef completion_handler op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "io_service", this, "post")); post_immediate_completion(p.p, is_continuation); p.v = p.p = 0; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IMPL_TASK_IO_SERVICE_HPP galera-3-25.3.20/asio/asio/detail/impl/timer_queue_ptime.ipp0000644000015300001660000000415113042054732023464 0ustar jenkinsjenkins// // detail/impl/timer_queue_ptime.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_TIMER_QUEUE_PTIME_IPP #define ASIO_DETAIL_IMPL_TIMER_QUEUE_PTIME_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/timer_queue_ptime.hpp" #include "asio/detail/push_options.hpp" #if defined(ASIO_HAS_BOOST_DATE_TIME) namespace asio { namespace detail { timer_queue >::timer_queue() { } timer_queue >::~timer_queue() { } bool timer_queue >::enqueue_timer( const time_type& time, per_timer_data& timer, wait_op* op) { return impl_.enqueue_timer(time, timer, op); } bool timer_queue >::empty() const { return impl_.empty(); } long timer_queue >::wait_duration_msec( long max_duration) const { return impl_.wait_duration_msec(max_duration); } long timer_queue >::wait_duration_usec( long max_duration) const { return impl_.wait_duration_usec(max_duration); } void timer_queue >::get_ready_timers( op_queue& ops) { impl_.get_ready_timers(ops); } void timer_queue >::get_all_timers( op_queue& ops) { impl_.get_all_timers(ops); } std::size_t timer_queue >::cancel_timer( per_timer_data& timer, op_queue& ops, std::size_t max_cancelled) { return impl_.cancel_timer(timer, ops, max_cancelled); } } // namespace detail } // namespace asio #endif // defined(ASIO_HAS_BOOST_DATE_TIME) #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IMPL_TIMER_QUEUE_PTIME_IPP galera-3-25.3.20/asio/asio/detail/impl/win_thread.ipp0000644000015300001660000000712713042054732022074 0ustar jenkinsjenkins// // detail/impl/win_thread.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_THREAD_IPP #define ASIO_DETAIL_IMPL_WIN_THREAD_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) && !defined(UNDER_CE) #include #include "asio/detail/throw_error.hpp" #include "asio/detail/win_thread.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { win_thread::~win_thread() { ::CloseHandle(thread_); // The exit_event_ handle is deliberately allowed to leak here since it // is an error for the owner of an internal thread not to join() it. 
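// In other words the contract is: whoever owns a win_thread must call join()
// before letting this destructor run, exactly as resolver_service_base does
// with its work thread earlier in this archive.  Commented sketch of that
// ownership pattern (the runner functor is illustrative, not an asio type):
//
//   struct runner
//   {
//     asio::io_service* ios;
//     void operator()() { ios->run(); }
//   };
//
//   runner r = { &ios };
//   asio::detail::thread t(r);   // start_thread() waits for entry_event_
//   // ... later, during shutdown ...
//   t.join();                    // completes the exit_event_ handshake
//                                // before ~win_thread() closes the handle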
} void win_thread::join() { HANDLE handles[2] = { exit_event_, thread_ }; ::WaitForMultipleObjects(2, handles, FALSE, INFINITE); ::CloseHandle(exit_event_); if (terminate_threads()) { ::TerminateThread(thread_, 0); } else { ::QueueUserAPC(apc_function, thread_, 0); ::WaitForSingleObject(thread_, INFINITE); } } void win_thread::start_thread(func_base* arg, unsigned int stack_size) { ::HANDLE entry_event = 0; arg->entry_event_ = entry_event = ::CreateEvent(0, true, false, 0); if (!entry_event) { DWORD last_error = ::GetLastError(); delete arg; asio::error_code ec(last_error, asio::error::get_system_category()); asio::detail::throw_error(ec, "thread.entry_event"); } arg->exit_event_ = exit_event_ = ::CreateEvent(0, true, false, 0); if (!exit_event_) { DWORD last_error = ::GetLastError(); delete arg; asio::error_code ec(last_error, asio::error::get_system_category()); asio::detail::throw_error(ec, "thread.exit_event"); } unsigned int thread_id = 0; thread_ = reinterpret_cast(::_beginthreadex(0, stack_size, win_thread_function, arg, 0, &thread_id)); if (!thread_) { DWORD last_error = ::GetLastError(); delete arg; if (entry_event) ::CloseHandle(entry_event); if (exit_event_) ::CloseHandle(exit_event_); asio::error_code ec(last_error, asio::error::get_system_category()); asio::detail::throw_error(ec, "thread"); } if (entry_event) { ::WaitForSingleObject(entry_event, INFINITE); ::CloseHandle(entry_event); } } unsigned int __stdcall win_thread_function(void* arg) { win_thread::auto_func_base_ptr func = { static_cast(arg) }; ::SetEvent(func.ptr->entry_event_); func.ptr->run(); // Signal that the thread has finished its work, but rather than returning go // to sleep to put the thread into a well known state. If the thread is being // joined during global object destruction then it may be killed using // TerminateThread (to avoid a deadlock in DllMain). Otherwise, the SleepEx // call will be interrupted using QueueUserAPC and the thread will shut down // cleanly. HANDLE exit_event = func.ptr->exit_event_; delete func.ptr; func.ptr = 0; ::SetEvent(exit_event); ::SleepEx(INFINITE, TRUE); return 0; } #if defined(WINVER) && (WINVER < 0x0500) void __stdcall apc_function(ULONG) {} #else void __stdcall apc_function(ULONG_PTR) {} #endif } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS) && !defined(UNDER_CE) #endif // ASIO_DETAIL_IMPL_WIN_THREAD_IPP galera-3-25.3.20/asio/asio/detail/impl/win_iocp_handle_service.ipp0000644000015300001660000003261013042054732024605 0ustar jenkinsjenkins// // detail/impl/win_iocp_handle_service.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_IOCP_HANDLE_SERVICE_IPP #define ASIO_DETAIL_IMPL_WIN_IOCP_HANDLE_SERVICE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/win_iocp_handle_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class win_iocp_handle_service::overlapped_wrapper : public OVERLAPPED { public: explicit overlapped_wrapper(asio::error_code& ec) { Internal = 0; InternalHigh = 0; Offset = 0; OffsetHigh = 0; // Create a non-signalled manual-reset event, for GetOverlappedResult. hEvent = ::CreateEvent(0, TRUE, FALSE, 0); if (hEvent) { // As documented in GetQueuedCompletionStatus, setting the low order // bit of this event prevents our synchronous writes from being treated // as completion port events. DWORD_PTR tmp = reinterpret_cast(hEvent); hEvent = reinterpret_cast(tmp | 1); } else { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); } } ~overlapped_wrapper() { if (hEvent) { ::CloseHandle(hEvent); } } }; win_iocp_handle_service::win_iocp_handle_service( asio::io_service& io_service) : iocp_service_(asio::use_service(io_service)), mutex_(), impl_list_(0) { } void win_iocp_handle_service::shutdown_service() { // Close all implementations, causing all operations to complete. asio::detail::mutex::scoped_lock lock(mutex_); implementation_type* impl = impl_list_; while (impl) { close_for_destruction(*impl); impl = impl->next_; } } void win_iocp_handle_service::construct( win_iocp_handle_service::implementation_type& impl) { impl.handle_ = INVALID_HANDLE_VALUE; impl.safe_cancellation_thread_id_ = 0; // Insert implementation into linked list of all implementations. asio::detail::mutex::scoped_lock lock(mutex_); impl.next_ = impl_list_; impl.prev_ = 0; if (impl_list_) impl_list_->prev_ = &impl; impl_list_ = &impl; } void win_iocp_handle_service::move_construct( win_iocp_handle_service::implementation_type& impl, win_iocp_handle_service::implementation_type& other_impl) { impl.handle_ = other_impl.handle_; other_impl.handle_ = INVALID_HANDLE_VALUE; impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_; other_impl.safe_cancellation_thread_id_ = 0; // Insert implementation into linked list of all implementations. asio::detail::mutex::scoped_lock lock(mutex_); impl.next_ = impl_list_; impl.prev_ = 0; if (impl_list_) impl_list_->prev_ = &impl; impl_list_ = &impl; } void win_iocp_handle_service::move_assign( win_iocp_handle_service::implementation_type& impl, win_iocp_handle_service& other_service, win_iocp_handle_service::implementation_type& other_impl) { close_for_destruction(impl); if (this != &other_service) { // Remove implementation from linked list of all implementations. asio::detail::mutex::scoped_lock lock(mutex_); if (impl_list_ == &impl) impl_list_ = impl.next_; if (impl.prev_) impl.prev_->next_ = impl.next_; if (impl.next_) impl.next_->prev_= impl.prev_; impl.next_ = 0; impl.prev_ = 0; } impl.handle_ = other_impl.handle_; other_impl.handle_ = INVALID_HANDLE_VALUE; impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_; other_impl.safe_cancellation_thread_id_ = 0; if (this != &other_service) { // Insert implementation into linked list of all implementations. 
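// construct(), destroy(), move_construct() and move_assign() all maintain the
// same intrusive, mutex-guarded doubly-linked list of implementations, so that
// shutdown_service() can walk and close every open handle.  The generic shape
// of that bookkeeping, as a commented sketch (names are illustrative, not
// asio's):
//
//   struct impl_node { impl_node* next_; impl_node* prev_; };
//
//   void push_front(impl_node*& head, impl_node& n)   // caller holds the mutex
//   {
//     n.next_ = head;
//     n.prev_ = 0;
//     if (head) head->prev_ = &n;
//     head = &n;
//   }
//
//   void unlink(impl_node*& head, impl_node& n)       // caller holds the mutex
//   {
//     if (head == &n) head = n.next_;
//     if (n.prev_) n.prev_->next_ = n.next_;
//     if (n.next_) n.next_->prev_ = n.prev_;
//     n.next_ = n.prev_ = 0;
//   }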
asio::detail::mutex::scoped_lock lock(other_service.mutex_); impl.next_ = other_service.impl_list_; impl.prev_ = 0; if (other_service.impl_list_) other_service.impl_list_->prev_ = &impl; other_service.impl_list_ = &impl; } } void win_iocp_handle_service::destroy( win_iocp_handle_service::implementation_type& impl) { close_for_destruction(impl); // Remove implementation from linked list of all implementations. asio::detail::mutex::scoped_lock lock(mutex_); if (impl_list_ == &impl) impl_list_ = impl.next_; if (impl.prev_) impl.prev_->next_ = impl.next_; if (impl.next_) impl.next_->prev_= impl.prev_; impl.next_ = 0; impl.prev_ = 0; } asio::error_code win_iocp_handle_service::assign( win_iocp_handle_service::implementation_type& impl, const native_handle_type& handle, asio::error_code& ec) { if (is_open(impl)) { ec = asio::error::already_open; return ec; } if (iocp_service_.register_handle(handle, ec)) return ec; impl.handle_ = handle; ec = asio::error_code(); return ec; } asio::error_code win_iocp_handle_service::close( win_iocp_handle_service::implementation_type& impl, asio::error_code& ec) { if (is_open(impl)) { ASIO_HANDLER_OPERATION(("handle", &impl, "close")); if (!::CloseHandle(impl.handle_)) { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); } else { ec = asio::error_code(); } impl.handle_ = INVALID_HANDLE_VALUE; impl.safe_cancellation_thread_id_ = 0; } else { ec = asio::error_code(); } return ec; } asio::error_code win_iocp_handle_service::cancel( win_iocp_handle_service::implementation_type& impl, asio::error_code& ec) { if (!is_open(impl)) { ec = asio::error::bad_descriptor; return ec; } ASIO_HANDLER_OPERATION(("handle", &impl, "cancel")); if (FARPROC cancel_io_ex_ptr = ::GetProcAddress( ::GetModuleHandleA("KERNEL32"), "CancelIoEx")) { // The version of Windows supports cancellation from any thread. typedef BOOL (WINAPI* cancel_io_ex_t)(HANDLE, LPOVERLAPPED); cancel_io_ex_t cancel_io_ex = (cancel_io_ex_t)cancel_io_ex_ptr; if (!cancel_io_ex(impl.handle_, 0)) { DWORD last_error = ::GetLastError(); if (last_error == ERROR_NOT_FOUND) { // ERROR_NOT_FOUND means that there were no operations to be // cancelled. We swallow this error to match the behaviour on other // platforms. ec = asio::error_code(); } else { ec = asio::error_code(last_error, asio::error::get_system_category()); } } else { ec = asio::error_code(); } } else if (impl.safe_cancellation_thread_id_ == 0) { // No operations have been started, so there's nothing to cancel. ec = asio::error_code(); } else if (impl.safe_cancellation_thread_id_ == ::GetCurrentThreadId()) { // Asynchronous operations have been started from the current thread only, // so it is safe to try to cancel them using CancelIo. if (!::CancelIo(impl.handle_)) { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); } else { ec = asio::error_code(); } } else { // Asynchronous operations have been started from more than one thread, // so cancellation is not safe. ec = asio::error::operation_not_supported; } return ec; } size_t win_iocp_handle_service::do_write( win_iocp_handle_service::implementation_type& impl, uint64_t offset, const asio::const_buffer& buffer, asio::error_code& ec) { if (!is_open(impl)) { ec = asio::error::bad_descriptor; return 0; } // A request to write 0 bytes on a handle is a no-op. 
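// Both the synchronous and asynchronous read/write paths below place the
// 64-bit file offset into the two 32-bit OVERLAPPED fields.  Equivalent
// helper, shown only as a commented sketch (asio writes the fields inline
// rather than through a function like this):
//
//   void set_overlapped_offset(OVERLAPPED& o, uint64_t offset)
//   {
//     o.Offset     = static_cast<DWORD>(offset & 0xFFFFFFFF);
//     o.OffsetHigh = static_cast<DWORD>((offset >> 32) & 0xFFFFFFFF);
//   }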
if (asio::buffer_size(buffer) == 0) { ec = asio::error_code(); return 0; } overlapped_wrapper overlapped(ec); if (ec) { return 0; } // Write the data. overlapped.Offset = offset & 0xFFFFFFFF; overlapped.OffsetHigh = (offset >> 32) & 0xFFFFFFFF; BOOL ok = ::WriteFile(impl.handle_, asio::buffer_cast(buffer), static_cast(asio::buffer_size(buffer)), 0, &overlapped); if (!ok) { DWORD last_error = ::GetLastError(); if (last_error != ERROR_IO_PENDING) { ec = asio::error_code(last_error, asio::error::get_system_category()); return 0; } } // Wait for the operation to complete. DWORD bytes_transferred = 0; ok = ::GetOverlappedResult(impl.handle_, &overlapped, &bytes_transferred, TRUE); if (!ok) { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); return 0; } ec = asio::error_code(); return bytes_transferred; } void win_iocp_handle_service::start_write_op( win_iocp_handle_service::implementation_type& impl, uint64_t offset, const asio::const_buffer& buffer, operation* op) { update_cancellation_thread_id(impl); iocp_service_.work_started(); if (!is_open(impl)) { iocp_service_.on_completion(op, asio::error::bad_descriptor); } else if (asio::buffer_size(buffer) == 0) { // A request to write 0 bytes on a handle is a no-op. iocp_service_.on_completion(op); } else { DWORD bytes_transferred = 0; op->Offset = offset & 0xFFFFFFFF; op->OffsetHigh = (offset >> 32) & 0xFFFFFFFF; BOOL ok = ::WriteFile(impl.handle_, asio::buffer_cast(buffer), static_cast(asio::buffer_size(buffer)), &bytes_transferred, op); DWORD last_error = ::GetLastError(); if (!ok && last_error != ERROR_IO_PENDING && last_error != ERROR_MORE_DATA) { iocp_service_.on_completion(op, last_error, bytes_transferred); } else { iocp_service_.on_pending(op); } } } size_t win_iocp_handle_service::do_read( win_iocp_handle_service::implementation_type& impl, uint64_t offset, const asio::mutable_buffer& buffer, asio::error_code& ec) { if (!is_open(impl)) { ec = asio::error::bad_descriptor; return 0; } // A request to read 0 bytes on a stream handle is a no-op. if (asio::buffer_size(buffer) == 0) { ec = asio::error_code(); return 0; } overlapped_wrapper overlapped(ec); if (ec) { return 0; } // Read some data. overlapped.Offset = offset & 0xFFFFFFFF; overlapped.OffsetHigh = (offset >> 32) & 0xFFFFFFFF; BOOL ok = ::ReadFile(impl.handle_, asio::buffer_cast(buffer), static_cast(asio::buffer_size(buffer)), 0, &overlapped); if (!ok) { DWORD last_error = ::GetLastError(); if (last_error != ERROR_IO_PENDING && last_error != ERROR_MORE_DATA) { if (last_error == ERROR_HANDLE_EOF) { ec = asio::error::eof; } else { ec = asio::error_code(last_error, asio::error::get_system_category()); } return 0; } } // Wait for the operation to complete. DWORD bytes_transferred = 0; ok = ::GetOverlappedResult(impl.handle_, &overlapped, &bytes_transferred, TRUE); if (!ok) { DWORD last_error = ::GetLastError(); if (last_error == ERROR_HANDLE_EOF) { ec = asio::error::eof; } else { ec = asio::error_code(last_error, asio::error::get_system_category()); } return (last_error == ERROR_MORE_DATA) ? 
bytes_transferred : 0; } ec = asio::error_code(); return bytes_transferred; } void win_iocp_handle_service::start_read_op( win_iocp_handle_service::implementation_type& impl, uint64_t offset, const asio::mutable_buffer& buffer, operation* op) { update_cancellation_thread_id(impl); iocp_service_.work_started(); if (!is_open(impl)) { iocp_service_.on_completion(op, asio::error::bad_descriptor); } else if (asio::buffer_size(buffer) == 0) { // A request to read 0 bytes on a handle is a no-op. iocp_service_.on_completion(op); } else { DWORD bytes_transferred = 0; op->Offset = offset & 0xFFFFFFFF; op->OffsetHigh = (offset >> 32) & 0xFFFFFFFF; BOOL ok = ::ReadFile(impl.handle_, asio::buffer_cast(buffer), static_cast(asio::buffer_size(buffer)), &bytes_transferred, op); DWORD last_error = ::GetLastError(); if (!ok && last_error != ERROR_IO_PENDING && last_error != ERROR_MORE_DATA) { iocp_service_.on_completion(op, last_error, bytes_transferred); } else { iocp_service_.on_pending(op); } } } void win_iocp_handle_service::update_cancellation_thread_id( win_iocp_handle_service::implementation_type& impl) { if (impl.safe_cancellation_thread_id_ == 0) impl.safe_cancellation_thread_id_ = ::GetCurrentThreadId(); else if (impl.safe_cancellation_thread_id_ != ::GetCurrentThreadId()) impl.safe_cancellation_thread_id_ = ~DWORD(0); } void win_iocp_handle_service::close_for_destruction(implementation_type& impl) { if (is_open(impl)) { ASIO_HANDLER_OPERATION(("handle", &impl, "close")); ::CloseHandle(impl.handle_); impl.handle_ = INVALID_HANDLE_VALUE; impl.safe_cancellation_thread_id_ = 0; } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_IMPL_WIN_IOCP_HANDLE_SERVICE_IPP galera-3-25.3.20/asio/asio/detail/impl/buffer_sequence_adapter.ipp0000644000015300001660000000573213042054732024611 0ustar jenkinsjenkins// // detail/impl/buffer_sequence_adapter.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_BUFFER_SEQUENCE_ADAPTER_IPP #define ASIO_DETAIL_IMPL_BUFFER_SEQUENCE_ADAPTER_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include #include #include #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class winrt_buffer_impl : public Microsoft::WRL::RuntimeClass< Microsoft::WRL::RuntimeClassFlags< Microsoft::WRL::RuntimeClassType::WinRtClassicComMix>, ABI::Windows::Storage::Streams::IBuffer, Windows::Storage::Streams::IBufferByteAccess> { public: explicit winrt_buffer_impl(const asio::const_buffer& b) { bytes_ = const_cast(asio::buffer_cast(b)); length_ = asio::buffer_size(b); capacity_ = asio::buffer_size(b); } explicit winrt_buffer_impl(const asio::mutable_buffer& b) { bytes_ = const_cast(asio::buffer_cast(b)); length_ = 0; capacity_ = asio::buffer_size(b); } ~winrt_buffer_impl() { } STDMETHODIMP Buffer(byte** value) { *value = bytes_; return S_OK; } STDMETHODIMP get_Capacity(UINT32* value) { *value = capacity_; return S_OK; } STDMETHODIMP get_Length(UINT32 *value) { *value = length_; return S_OK; } STDMETHODIMP put_Length(UINT32 value) { if (value > capacity_) return E_INVALIDARG; length_ = value; return S_OK; } private: byte* bytes_; UINT32 length_; UINT32 capacity_; }; void buffer_sequence_adapter_base::init_native_buffer( buffer_sequence_adapter_base::native_buffer_type& buf, const asio::mutable_buffer& buffer) { std::memset(&buf, 0, sizeof(native_buffer_type)); Microsoft::WRL::ComPtr insp = Microsoft::WRL::Make(buffer); buf = reinterpret_cast(insp.Get()); } void buffer_sequence_adapter_base::init_native_buffer( buffer_sequence_adapter_base::native_buffer_type& buf, const asio::const_buffer& buffer) { std::memset(&buf, 0, sizeof(native_buffer_type)); Microsoft::WRL::ComPtr insp = Microsoft::WRL::Make(buffer); Platform::Object^ buf_obj = reinterpret_cast(insp.Get()); buf = reinterpret_cast(insp.Get()); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_IMPL_BUFFER_SEQUENCE_ADAPTER_IPP galera-3-25.3.20/asio/asio/detail/impl/win_static_mutex.ipp0000644000015300001660000000547613042054732023343 0ustar jenkinsjenkins// // detail/impl/win_static_mutex.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_STATIC_MUTEX_IPP #define ASIO_DETAIL_IMPL_WIN_STATIC_MUTEX_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) #include #include "asio/detail/throw_error.hpp" #include "asio/detail/win_static_mutex.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { void win_static_mutex::init() { int error = do_init(); asio::error_code ec(error, asio::error::get_system_category()); asio::detail::throw_error(ec, "static_mutex"); } int win_static_mutex::do_init() { using namespace std; // For sprintf. 
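  // The name built below embeds both the current process id and the address
  // of this win_static_mutex instance, so every static mutex in every
  // process maps onto a distinct named kernel mutex; the GUID part merely
  // namespaces the names against unrelated applications.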
wchar_t mutex_name[128]; #if defined(ASIO_HAS_SECURE_RTL) swprintf_s( #else // defined(ASIO_HAS_SECURE_RTL) _snwprintf( #endif // defined(ASIO_HAS_SECURE_RTL) mutex_name, 128, L"asio-58CCDC44-6264-4842-90C2-F3C545CB8AA7-%u-%p", static_cast(::GetCurrentProcessId()), this); HANDLE mutex = ::CreateMutexW(0, TRUE, mutex_name); DWORD last_error = ::GetLastError(); if (mutex == 0) return ::GetLastError(); if (last_error == ERROR_ALREADY_EXISTS) ::WaitForSingleObject(mutex, INFINITE); if (initialised_) { ::ReleaseMutex(mutex); ::CloseHandle(mutex); return 0; } #if defined(__MINGW32__) // Not sure if MinGW supports structured exception handling, so for now // we'll just call the Windows API and hope. # if defined(UNDER_CE) ::InitializeCriticalSection(&crit_section_); # else if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000)) { last_error = ::GetLastError(); ::ReleaseMutex(mutex); ::CloseHandle(mutex); return last_error; } # endif #else __try { # if defined(UNDER_CE) ::InitializeCriticalSection(&crit_section_); # else if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000)) { last_error = ::GetLastError(); ::ReleaseMutex(mutex); ::CloseHandle(mutex); return last_error; } # endif } __except(GetExceptionCode() == STATUS_NO_MEMORY ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) { ::ReleaseMutex(mutex); ::CloseHandle(mutex); return ERROR_OUTOFMEMORY; } #endif initialised_ = true; ::ReleaseMutex(mutex); ::CloseHandle(mutex); return 0; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS) #endif // ASIO_DETAIL_IMPL_WIN_STATIC_MUTEX_IPP galera-3-25.3.20/asio/asio/detail/impl/reactive_descriptor_service.ipp0000644000015300001660000001277313042054732025533 0ustar jenkinsjenkins// // detail/impl/reactive_descriptor_service.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_REACTIVE_DESCRIPTOR_SERVICE_IPP #define ASIO_DETAIL_IMPL_REACTIVE_DESCRIPTOR_SERVICE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) #include "asio/error.hpp" #include "asio/detail/reactive_descriptor_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { reactive_descriptor_service::reactive_descriptor_service( asio::io_service& io_service) : reactor_(asio::use_service(io_service)) { reactor_.init_task(); } void reactive_descriptor_service::shutdown_service() { } void reactive_descriptor_service::construct( reactive_descriptor_service::implementation_type& impl) { impl.descriptor_ = -1; impl.state_ = 0; } void reactive_descriptor_service::move_construct( reactive_descriptor_service::implementation_type& impl, reactive_descriptor_service::implementation_type& other_impl) { impl.descriptor_ = other_impl.descriptor_; other_impl.descriptor_ = -1; impl.state_ = other_impl.state_; other_impl.state_ = 0; reactor_.move_descriptor(impl.descriptor_, impl.reactor_data_, other_impl.reactor_data_); } void reactive_descriptor_service::move_assign( reactive_descriptor_service::implementation_type& impl, reactive_descriptor_service& other_service, reactive_descriptor_service::implementation_type& other_impl) { destroy(impl); impl.descriptor_ = other_impl.descriptor_; other_impl.descriptor_ = -1; impl.state_ = other_impl.state_; other_impl.state_ = 0; other_service.reactor_.move_descriptor(impl.descriptor_, impl.reactor_data_, other_impl.reactor_data_); } void reactive_descriptor_service::destroy( reactive_descriptor_service::implementation_type& impl) { if (is_open(impl)) { ASIO_HANDLER_OPERATION(("descriptor", &impl, "close")); reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_, (impl.state_ & descriptor_ops::possible_dup) == 0); } asio::error_code ignored_ec; descriptor_ops::close(impl.descriptor_, impl.state_, ignored_ec); } asio::error_code reactive_descriptor_service::assign( reactive_descriptor_service::implementation_type& impl, const native_handle_type& native_descriptor, asio::error_code& ec) { if (is_open(impl)) { ec = asio::error::already_open; return ec; } if (int err = reactor_.register_descriptor( native_descriptor, impl.reactor_data_)) { ec = asio::error_code(err, asio::error::get_system_category()); return ec; } impl.descriptor_ = native_descriptor; impl.state_ = descriptor_ops::possible_dup; ec = asio::error_code(); return ec; } asio::error_code reactive_descriptor_service::close( reactive_descriptor_service::implementation_type& impl, asio::error_code& ec) { if (is_open(impl)) { ASIO_HANDLER_OPERATION(("descriptor", &impl, "close")); reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_, (impl.state_ & descriptor_ops::possible_dup) == 0); } descriptor_ops::close(impl.descriptor_, impl.state_, ec); // The descriptor is closed by the OS even if close() returns an error. // // (Actually, POSIX says the state of the descriptor is unspecified. On // Linux the descriptor is apparently closed anyway; e.g. see // http://lkml.org/lkml/2005/9/10/129 // We'll just have to assume that other OSes follow the same behaviour.) 
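  // Consequently the implementation is reset below even when close()
  // reported an error: the native descriptor is gone either way and the
  // object can be re-assigned afterwards. A minimal caller-side sketch,
  // assuming the public asio::posix::stream_descriptor API (illustration
  // only, not taken from this file):
  //
  //   asio::io_service ios;
  //   asio::posix::stream_descriptor d(ios, ::dup(STDIN_FILENO));
  //   asio::error_code ec;
  //   d.close(ec);          // fd is released whether or not ec is set
  //   assert(!d.is_open()); // the object may now be reused via assign()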
construct(impl); return ec; } reactive_descriptor_service::native_handle_type reactive_descriptor_service::release( reactive_descriptor_service::implementation_type& impl) { native_handle_type descriptor = impl.descriptor_; if (is_open(impl)) { ASIO_HANDLER_OPERATION(("descriptor", &impl, "release")); reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_, false); construct(impl); } return descriptor; } asio::error_code reactive_descriptor_service::cancel( reactive_descriptor_service::implementation_type& impl, asio::error_code& ec) { if (!is_open(impl)) { ec = asio::error::bad_descriptor; return ec; } ASIO_HANDLER_OPERATION(("descriptor", &impl, "cancel")); reactor_.cancel_ops(impl.descriptor_, impl.reactor_data_); ec = asio::error_code(); return ec; } void reactive_descriptor_service::start_op( reactive_descriptor_service::implementation_type& impl, int op_type, reactor_op* op, bool is_continuation, bool is_non_blocking, bool noop) { if (!noop) { if ((impl.state_ & descriptor_ops::non_blocking) || descriptor_ops::set_internal_non_blocking( impl.descriptor_, impl.state_, true, op->ec_)) { reactor_.start_op(op_type, impl.descriptor_, impl.reactor_data_, op, is_continuation, is_non_blocking); return; } } reactor_.post_immediate_completion(op, is_continuation); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) #endif // ASIO_DETAIL_IMPL_REACTIVE_DESCRIPTOR_SERVICE_IPP galera-3-25.3.20/asio/asio/detail/impl/strand_service.ipp0000644000015300001660000001131313042054732022753 0ustar jenkinsjenkins// // detail/impl/strand_service.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_STRAND_SERVICE_IPP #define ASIO_DETAIL_IMPL_STRAND_SERVICE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/call_stack.hpp" #include "asio/detail/strand_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct strand_service::on_do_complete_exit { io_service_impl* owner_; strand_impl* impl_; ~on_do_complete_exit() { impl_->mutex_.lock(); impl_->ready_queue_.push(impl_->waiting_queue_); bool more_handlers = impl_->locked_ = !impl_->ready_queue_.empty(); impl_->mutex_.unlock(); if (more_handlers) owner_->post_immediate_completion(impl_, true); } }; strand_service::strand_service(asio::io_service& io_service) : asio::detail::service_base(io_service), io_service_(asio::use_service(io_service)), mutex_(), salt_(0) { } void strand_service::shutdown_service() { op_queue ops; asio::detail::mutex::scoped_lock lock(mutex_); for (std::size_t i = 0; i < num_implementations; ++i) { if (strand_impl* impl = implementations_[i].get()) { ops.push(impl->waiting_queue_); ops.push(impl->ready_queue_); } } } void strand_service::construct(strand_service::implementation_type& impl) { asio::detail::mutex::scoped_lock lock(mutex_); std::size_t salt = salt_++; #if defined(ASIO_ENABLE_SEQUENTIAL_STRAND_ALLOCATION) std::size_t index = salt; #else // defined(ASIO_ENABLE_SEQUENTIAL_STRAND_ALLOCATION) std::size_t index = reinterpret_cast(&impl); index += (reinterpret_cast(&impl) >> 3); index ^= salt + 0x9e3779b9 + (index << 6) + (index >> 2); #endif // defined(ASIO_ENABLE_SEQUENTIAL_STRAND_ALLOCATION) index = index % num_implementations; if (!implementations_[index].get()) implementations_[index].reset(new strand_impl); impl = implementations_[index].get(); } bool strand_service::running_in_this_thread( const implementation_type& impl) const { return call_stack::contains(impl) != 0; } bool strand_service::do_dispatch(implementation_type& impl, operation* op) { // If we are running inside the io_service, and no other handler already // holds the strand lock, then the handler can run immediately. bool can_dispatch = io_service_.can_dispatch(); impl->mutex_.lock(); if (can_dispatch && !impl->locked_) { // Immediate invocation is allowed. impl->locked_ = true; impl->mutex_.unlock(); return true; } if (impl->locked_) { // Some other handler already holds the strand lock. Enqueue for later. impl->waiting_queue_.push(op); impl->mutex_.unlock(); } else { // The handler is acquiring the strand lock and so is responsible for // scheduling the strand. impl->locked_ = true; impl->mutex_.unlock(); impl->ready_queue_.push(op); io_service_.post_immediate_completion(impl, false); } return false; } void strand_service::do_post(implementation_type& impl, operation* op, bool is_continuation) { impl->mutex_.lock(); if (impl->locked_) { // Some other handler already holds the strand lock. Enqueue for later. impl->waiting_queue_.push(op); impl->mutex_.unlock(); } else { // The handler is acquiring the strand lock and so is responsible for // scheduling the strand. 
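    // At the public API level this is the usual strand guarantee: dispatch()
    // may run a handler immediately when the caller is already inside the
    // io_service and the strand is unlocked, whereas post() always queues.
    // A minimal sketch, assuming the asio::io_service::strand wrapper and
    // two arbitrary copyable handlers h1/h2 (illustration only):
    //
    //   asio::io_service ios;
    //   asio::io_service::strand s(ios);
    //   s.dispatch(h1);  // may invoke h1 inline via do_dispatch() above
    //   s.post(h2);      // never inline; enqueued through this function
    //   ios.run();       // h1 and h2 are never invoked concurrently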
impl->locked_ = true; impl->mutex_.unlock(); impl->ready_queue_.push(op); io_service_.post_immediate_completion(impl, is_continuation); } } void strand_service::do_complete(io_service_impl* owner, operation* base, const asio::error_code& ec, std::size_t /*bytes_transferred*/) { if (owner) { strand_impl* impl = static_cast(base); // Indicate that this strand is executing on the current thread. call_stack::context ctx(impl); // Ensure the next handler, if any, is scheduled on block exit. on_do_complete_exit on_exit = { owner, impl }; (void)on_exit; // Run all ready handlers. No lock is required since the ready queue is // accessed only within the strand. while (operation* o = impl->ready_queue_.front()) { impl->ready_queue_.pop(); o->complete(*owner, ec, 0); } } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IMPL_STRAND_SERVICE_IPP galera-3-25.3.20/asio/asio/detail/impl/descriptor_ops.ipp0000644000015300001660000002513513042054732023006 0ustar jenkinsjenkins// // detail/impl/descriptor_ops.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_DESCRIPTOR_OPS_IPP #define ASIO_DETAIL_IMPL_DESCRIPTOR_OPS_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/descriptor_ops.hpp" #include "asio/error.hpp" #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { namespace descriptor_ops { int open(const char* path, int flags, asio::error_code& ec) { errno = 0; int result = error_wrapper(::open(path, flags), ec); if (result >= 0) ec = asio::error_code(); return result; } int close(int d, state_type& state, asio::error_code& ec) { int result = 0; if (d != -1) { errno = 0; result = error_wrapper(::close(d), ec); if (result != 0 && (ec == asio::error::would_block || ec == asio::error::try_again)) { // According to UNIX Network Programming Vol. 1, it is possible for // close() to fail with EWOULDBLOCK under certain circumstances. What // isn't clear is the state of the descriptor after this error. The one // current OS where this behaviour is seen, Windows, says that the socket // remains open. Therefore we'll put the descriptor back into blocking // mode and have another attempt at closing it. #if defined(__SYMBIAN32__) int flags = ::fcntl(d, F_GETFL, 0); if (flags >= 0) ::fcntl(d, F_SETFL, flags & ~O_NONBLOCK); #else // defined(__SYMBIAN32__) ioctl_arg_type arg = 0; ::ioctl(d, FIONBIO, &arg); #endif // defined(__SYMBIAN32__) state &= ~non_blocking; errno = 0; result = error_wrapper(::close(d), ec); } } if (result == 0) ec = asio::error_code(); return result; } bool set_user_non_blocking(int d, state_type& state, bool value, asio::error_code& ec) { if (d == -1) { ec = asio::error::bad_descriptor; return false; } errno = 0; #if defined(__SYMBIAN32__) int result = error_wrapper(::fcntl(d, F_GETFL, 0), ec); if (result >= 0) { errno = 0; int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK)); result = error_wrapper(::fcntl(d, F_SETFL, flag), ec); } #else // defined(__SYMBIAN32__) ioctl_arg_type arg = (value ? 
1 : 0); int result = error_wrapper(::ioctl(d, FIONBIO, &arg), ec); #endif // defined(__SYMBIAN32__) if (result >= 0) { ec = asio::error_code(); if (value) state |= user_set_non_blocking; else { // Clearing the user-set non-blocking mode always overrides any // internally-set non-blocking flag. Any subsequent asynchronous // operations will need to re-enable non-blocking I/O. state &= ~(user_set_non_blocking | internal_non_blocking); } return true; } return false; } bool set_internal_non_blocking(int d, state_type& state, bool value, asio::error_code& ec) { if (d == -1) { ec = asio::error::bad_descriptor; return false; } if (!value && (state & user_set_non_blocking)) { // It does not make sense to clear the internal non-blocking flag if the // user still wants non-blocking behaviour. Return an error and let the // caller figure out whether to update the user-set non-blocking flag. ec = asio::error::invalid_argument; return false; } errno = 0; #if defined(__SYMBIAN32__) int result = error_wrapper(::fcntl(d, F_GETFL, 0), ec); if (result >= 0) { errno = 0; int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK)); result = error_wrapper(::fcntl(d, F_SETFL, flag), ec); } #else // defined(__SYMBIAN32__) ioctl_arg_type arg = (value ? 1 : 0); int result = error_wrapper(::ioctl(d, FIONBIO, &arg), ec); #endif // defined(__SYMBIAN32__) if (result >= 0) { ec = asio::error_code(); if (value) state |= internal_non_blocking; else state &= ~internal_non_blocking; return true; } return false; } std::size_t sync_read(int d, state_type state, buf* bufs, std::size_t count, bool all_empty, asio::error_code& ec) { if (d == -1) { ec = asio::error::bad_descriptor; return 0; } // A request to read 0 bytes on a stream is a no-op. if (all_empty) { ec = asio::error_code(); return 0; } // Read some data. for (;;) { // Try to complete the operation without blocking. errno = 0; signed_size_type bytes = error_wrapper(::readv( d, bufs, static_cast(count)), ec); // Check if operation succeeded. if (bytes > 0) return bytes; // Check for EOF. if (bytes == 0) { ec = asio::error::eof; return 0; } // Operation failed. if ((state & user_set_non_blocking) || (ec != asio::error::would_block && ec != asio::error::try_again)) return 0; // Wait for descriptor to become ready. if (descriptor_ops::poll_read(d, 0, ec) < 0) return 0; } } bool non_blocking_read(int d, buf* bufs, std::size_t count, asio::error_code& ec, std::size_t& bytes_transferred) { for (;;) { // Read some data. errno = 0; signed_size_type bytes = error_wrapper(::readv( d, bufs, static_cast(count)), ec); // Check for end of stream. if (bytes == 0) { ec = asio::error::eof; return true; } // Retry operation if interrupted by signal. if (ec == asio::error::interrupted) continue; // Check if we need to run the operation again. if (ec == asio::error::would_block || ec == asio::error::try_again) return false; // Operation is complete. if (bytes > 0) { ec = asio::error_code(); bytes_transferred = bytes; } else bytes_transferred = 0; return true; } } std::size_t sync_write(int d, state_type state, const buf* bufs, std::size_t count, bool all_empty, asio::error_code& ec) { if (d == -1) { ec = asio::error::bad_descriptor; return 0; } // A request to write 0 bytes on a stream is a no-op. if (all_empty) { ec = asio::error_code(); return 0; } // Write some data. for (;;) { // Try to complete the operation without blocking. errno = 0; signed_size_type bytes = error_wrapper(::writev( d, bufs, static_cast(count)), ec); // Check if operation succeeded. 
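    // This loop (like the matching one in sync_read() above) emulates a
    // blocking write on a descriptor that asio keeps internally
    // non-blocking: attempt the writev, and if it would block while the
    // user has not asked for non-blocking behaviour, poll until the
    // descriptor is writable and retry. Schematically (a paraphrase of the
    // surrounding code, not additional logic):
    //
    //   for (;;) {
    //     n = ::writev(fd, bufs, count);            // non-blocking attempt
    //     if (n > 0) return n;                      // progress made
    //     if (user_non_blocking || hard_error)      // caller handles it
    //       return 0;
    //     ::poll(fd, POLLOUT, -1);                  // wait, then retry
    //   }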
if (bytes > 0) return bytes; // Operation failed. if ((state & user_set_non_blocking) || (ec != asio::error::would_block && ec != asio::error::try_again)) return 0; // Wait for descriptor to become ready. if (descriptor_ops::poll_write(d, 0, ec) < 0) return 0; } } bool non_blocking_write(int d, const buf* bufs, std::size_t count, asio::error_code& ec, std::size_t& bytes_transferred) { for (;;) { // Write some data. errno = 0; signed_size_type bytes = error_wrapper(::writev( d, bufs, static_cast(count)), ec); // Retry operation if interrupted by signal. if (ec == asio::error::interrupted) continue; // Check if we need to run the operation again. if (ec == asio::error::would_block || ec == asio::error::try_again) return false; // Operation is complete. if (bytes >= 0) { ec = asio::error_code(); bytes_transferred = bytes; } else bytes_transferred = 0; return true; } } int ioctl(int d, state_type& state, long cmd, ioctl_arg_type* arg, asio::error_code& ec) { if (d == -1) { ec = asio::error::bad_descriptor; return -1; } errno = 0; int result = error_wrapper(::ioctl(d, cmd, arg), ec); if (result >= 0) { ec = asio::error_code(); // When updating the non-blocking mode we always perform the ioctl syscall, // even if the flags would otherwise indicate that the descriptor is // already in the correct state. This ensures that the underlying // descriptor is put into the state that has been requested by the user. If // the ioctl syscall was successful then we need to update the flags to // match. if (cmd == static_cast(FIONBIO)) { if (*arg) { state |= user_set_non_blocking; } else { // Clearing the non-blocking mode always overrides any internally-set // non-blocking flag. Any subsequent asynchronous operations will need // to re-enable non-blocking I/O. state &= ~(user_set_non_blocking | internal_non_blocking); } } } return result; } int fcntl(int d, int cmd, asio::error_code& ec) { if (d == -1) { ec = asio::error::bad_descriptor; return -1; } errno = 0; int result = error_wrapper(::fcntl(d, cmd), ec); if (result != -1) ec = asio::error_code(); return result; } int fcntl(int d, int cmd, long arg, asio::error_code& ec) { if (d == -1) { ec = asio::error::bad_descriptor; return -1; } errno = 0; int result = error_wrapper(::fcntl(d, cmd, arg), ec); if (result != -1) ec = asio::error_code(); return result; } int poll_read(int d, state_type state, asio::error_code& ec) { if (d == -1) { ec = asio::error::bad_descriptor; return -1; } pollfd fds; fds.fd = d; fds.events = POLLIN; fds.revents = 0; int timeout = (state & user_set_non_blocking) ? 0 : -1; errno = 0; int result = error_wrapper(::poll(&fds, 1, timeout), ec); if (result == 0) ec = (state & user_set_non_blocking) ? asio::error::would_block : asio::error_code(); else if (result > 0) ec = asio::error_code(); return result; } int poll_write(int d, state_type state, asio::error_code& ec) { if (d == -1) { ec = asio::error::bad_descriptor; return -1; } pollfd fds; fds.fd = d; fds.events = POLLOUT; fds.revents = 0; int timeout = (state & user_set_non_blocking) ? 0 : -1; errno = 0; int result = error_wrapper(::poll(&fds, 1, timeout), ec); if (result == 0) ec = (state & user_set_non_blocking) ? 
asio::error::would_block : asio::error_code(); else if (result > 0) ec = asio::error_code(); return result; } } // namespace descriptor_ops } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) #endif // ASIO_DETAIL_IMPL_DESCRIPTOR_OPS_IPP galera-3-25.3.20/asio/asio/detail/impl/win_tss_ptr.ipp0000644000015300001660000000244213042054732022316 0ustar jenkinsjenkins// // detail/impl/win_tss_ptr.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_TSS_PTR_IPP #define ASIO_DETAIL_IMPL_WIN_TSS_PTR_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) #include "asio/detail/throw_error.hpp" #include "asio/detail/win_tss_ptr.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { DWORD win_tss_ptr_create() { #if defined(UNDER_CE) enum { out_of_indexes = 0xFFFFFFFF }; #else enum { out_of_indexes = TLS_OUT_OF_INDEXES }; #endif DWORD tss_key = ::TlsAlloc(); if (tss_key == out_of_indexes) { DWORD last_error = ::GetLastError(); asio::error_code ec(last_error, asio::error::get_system_category()); asio::detail::throw_error(ec, "tss"); } return tss_key; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS) #endif // ASIO_DETAIL_IMPL_WIN_TSS_PTR_IPP galera-3-25.3.20/asio/asio/detail/impl/dev_poll_reactor.hpp0000644000015300001660000000377313042054732023275 0ustar jenkinsjenkins// // detail/impl/dev_poll_reactor.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_HPP #define ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_DEV_POLL) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template void dev_poll_reactor::add_timer_queue(timer_queue& queue) { do_add_timer_queue(queue); } template void dev_poll_reactor::remove_timer_queue(timer_queue& queue) { do_remove_timer_queue(queue); } template void dev_poll_reactor::schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op) { asio::detail::mutex::scoped_lock lock(mutex_); if (shutdown_) { io_service_.post_immediate_completion(op, false); return; } bool earliest = queue.enqueue_timer(time, timer, op); io_service_.work_started(); if (earliest) interrupter_.interrupt(); } template std::size_t dev_poll_reactor::cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled) { asio::detail::mutex::scoped_lock lock(mutex_); op_queue ops; std::size_t n = queue.cancel_timer(timer, ops, max_cancelled); lock.unlock(); io_service_.post_deferred_completions(ops); return n; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_DEV_POLL) #endif // ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_HPP galera-3-25.3.20/asio/asio/detail/impl/posix_tss_ptr.ipp0000644000015300001660000000215513042054732022664 0ustar jenkinsjenkins// // detail/impl/posix_tss_ptr.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_POSIX_TSS_PTR_IPP #define ASIO_DETAIL_IMPL_POSIX_TSS_PTR_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_PTHREADS) #include "asio/detail/posix_tss_ptr.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { void posix_tss_ptr_create(pthread_key_t& key) { int error = ::pthread_key_create(&key, 0); asio::error_code ec(error, asio::error::get_system_category()); asio::detail::throw_error(ec, "tss"); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_PTHREADS) #endif // ASIO_DETAIL_IMPL_POSIX_TSS_PTR_IPP galera-3-25.3.20/asio/asio/detail/impl/epoll_reactor.ipp0000644000015300001660000004247013042054732022602 0ustar jenkinsjenkins// // detail/impl/epoll_reactor.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_EPOLL_REACTOR_IPP #define ASIO_DETAIL_IMPL_EPOLL_REACTOR_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_EPOLL) #include #include #include "asio/detail/epoll_reactor.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #if defined(ASIO_HAS_TIMERFD) # include #endif // defined(ASIO_HAS_TIMERFD) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { epoll_reactor::epoll_reactor(asio::io_service& io_service) : asio::detail::service_base(io_service), io_service_(use_service(io_service)), mutex_(), interrupter_(), epoll_fd_(do_epoll_create()), timer_fd_(do_timerfd_create()), shutdown_(false) { // Add the interrupter's descriptor to epoll. epoll_event ev = { 0, { 0 } }; ev.events = EPOLLIN | EPOLLERR | EPOLLET; ev.data.ptr = &interrupter_; epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, interrupter_.read_descriptor(), &ev); interrupter_.interrupt(); // Add the timer descriptor to epoll. if (timer_fd_ != -1) { ev.events = EPOLLIN | EPOLLERR; ev.data.ptr = &timer_fd_; epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, timer_fd_, &ev); } } epoll_reactor::~epoll_reactor() { if (epoll_fd_ != -1) close(epoll_fd_); if (timer_fd_ != -1) close(timer_fd_); } void epoll_reactor::shutdown_service() { mutex::scoped_lock lock(mutex_); shutdown_ = true; lock.unlock(); op_queue ops; while (descriptor_state* state = registered_descriptors_.first()) { for (int i = 0; i < max_ops; ++i) ops.push(state->op_queue_[i]); state->shutdown_ = true; registered_descriptors_.free(state); } timer_queues_.get_all_timers(ops); io_service_.abandon_operations(ops); } void epoll_reactor::fork_service(asio::io_service::fork_event fork_ev) { if (fork_ev == asio::io_service::fork_child) { if (epoll_fd_ != -1) ::close(epoll_fd_); epoll_fd_ = -1; epoll_fd_ = do_epoll_create(); if (timer_fd_ != -1) ::close(timer_fd_); timer_fd_ = -1; timer_fd_ = do_timerfd_create(); interrupter_.recreate(); // Add the interrupter's descriptor to epoll. epoll_event ev = { 0, { 0 } }; ev.events = EPOLLIN | EPOLLERR | EPOLLET; ev.data.ptr = &interrupter_; epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, interrupter_.read_descriptor(), &ev); interrupter_.interrupt(); // Add the timer descriptor to epoll. if (timer_fd_ != -1) { ev.events = EPOLLIN | EPOLLERR; ev.data.ptr = &timer_fd_; epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, timer_fd_, &ev); } update_timeout(); // Re-register all descriptors with epoll. 
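    // The child recreated epoll_fd_ above, so none of the registrations made
    // against the parent's epoll instance apply to it; every descriptor held
    // in registered_descriptors_ must be added to the fresh instance or its
    // pending operations would never be signalled again.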
mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_); for (descriptor_state* state = registered_descriptors_.first(); state != 0; state = state->next_) { ev.events = state->registered_events_; ev.data.ptr = state; int result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, state->descriptor_, &ev); if (result != 0) { asio::error_code ec(errno, asio::error::get_system_category()); asio::detail::throw_error(ec, "epoll re-registration"); } } } } void epoll_reactor::init_task() { io_service_.init_task(); } int epoll_reactor::register_descriptor(socket_type descriptor, epoll_reactor::per_descriptor_data& descriptor_data) { descriptor_data = allocate_descriptor_state(); { mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); descriptor_data->reactor_ = this; descriptor_data->descriptor_ = descriptor; descriptor_data->shutdown_ = false; } epoll_event ev = { 0, { 0 } }; ev.events = EPOLLIN | EPOLLERR | EPOLLHUP | EPOLLPRI | EPOLLET; descriptor_data->registered_events_ = ev.events; ev.data.ptr = descriptor_data; int result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, descriptor, &ev); if (result != 0) return errno; return 0; } int epoll_reactor::register_internal_descriptor( int op_type, socket_type descriptor, epoll_reactor::per_descriptor_data& descriptor_data, reactor_op* op) { descriptor_data = allocate_descriptor_state(); { mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); descriptor_data->reactor_ = this; descriptor_data->descriptor_ = descriptor; descriptor_data->shutdown_ = false; descriptor_data->op_queue_[op_type].push(op); } epoll_event ev = { 0, { 0 } }; ev.events = EPOLLIN | EPOLLERR | EPOLLHUP | EPOLLPRI | EPOLLET; descriptor_data->registered_events_ = ev.events; ev.data.ptr = descriptor_data; int result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, descriptor, &ev); if (result != 0) return errno; return 0; } void epoll_reactor::move_descriptor(socket_type, epoll_reactor::per_descriptor_data& target_descriptor_data, epoll_reactor::per_descriptor_data& source_descriptor_data) { target_descriptor_data = source_descriptor_data; source_descriptor_data = 0; } void epoll_reactor::start_op(int op_type, socket_type descriptor, epoll_reactor::per_descriptor_data& descriptor_data, reactor_op* op, bool is_continuation, bool allow_speculative) { if (!descriptor_data) { op->ec_ = asio::error::bad_descriptor; post_immediate_completion(op, is_continuation); return; } mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); if (descriptor_data->shutdown_) { post_immediate_completion(op, is_continuation); return; } if (descriptor_data->op_queue_[op_type].empty()) { if (allow_speculative && (op_type != read_op || descriptor_data->op_queue_[except_op].empty())) { if (op->perform()) { descriptor_lock.unlock(); io_service_.post_immediate_completion(op, is_continuation); return; } if (op_type == write_op) { if ((descriptor_data->registered_events_ & EPOLLOUT) == 0) { epoll_event ev = { 0, { 0 } }; ev.events = descriptor_data->registered_events_ | EPOLLOUT; ev.data.ptr = descriptor_data; if (epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, descriptor, &ev) == 0) { descriptor_data->registered_events_ |= ev.events; } else { op->ec_ = asio::error_code(errno, asio::error::get_system_category()); io_service_.post_immediate_completion(op, is_continuation); return; } } } } else { if (op_type == write_op) { descriptor_data->registered_events_ |= EPOLLOUT; } epoll_event ev = { 0, { 0 } }; ev.events = descriptor_data->registered_events_; ev.data.ptr = descriptor_data; epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, descriptor, 
&ev); } } descriptor_data->op_queue_[op_type].push(op); io_service_.work_started(); } void epoll_reactor::cancel_ops(socket_type, epoll_reactor::per_descriptor_data& descriptor_data) { if (!descriptor_data) return; mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); op_queue ops; for (int i = 0; i < max_ops; ++i) { while (reactor_op* op = descriptor_data->op_queue_[i].front()) { op->ec_ = asio::error::operation_aborted; descriptor_data->op_queue_[i].pop(); ops.push(op); } } descriptor_lock.unlock(); io_service_.post_deferred_completions(ops); } void epoll_reactor::deregister_descriptor(socket_type descriptor, epoll_reactor::per_descriptor_data& descriptor_data, bool closing) { if (!descriptor_data) return; mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); if (!descriptor_data->shutdown_) { if (closing) { // The descriptor will be automatically removed from the epoll set when // it is closed. } else { epoll_event ev = { 0, { 0 } }; epoll_ctl(epoll_fd_, EPOLL_CTL_DEL, descriptor, &ev); } op_queue ops; for (int i = 0; i < max_ops; ++i) { while (reactor_op* op = descriptor_data->op_queue_[i].front()) { op->ec_ = asio::error::operation_aborted; descriptor_data->op_queue_[i].pop(); ops.push(op); } } descriptor_data->descriptor_ = -1; descriptor_data->shutdown_ = true; descriptor_lock.unlock(); free_descriptor_state(descriptor_data); descriptor_data = 0; io_service_.post_deferred_completions(ops); } } void epoll_reactor::deregister_internal_descriptor(socket_type descriptor, epoll_reactor::per_descriptor_data& descriptor_data) { if (!descriptor_data) return; mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); if (!descriptor_data->shutdown_) { epoll_event ev = { 0, { 0 } }; epoll_ctl(epoll_fd_, EPOLL_CTL_DEL, descriptor, &ev); op_queue ops; for (int i = 0; i < max_ops; ++i) ops.push(descriptor_data->op_queue_[i]); descriptor_data->descriptor_ = -1; descriptor_data->shutdown_ = true; descriptor_lock.unlock(); free_descriptor_state(descriptor_data); descriptor_data = 0; } } void epoll_reactor::run(bool block, op_queue& ops) { // This code relies on the fact that the task_io_service queues the reactor // task behind all descriptor operations generated by this function. This // means, that by the time we reach this point, any previously returned // descriptor operations have already been dequeued. Therefore it is now safe // for us to reuse and return them for the task_io_service to queue again. // Calculate a timeout only if timerfd is not used. int timeout; if (timer_fd_ != -1) timeout = block ? -1 : 0; else { mutex::scoped_lock lock(mutex_); timeout = block ? get_timeout() : 0; } // Block on the epoll descriptor. epoll_event events[128]; int num_events = epoll_wait(epoll_fd_, events, 128, timeout); #if defined(ASIO_HAS_TIMERFD) bool check_timers = (timer_fd_ == -1); #else // defined(ASIO_HAS_TIMERFD) bool check_timers = true; #endif // defined(ASIO_HAS_TIMERFD) // Dispatch the waiting events. for (int i = 0; i < num_events; ++i) { void* ptr = events[i].data.ptr; if (ptr == &interrupter_) { // No need to reset the interrupter since we're leaving the descriptor // in a ready-to-read state and relying on edge-triggered notifications // to make it so that we only get woken up when the descriptor's epoll // registration is updated. 
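      // In other words, the interrupter's fd is registered with EPOLLET and
      // its pipe is deliberately left readable, so it only shows up in
      // epoll_wait() when interrupt() re-arms it. interrupt() (defined
      // below) does exactly that with a single EPOLL_CTL_MOD call:
      //
      //   epoll_event ev = { 0, { 0 } };
      //   ev.events = EPOLLIN | EPOLLERR | EPOLLET;
      //   ev.data.ptr = &interrupter_;
      //   epoll_ctl(epoll_fd_, EPOLL_CTL_MOD,
      //       interrupter_.read_descriptor(), &ev);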
#if defined(ASIO_HAS_TIMERFD) if (timer_fd_ == -1) check_timers = true; #else // defined(ASIO_HAS_TIMERFD) check_timers = true; #endif // defined(ASIO_HAS_TIMERFD) } #if defined(ASIO_HAS_TIMERFD) else if (ptr == &timer_fd_) { check_timers = true; } #endif // defined(ASIO_HAS_TIMERFD) else { // The descriptor operation doesn't count as work in and of itself, so we // don't call work_started() here. This still allows the io_service to // stop if the only remaining operations are descriptor operations. descriptor_state* descriptor_data = static_cast(ptr); descriptor_data->set_ready_events(events[i].events); ops.push(descriptor_data); } } if (check_timers) { mutex::scoped_lock common_lock(mutex_); timer_queues_.get_ready_timers(ops); #if defined(ASIO_HAS_TIMERFD) if (timer_fd_ != -1) { itimerspec new_timeout; itimerspec old_timeout; int flags = get_timeout(new_timeout); timerfd_settime(timer_fd_, flags, &new_timeout, &old_timeout); } #endif // defined(ASIO_HAS_TIMERFD) } } void epoll_reactor::interrupt() { epoll_event ev = { 0, { 0 } }; ev.events = EPOLLIN | EPOLLERR | EPOLLET; ev.data.ptr = &interrupter_; epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, interrupter_.read_descriptor(), &ev); } int epoll_reactor::do_epoll_create() { #if defined(EPOLL_CLOEXEC) int fd = epoll_create1(EPOLL_CLOEXEC); #else // defined(EPOLL_CLOEXEC) int fd = -1; errno = EINVAL; #endif // defined(EPOLL_CLOEXEC) if (fd == -1 && (errno == EINVAL || errno == ENOSYS)) { fd = epoll_create(epoll_size); if (fd != -1) ::fcntl(fd, F_SETFD, FD_CLOEXEC); } if (fd == -1) { asio::error_code ec(errno, asio::error::get_system_category()); asio::detail::throw_error(ec, "epoll"); } return fd; } int epoll_reactor::do_timerfd_create() { #if defined(ASIO_HAS_TIMERFD) # if defined(TFD_CLOEXEC) int fd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC); # else // defined(TFD_CLOEXEC) int fd = -1; errno = EINVAL; # endif // defined(TFD_CLOEXEC) if (fd == -1 && errno == EINVAL) { fd = timerfd_create(CLOCK_MONOTONIC, 0); if (fd != -1) ::fcntl(fd, F_SETFD, FD_CLOEXEC); } return fd; #else // defined(ASIO_HAS_TIMERFD) return -1; #endif // defined(ASIO_HAS_TIMERFD) } epoll_reactor::descriptor_state* epoll_reactor::allocate_descriptor_state() { mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_); return registered_descriptors_.alloc(); } void epoll_reactor::free_descriptor_state(epoll_reactor::descriptor_state* s) { mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_); registered_descriptors_.free(s); } void epoll_reactor::do_add_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(mutex_); timer_queues_.insert(&queue); } void epoll_reactor::do_remove_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(mutex_); timer_queues_.erase(&queue); } void epoll_reactor::update_timeout() { #if defined(ASIO_HAS_TIMERFD) if (timer_fd_ != -1) { itimerspec new_timeout; itimerspec old_timeout; int flags = get_timeout(new_timeout); timerfd_settime(timer_fd_, flags, &new_timeout, &old_timeout); return; } #endif // defined(ASIO_HAS_TIMERFD) interrupt(); } int epoll_reactor::get_timeout() { // By default we will wait no longer than 5 minutes. This will ensure that // any changes to the system clock are detected after no longer than this. 
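  // 5 * 60 * 1000 == 300000 ms; the microsecond overload below applies the
  // same cap expressed as 5 * 60 * 1000 * 1000 == 300000000 us.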
return timer_queues_.wait_duration_msec(5 * 60 * 1000); } #if defined(ASIO_HAS_TIMERFD) int epoll_reactor::get_timeout(itimerspec& ts) { ts.it_interval.tv_sec = 0; ts.it_interval.tv_nsec = 0; long usec = timer_queues_.wait_duration_usec(5 * 60 * 1000 * 1000); ts.it_value.tv_sec = usec / 1000000; ts.it_value.tv_nsec = usec ? (usec % 1000000) * 1000 : 1; return usec ? 0 : TFD_TIMER_ABSTIME; } #endif // defined(ASIO_HAS_TIMERFD) struct epoll_reactor::perform_io_cleanup_on_block_exit { explicit perform_io_cleanup_on_block_exit(epoll_reactor* r) : reactor_(r), first_op_(0) { } ~perform_io_cleanup_on_block_exit() { if (first_op_) { // Post the remaining completed operations for invocation. if (!ops_.empty()) reactor_->io_service_.post_deferred_completions(ops_); // A user-initiated operation has completed, but there's no need to // explicitly call work_finished() here. Instead, we'll take advantage of // the fact that the task_io_service will call work_finished() once we // return. } else { // No user-initiated operations have completed, so we need to compensate // for the work_finished() call that the task_io_service will make once // this operation returns. reactor_->io_service_.work_started(); } } epoll_reactor* reactor_; op_queue ops_; operation* first_op_; }; epoll_reactor::descriptor_state::descriptor_state() : operation(&epoll_reactor::descriptor_state::do_complete) { } operation* epoll_reactor::descriptor_state::perform_io(uint32_t events) { mutex_.lock(); perform_io_cleanup_on_block_exit io_cleanup(reactor_); mutex::scoped_lock descriptor_lock(mutex_, mutex::scoped_lock::adopt_lock); // Exception operations must be processed first to ensure that any // out-of-band data is read before normal data. static const int flag[max_ops] = { EPOLLIN, EPOLLOUT, EPOLLPRI }; for (int j = max_ops - 1; j >= 0; --j) { if (events & (flag[j] | EPOLLERR | EPOLLHUP)) { while (reactor_op* op = op_queue_[j].front()) { if (op->perform()) { op_queue_[j].pop(); io_cleanup.ops_.push(op); } else break; } } } // The first operation will be returned for completion now. The others will // be posted for later by the io_cleanup object's destructor. io_cleanup.first_op_ = io_cleanup.ops_.front(); io_cleanup.ops_.pop(); return io_cleanup.first_op_; } void epoll_reactor::descriptor_state::do_complete( io_service_impl* owner, operation* base, const asio::error_code& ec, std::size_t bytes_transferred) { if (owner) { descriptor_state* descriptor_data = static_cast(base); uint32_t events = static_cast(bytes_transferred); if (operation* op = descriptor_data->perform_io(events)) { op->complete(*owner, ec, 0); } } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_EPOLL) #endif // ASIO_DETAIL_IMPL_EPOLL_REACTOR_IPP galera-3-25.3.20/asio/asio/detail/impl/win_object_handle_service.ipp0000644000015300001660000002712313042054732025124 0ustar jenkinsjenkins// // detail/impl/win_object_handle_service.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2011 Boris Schaeling (boris@highscore.de) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_OBJECT_HANDLE_SERVICE_IPP #define ASIO_DETAIL_IMPL_WIN_OBJECT_HANDLE_SERVICE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) #include "asio/detail/win_object_handle_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { win_object_handle_service::win_object_handle_service( asio::io_service& io_service) : io_service_(asio::use_service(io_service)), mutex_(), impl_list_(0), shutdown_(false) { } void win_object_handle_service::shutdown_service() { mutex::scoped_lock lock(mutex_); // Setting this flag to true prevents new objects from being registered, and // new asynchronous wait operations from being started. We only need to worry // about cleaning up the operations that are currently in progress. shutdown_ = true; op_queue ops; for (implementation_type* impl = impl_list_; impl; impl = impl->next_) ops.push(impl->op_queue_); lock.unlock(); io_service_.abandon_operations(ops); } void win_object_handle_service::construct( win_object_handle_service::implementation_type& impl) { impl.handle_ = INVALID_HANDLE_VALUE; impl.wait_handle_ = INVALID_HANDLE_VALUE; impl.owner_ = this; // Insert implementation into linked list of all implementations. mutex::scoped_lock lock(mutex_); if (!shutdown_) { impl.next_ = impl_list_; impl.prev_ = 0; if (impl_list_) impl_list_->prev_ = &impl; impl_list_ = &impl; } } void win_object_handle_service::move_construct( win_object_handle_service::implementation_type& impl, win_object_handle_service::implementation_type& other_impl) { mutex::scoped_lock lock(mutex_); // Insert implementation into linked list of all implementations. if (!shutdown_) { impl.next_ = impl_list_; impl.prev_ = 0; if (impl_list_) impl_list_->prev_ = &impl; impl_list_ = &impl; } impl.handle_ = other_impl.handle_; other_impl.handle_ = INVALID_HANDLE_VALUE; impl.wait_handle_ = other_impl.wait_handle_; other_impl.wait_handle_ = INVALID_HANDLE_VALUE; impl.op_queue_.push(other_impl.op_queue_); impl.owner_ = this; // We must not hold the lock while calling UnregisterWaitEx. This is because // the registered callback function might be invoked while we are waiting for // UnregisterWaitEx to complete. lock.unlock(); if (impl.wait_handle_ != INVALID_HANDLE_VALUE) ::UnregisterWaitEx(impl.wait_handle_, INVALID_HANDLE_VALUE); if (!impl.op_queue_.empty()) register_wait_callback(impl, lock); } void win_object_handle_service::move_assign( win_object_handle_service::implementation_type& impl, win_object_handle_service& other_service, win_object_handle_service::implementation_type& other_impl) { asio::error_code ignored_ec; close(impl, ignored_ec); mutex::scoped_lock lock(mutex_); if (this != &other_service) { // Remove implementation from linked list of all implementations. if (impl_list_ == &impl) impl_list_ = impl.next_; if (impl.prev_) impl.prev_->next_ = impl.next_; if (impl.next_) impl.next_->prev_= impl.prev_; impl.next_ = 0; impl.prev_ = 0; } impl.handle_ = other_impl.handle_; other_impl.handle_ = INVALID_HANDLE_VALUE; impl.wait_handle_ = other_impl.wait_handle_; other_impl.wait_handle_ = INVALID_HANDLE_VALUE; impl.op_queue_.push(other_impl.op_queue_); impl.owner_ = this; if (this != &other_service) { // Insert implementation into linked list of all implementations. 
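    // impl_list_ is an intrusive doubly-linked list of every live
    // implementation; shutdown_service() walks it to abandon any queued
    // operations, so an implementation must always sit in exactly one
    // service's list, which is why it is unlinked above and re-linked here
    // when ownership changes.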
impl.next_ = other_service.impl_list_; impl.prev_ = 0; if (other_service.impl_list_) other_service.impl_list_->prev_ = &impl; other_service.impl_list_ = &impl; } // We must not hold the lock while calling UnregisterWaitEx. This is because // the registered callback function might be invoked while we are waiting for // UnregisterWaitEx to complete. lock.unlock(); if (impl.wait_handle_ != INVALID_HANDLE_VALUE) ::UnregisterWaitEx(impl.wait_handle_, INVALID_HANDLE_VALUE); if (!impl.op_queue_.empty()) register_wait_callback(impl, lock); } void win_object_handle_service::destroy( win_object_handle_service::implementation_type& impl) { mutex::scoped_lock lock(mutex_); // Remove implementation from linked list of all implementations. if (impl_list_ == &impl) impl_list_ = impl.next_; if (impl.prev_) impl.prev_->next_ = impl.next_; if (impl.next_) impl.next_->prev_= impl.prev_; impl.next_ = 0; impl.prev_ = 0; if (is_open(impl)) { ASIO_HANDLER_OPERATION(("object_handle", &impl, "close")); HANDLE wait_handle = impl.wait_handle_; impl.wait_handle_ = INVALID_HANDLE_VALUE; op_queue ops; while (wait_op* op = impl.op_queue_.front()) { op->ec_ = asio::error::operation_aborted; impl.op_queue_.pop(); ops.push(op); } // We must not hold the lock while calling UnregisterWaitEx. This is // because the registered callback function might be invoked while we are // waiting for UnregisterWaitEx to complete. lock.unlock(); if (wait_handle != INVALID_HANDLE_VALUE) ::UnregisterWaitEx(wait_handle, INVALID_HANDLE_VALUE); ::CloseHandle(impl.handle_); impl.handle_ = INVALID_HANDLE_VALUE; io_service_.post_deferred_completions(ops); } } asio::error_code win_object_handle_service::assign( win_object_handle_service::implementation_type& impl, const native_handle_type& handle, asio::error_code& ec) { if (is_open(impl)) { ec = asio::error::already_open; return ec; } impl.handle_ = handle; ec = asio::error_code(); return ec; } asio::error_code win_object_handle_service::close( win_object_handle_service::implementation_type& impl, asio::error_code& ec) { if (is_open(impl)) { ASIO_HANDLER_OPERATION(("object_handle", &impl, "close")); mutex::scoped_lock lock(mutex_); HANDLE wait_handle = impl.wait_handle_; impl.wait_handle_ = INVALID_HANDLE_VALUE; op_queue completed_ops; while (wait_op* op = impl.op_queue_.front()) { impl.op_queue_.pop(); op->ec_ = asio::error::operation_aborted; completed_ops.push(op); } // We must not hold the lock while calling UnregisterWaitEx. This is // because the registered callback function might be invoked while we are // waiting for UnregisterWaitEx to complete. 
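    // (The deadlock being avoided: wait_callback() below acquires this same
    // mutex_, and UnregisterWaitEx with INVALID_HANDLE_VALUE blocks until
    // any in-flight callback has returned, so calling it while holding the
    // lock could wait forever on a callback that is itself waiting for the
    // lock.)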
lock.unlock(); if (wait_handle != INVALID_HANDLE_VALUE) ::UnregisterWaitEx(wait_handle, INVALID_HANDLE_VALUE); if (::CloseHandle(impl.handle_)) { impl.handle_ = INVALID_HANDLE_VALUE; ec = asio::error_code(); } else { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); } io_service_.post_deferred_completions(completed_ops); } else { ec = asio::error_code(); } return ec; } asio::error_code win_object_handle_service::cancel( win_object_handle_service::implementation_type& impl, asio::error_code& ec) { if (is_open(impl)) { ASIO_HANDLER_OPERATION(("object_handle", &impl, "cancel")); mutex::scoped_lock lock(mutex_); HANDLE wait_handle = impl.wait_handle_; impl.wait_handle_ = INVALID_HANDLE_VALUE; op_queue completed_ops; while (wait_op* op = impl.op_queue_.front()) { op->ec_ = asio::error::operation_aborted; impl.op_queue_.pop(); completed_ops.push(op); } // We must not hold the lock while calling UnregisterWaitEx. This is // because the registered callback function might be invoked while we are // waiting for UnregisterWaitEx to complete. lock.unlock(); if (wait_handle != INVALID_HANDLE_VALUE) ::UnregisterWaitEx(wait_handle, INVALID_HANDLE_VALUE); ec = asio::error_code(); io_service_.post_deferred_completions(completed_ops); } else { ec = asio::error::bad_descriptor; } return ec; } void win_object_handle_service::wait( win_object_handle_service::implementation_type& impl, asio::error_code& ec) { switch (::WaitForSingleObject(impl.handle_, INFINITE)) { case WAIT_FAILED: { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); break; } case WAIT_OBJECT_0: case WAIT_ABANDONED: default: ec = asio::error_code(); break; } } void win_object_handle_service::start_wait_op( win_object_handle_service::implementation_type& impl, wait_op* op) { io_service_.work_started(); if (is_open(impl)) { mutex::scoped_lock lock(mutex_); if (!shutdown_) { impl.op_queue_.push(op); // Only the first operation to be queued gets to register a wait callback. // Subsequent operations have to wait for the first to finish. 
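      // A minimal user-level sketch of that serialisation, assuming the
      // public asio::windows::object_handle API, an io_service ios and two
      // arbitrary handlers h1/h2 (illustration only):
      //
      //   asio::windows::object_handle oh(ios,
      //       ::CreateEventW(0, TRUE, FALSE, 0));
      //   oh.async_wait(h1);  // front of the queue: registers the callback
      //   oh.async_wait(h2);  // queued; re-registered from wait_callback()
      //   ios.run();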
if (impl.op_queue_.front() == op) register_wait_callback(impl, lock); } else { lock.unlock(); io_service_.post_deferred_completion(op); } } else { op->ec_ = asio::error::bad_descriptor; io_service_.post_deferred_completion(op); } } void win_object_handle_service::register_wait_callback( win_object_handle_service::implementation_type& impl, mutex::scoped_lock& lock) { lock.lock(); if (!RegisterWaitForSingleObject(&impl.wait_handle_, impl.handle_, &win_object_handle_service::wait_callback, &impl, INFINITE, WT_EXECUTEONLYONCE)) { DWORD last_error = ::GetLastError(); asio::error_code ec(last_error, asio::error::get_system_category()); op_queue completed_ops; while (wait_op* op = impl.op_queue_.front()) { op->ec_ = ec; impl.op_queue_.pop(); completed_ops.push(op); } lock.unlock(); io_service_.post_deferred_completions(completed_ops); } } void win_object_handle_service::wait_callback(PVOID param, BOOLEAN) { implementation_type* impl = static_cast(param); mutex::scoped_lock lock(impl->owner_->mutex_); if (impl->wait_handle_ != INVALID_HANDLE_VALUE) { ::UnregisterWaitEx(impl->wait_handle_, NULL); impl->wait_handle_ = INVALID_HANDLE_VALUE; } if (wait_op* op = impl->op_queue_.front()) { op_queue completed_ops; op->ec_ = asio::error_code(); impl->op_queue_.pop(); completed_ops.push(op); if (!impl->op_queue_.empty()) { if (!RegisterWaitForSingleObject(&impl->wait_handle_, impl->handle_, &win_object_handle_service::wait_callback, param, INFINITE, WT_EXECUTEONLYONCE)) { DWORD last_error = ::GetLastError(); asio::error_code ec(last_error, asio::error::get_system_category()); while ((op = impl->op_queue_.front()) != 0) { op->ec_ = ec; impl->op_queue_.pop(); completed_ops.push(op); } } } io_service_impl& ios = impl->owner_->io_service_; lock.unlock(); ios.post_deferred_completions(completed_ops); } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) #endif // ASIO_DETAIL_IMPL_WIN_OBJECT_HANDLE_SERVICE_IPP galera-3-25.3.20/asio/asio/detail/impl/reactive_socket_service_base.ipp0000644000015300001660000001662213042054732025634 0ustar jenkinsjenkins// // detail/reactive_socket_service_base.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_REACTIVE_SOCKET_SERVICE_BASE_IPP #define ASIO_DETAIL_IMPL_REACTIVE_SOCKET_SERVICE_BASE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_IOCP) \ && !defined(ASIO_WINDOWS_RUNTIME) #include "asio/detail/reactive_socket_service_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { reactive_socket_service_base::reactive_socket_service_base( asio::io_service& io_service) : reactor_(use_service(io_service)) { reactor_.init_task(); } void reactive_socket_service_base::shutdown_service() { } void reactive_socket_service_base::construct( reactive_socket_service_base::base_implementation_type& impl) { impl.socket_ = invalid_socket; impl.state_ = 0; } void reactive_socket_service_base::base_move_construct( reactive_socket_service_base::base_implementation_type& impl, reactive_socket_service_base::base_implementation_type& other_impl) { impl.socket_ = other_impl.socket_; other_impl.socket_ = invalid_socket; impl.state_ = other_impl.state_; other_impl.state_ = 0; reactor_.move_descriptor(impl.socket_, impl.reactor_data_, other_impl.reactor_data_); } void reactive_socket_service_base::base_move_assign( reactive_socket_service_base::base_implementation_type& impl, reactive_socket_service_base& other_service, reactive_socket_service_base::base_implementation_type& other_impl) { destroy(impl); impl.socket_ = other_impl.socket_; other_impl.socket_ = invalid_socket; impl.state_ = other_impl.state_; other_impl.state_ = 0; other_service.reactor_.move_descriptor(impl.socket_, impl.reactor_data_, other_impl.reactor_data_); } void reactive_socket_service_base::destroy( reactive_socket_service_base::base_implementation_type& impl) { if (impl.socket_ != invalid_socket) { ASIO_HANDLER_OPERATION(("socket", &impl, "close")); reactor_.deregister_descriptor(impl.socket_, impl.reactor_data_, (impl.state_ & socket_ops::possible_dup) == 0); asio::error_code ignored_ec; socket_ops::close(impl.socket_, impl.state_, true, ignored_ec); } } asio::error_code reactive_socket_service_base::close( reactive_socket_service_base::base_implementation_type& impl, asio::error_code& ec) { if (is_open(impl)) { ASIO_HANDLER_OPERATION(("socket", &impl, "close")); reactor_.deregister_descriptor(impl.socket_, impl.reactor_data_, (impl.state_ & socket_ops::possible_dup) == 0); } socket_ops::close(impl.socket_, impl.state_, false, ec); // The descriptor is closed by the OS even if close() returns an error. // // (Actually, POSIX says the state of the descriptor is unspecified. On // Linux the descriptor is apparently closed anyway; e.g. see // http://lkml.org/lkml/2005/9/10/129 // We'll just have to assume that other OSes follow the same behaviour. The // known exception is when Windows's closesocket() function fails with // WSAEWOULDBLOCK, but this case is handled inside socket_ops::close(). 
construct(impl); return ec; } asio::error_code reactive_socket_service_base::cancel( reactive_socket_service_base::base_implementation_type& impl, asio::error_code& ec) { if (!is_open(impl)) { ec = asio::error::bad_descriptor; return ec; } ASIO_HANDLER_OPERATION(("socket", &impl, "cancel")); reactor_.cancel_ops(impl.socket_, impl.reactor_data_); ec = asio::error_code(); return ec; } asio::error_code reactive_socket_service_base::do_open( reactive_socket_service_base::base_implementation_type& impl, int af, int type, int protocol, asio::error_code& ec) { if (is_open(impl)) { ec = asio::error::already_open; return ec; } socket_holder sock(socket_ops::socket(af, type, protocol, ec)); if (sock.get() == invalid_socket) return ec; if (int err = reactor_.register_descriptor(sock.get(), impl.reactor_data_)) { ec = asio::error_code(err, asio::error::get_system_category()); return ec; } impl.socket_ = sock.release(); switch (type) { case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break; case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break; default: impl.state_ = 0; break; } ec = asio::error_code(); return ec; } asio::error_code reactive_socket_service_base::do_assign( reactive_socket_service_base::base_implementation_type& impl, int type, const reactive_socket_service_base::native_handle_type& native_socket, asio::error_code& ec) { if (is_open(impl)) { ec = asio::error::already_open; return ec; } if (int err = reactor_.register_descriptor( native_socket, impl.reactor_data_)) { ec = asio::error_code(err, asio::error::get_system_category()); return ec; } impl.socket_ = native_socket; switch (type) { case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break; case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break; default: impl.state_ = 0; break; } impl.state_ |= socket_ops::possible_dup; ec = asio::error_code(); return ec; } void reactive_socket_service_base::start_op( reactive_socket_service_base::base_implementation_type& impl, int op_type, reactor_op* op, bool is_continuation, bool is_non_blocking, bool noop) { if (!noop) { if ((impl.state_ & socket_ops::non_blocking) || socket_ops::set_internal_non_blocking( impl.socket_, impl.state_, true, op->ec_)) { reactor_.start_op(op_type, impl.socket_, impl.reactor_data_, op, is_continuation, is_non_blocking); return; } } reactor_.post_immediate_completion(op, is_continuation); } void reactive_socket_service_base::start_accept_op( reactive_socket_service_base::base_implementation_type& impl, reactor_op* op, bool is_continuation, bool peer_is_open) { if (!peer_is_open) start_op(impl, reactor::read_op, op, true, is_continuation, false); else { op->ec_ = asio::error::already_open; reactor_.post_immediate_completion(op, is_continuation); } } void reactive_socket_service_base::start_connect_op( reactive_socket_service_base::base_implementation_type& impl, reactor_op* op, bool is_continuation, const socket_addr_type* addr, size_t addrlen) { if ((impl.state_ & socket_ops::non_blocking) || socket_ops::set_internal_non_blocking( impl.socket_, impl.state_, true, op->ec_)) { if (socket_ops::connect(impl.socket_, addr, addrlen, op->ec_) != 0) { if (op->ec_ == asio::error::in_progress || op->ec_ == asio::error::would_block) { op->ec_ = asio::error_code(); reactor_.start_op(reactor::connect_op, impl.socket_, impl.reactor_data_, op, is_continuation, false); return; } } } reactor_.post_immediate_completion(op, is_continuation); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // 
!defined(ASIO_HAS_IOCP) // && !defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_IMPL_REACTIVE_SOCKET_SERVICE_BASE_IPP galera-3-25.3.20/asio/asio/detail/impl/timer_queue_set.ipp0000644000015300001660000000427413042054732023147 0ustar jenkinsjenkins// // detail/impl/timer_queue_set.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_TIMER_QUEUE_SET_IPP #define ASIO_DETAIL_IMPL_TIMER_QUEUE_SET_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/timer_queue_set.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { timer_queue_set::timer_queue_set() : first_(0) { } void timer_queue_set::insert(timer_queue_base* q) { q->next_ = first_; first_ = q; } void timer_queue_set::erase(timer_queue_base* q) { if (first_) { if (q == first_) { first_ = q->next_; q->next_ = 0; return; } for (timer_queue_base* p = first_; p->next_; p = p->next_) { if (p->next_ == q) { p->next_ = q->next_; q->next_ = 0; return; } } } } bool timer_queue_set::all_empty() const { for (timer_queue_base* p = first_; p; p = p->next_) if (!p->empty()) return false; return true; } long timer_queue_set::wait_duration_msec(long max_duration) const { long min_duration = max_duration; for (timer_queue_base* p = first_; p; p = p->next_) min_duration = p->wait_duration_msec(min_duration); return min_duration; } long timer_queue_set::wait_duration_usec(long max_duration) const { long min_duration = max_duration; for (timer_queue_base* p = first_; p; p = p->next_) min_duration = p->wait_duration_usec(min_duration); return min_duration; } void timer_queue_set::get_ready_timers(op_queue& ops) { for (timer_queue_base* p = first_; p; p = p->next_) p->get_ready_timers(ops); } void timer_queue_set::get_all_timers(op_queue& ops) { for (timer_queue_base* p = first_; p; p = p->next_) p->get_all_timers(ops); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IMPL_TIMER_QUEUE_SET_IPP galera-3-25.3.20/asio/asio/detail/impl/select_reactor.ipp0000644000015300001660000002064513042054732022746 0ustar jenkinsjenkins// // detail/impl/select_reactor.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_SELECT_REACTOR_IPP #define ASIO_DETAIL_IMPL_SELECT_REACTOR_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) \ || (!defined(ASIO_HAS_DEV_POLL) \ && !defined(ASIO_HAS_EPOLL) \ && !defined(ASIO_HAS_KQUEUE) \ && !defined(ASIO_WINDOWS_RUNTIME)) #include "asio/detail/bind_handler.hpp" #include "asio/detail/fd_set_adapter.hpp" #include "asio/detail/select_reactor.hpp" #include "asio/detail/signal_blocker.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { select_reactor::select_reactor(asio::io_service& io_service) : asio::detail::service_base(io_service), io_service_(use_service(io_service)), mutex_(), interrupter_(), #if defined(ASIO_HAS_IOCP) stop_thread_(false), thread_(0), #endif // defined(ASIO_HAS_IOCP) shutdown_(false) { #if defined(ASIO_HAS_IOCP) asio::detail::signal_blocker sb; thread_ = new asio::detail::thread( bind_handler(&select_reactor::call_run_thread, this)); #endif // defined(ASIO_HAS_IOCP) } select_reactor::~select_reactor() { shutdown_service(); } void select_reactor::shutdown_service() { asio::detail::mutex::scoped_lock lock(mutex_); shutdown_ = true; #if defined(ASIO_HAS_IOCP) stop_thread_ = true; #endif // defined(ASIO_HAS_IOCP) lock.unlock(); #if defined(ASIO_HAS_IOCP) if (thread_) { interrupter_.interrupt(); thread_->join(); delete thread_; thread_ = 0; } #endif // defined(ASIO_HAS_IOCP) op_queue ops; for (int i = 0; i < max_ops; ++i) op_queue_[i].get_all_operations(ops); timer_queues_.get_all_timers(ops); io_service_.abandon_operations(ops); } void select_reactor::fork_service(asio::io_service::fork_event fork_ev) { if (fork_ev == asio::io_service::fork_child) interrupter_.recreate(); } void select_reactor::init_task() { io_service_.init_task(); } int select_reactor::register_descriptor(socket_type, select_reactor::per_descriptor_data&) { return 0; } int select_reactor::register_internal_descriptor( int op_type, socket_type descriptor, select_reactor::per_descriptor_data&, reactor_op* op) { asio::detail::mutex::scoped_lock lock(mutex_); op_queue_[op_type].enqueue_operation(descriptor, op); interrupter_.interrupt(); return 0; } void select_reactor::move_descriptor(socket_type, select_reactor::per_descriptor_data&, select_reactor::per_descriptor_data&) { } void select_reactor::start_op(int op_type, socket_type descriptor, select_reactor::per_descriptor_data&, reactor_op* op, bool is_continuation, bool) { asio::detail::mutex::scoped_lock lock(mutex_); if (shutdown_) { post_immediate_completion(op, is_continuation); return; } bool first = op_queue_[op_type].enqueue_operation(descriptor, op); io_service_.work_started(); if (first) interrupter_.interrupt(); } void select_reactor::cancel_ops(socket_type descriptor, select_reactor::per_descriptor_data&) { asio::detail::mutex::scoped_lock lock(mutex_); cancel_ops_unlocked(descriptor, asio::error::operation_aborted); } void select_reactor::deregister_descriptor(socket_type descriptor, select_reactor::per_descriptor_data&, bool) { asio::detail::mutex::scoped_lock lock(mutex_); cancel_ops_unlocked(descriptor, asio::error::operation_aborted); } void select_reactor::deregister_internal_descriptor( socket_type descriptor, select_reactor::per_descriptor_data&) { asio::detail::mutex::scoped_lock lock(mutex_); op_queue ops; for (int 
i = 0; i < max_ops; ++i) op_queue_[i].cancel_operations(descriptor, ops); } void select_reactor::run(bool block, op_queue& ops) { asio::detail::mutex::scoped_lock lock(mutex_); #if defined(ASIO_HAS_IOCP) // Check if the thread is supposed to stop. if (stop_thread_) return; #endif // defined(ASIO_HAS_IOCP) // Set up the descriptor sets. for (int i = 0; i < max_select_ops; ++i) fd_sets_[i].reset(); fd_sets_[read_op].set(interrupter_.read_descriptor()); socket_type max_fd = 0; bool have_work_to_do = !timer_queues_.all_empty(); for (int i = 0; i < max_select_ops; ++i) { have_work_to_do = have_work_to_do || !op_queue_[i].empty(); fd_sets_[i].set(op_queue_[i], ops); if (fd_sets_[i].max_descriptor() > max_fd) max_fd = fd_sets_[i].max_descriptor(); } #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Connection operations on Windows use both except and write fd_sets. have_work_to_do = have_work_to_do || !op_queue_[connect_op].empty(); fd_sets_[write_op].set(op_queue_[connect_op], ops); if (fd_sets_[write_op].max_descriptor() > max_fd) max_fd = fd_sets_[write_op].max_descriptor(); fd_sets_[except_op].set(op_queue_[connect_op], ops); if (fd_sets_[except_op].max_descriptor() > max_fd) max_fd = fd_sets_[except_op].max_descriptor(); #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) // We can return immediately if there's no work to do and the reactor is // not supposed to block. if (!block && !have_work_to_do) return; // Determine how long to block while waiting for events. timeval tv_buf = { 0, 0 }; timeval* tv = block ? get_timeout(tv_buf) : &tv_buf; lock.unlock(); // Block on the select call until descriptors become ready. asio::error_code ec; int retval = socket_ops::select(static_cast(max_fd + 1), fd_sets_[read_op], fd_sets_[write_op], fd_sets_[except_op], tv, ec); // Reset the interrupter. if (retval > 0 && fd_sets_[read_op].is_set(interrupter_.read_descriptor())) { interrupter_.reset(); --retval; } lock.lock(); // Dispatch all ready operations. if (retval > 0) { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Connection operations on Windows use both except and write fd_sets. fd_sets_[except_op].perform(op_queue_[connect_op], ops); fd_sets_[write_op].perform(op_queue_[connect_op], ops); #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Exception operations must be processed first to ensure that any // out-of-band data is read before normal data. for (int i = max_select_ops - 1; i >= 0; --i) fd_sets_[i].perform(op_queue_[i], ops); } timer_queues_.get_ready_timers(ops); } void select_reactor::interrupt() { interrupter_.interrupt(); } #if defined(ASIO_HAS_IOCP) void select_reactor::run_thread() { asio::detail::mutex::scoped_lock lock(mutex_); while (!stop_thread_) { lock.unlock(); op_queue ops; run(true, ops); io_service_.post_deferred_completions(ops); lock.lock(); } } void select_reactor::call_run_thread(select_reactor* reactor) { reactor->run_thread(); } #endif // defined(ASIO_HAS_IOCP) void select_reactor::do_add_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(mutex_); timer_queues_.insert(&queue); } void select_reactor::do_remove_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(mutex_); timer_queues_.erase(&queue); } timeval* select_reactor::get_timeout(timeval& tv) { // By default we will wait no longer than 5 minutes. This will ensure that // any changes to the system clock are detected after no longer than this. 
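  // 5 * 60 * 1000 * 1000 microseconds is the 5 minute cap; the nearest pending
  // timer, if any, shortens the wait further. The result is then split into the
  // whole seconds and microsecond remainder expected by timeval.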
long usec = timer_queues_.wait_duration_usec(5 * 60 * 1000 * 1000); tv.tv_sec = usec / 1000000; tv.tv_usec = usec % 1000000; return &tv; } void select_reactor::cancel_ops_unlocked(socket_type descriptor, const asio::error_code& ec) { bool need_interrupt = false; op_queue ops; for (int i = 0; i < max_ops; ++i) need_interrupt = op_queue_[i].cancel_operations( descriptor, ops, ec) || need_interrupt; io_service_.post_deferred_completions(ops); if (need_interrupt) interrupter_.interrupt(); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) // || (!defined(ASIO_HAS_DEV_POLL) // && !defined(ASIO_HAS_EPOLL) // && !defined(ASIO_HAS_KQUEUE)) // && !defined(ASIO_WINDOWS_RUNTIME)) #endif // ASIO_DETAIL_IMPL_SELECT_REACTOR_IPP galera-3-25.3.20/asio/asio/detail/impl/win_iocp_socket_service_base.ipp0000644000015300001660000005374213042054732025645 0ustar jenkinsjenkins// // detail/impl/win_iocp_socket_service_base.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_IOCP_SOCKET_SERVICE_BASE_IPP #define ASIO_DETAIL_IMPL_WIN_IOCP_SOCKET_SERVICE_BASE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/win_iocp_socket_service_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { win_iocp_socket_service_base::win_iocp_socket_service_base( asio::io_service& io_service) : io_service_(io_service), iocp_service_(use_service(io_service)), reactor_(0), connect_ex_(0), mutex_(), impl_list_(0) { } void win_iocp_socket_service_base::shutdown_service() { // Close all implementations, causing all operations to complete. asio::detail::mutex::scoped_lock lock(mutex_); base_implementation_type* impl = impl_list_; while (impl) { asio::error_code ignored_ec; close_for_destruction(*impl); impl = impl->next_; } } void win_iocp_socket_service_base::construct( win_iocp_socket_service_base::base_implementation_type& impl) { impl.socket_ = invalid_socket; impl.state_ = 0; impl.cancel_token_.reset(); #if defined(ASIO_ENABLE_CANCELIO) impl.safe_cancellation_thread_id_ = 0; #endif // defined(ASIO_ENABLE_CANCELIO) // Insert implementation into linked list of all implementations. asio::detail::mutex::scoped_lock lock(mutex_); impl.next_ = impl_list_; impl.prev_ = 0; if (impl_list_) impl_list_->prev_ = &impl; impl_list_ = &impl; } void win_iocp_socket_service_base::base_move_construct( win_iocp_socket_service_base::base_implementation_type& impl, win_iocp_socket_service_base::base_implementation_type& other_impl) { impl.socket_ = other_impl.socket_; other_impl.socket_ = invalid_socket; impl.state_ = other_impl.state_; other_impl.state_ = 0; impl.cancel_token_ = other_impl.cancel_token_; other_impl.cancel_token_.reset(); #if defined(ASIO_ENABLE_CANCELIO) impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_; other_impl.safe_cancellation_thread_id_ = 0; #endif // defined(ASIO_ENABLE_CANCELIO) // Insert implementation into linked list of all implementations. 
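  // The list is guarded by the service mutex and new entries are pushed at the
  // head, using the same insertion as in construct() above.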
asio::detail::mutex::scoped_lock lock(mutex_); impl.next_ = impl_list_; impl.prev_ = 0; if (impl_list_) impl_list_->prev_ = &impl; impl_list_ = &impl; } void win_iocp_socket_service_base::base_move_assign( win_iocp_socket_service_base::base_implementation_type& impl, win_iocp_socket_service_base& other_service, win_iocp_socket_service_base::base_implementation_type& other_impl) { close_for_destruction(impl); if (this != &other_service) { // Remove implementation from linked list of all implementations. asio::detail::mutex::scoped_lock lock(mutex_); if (impl_list_ == &impl) impl_list_ = impl.next_; if (impl.prev_) impl.prev_->next_ = impl.next_; if (impl.next_) impl.next_->prev_= impl.prev_; impl.next_ = 0; impl.prev_ = 0; } impl.socket_ = other_impl.socket_; other_impl.socket_ = invalid_socket; impl.state_ = other_impl.state_; other_impl.state_ = 0; impl.cancel_token_ = other_impl.cancel_token_; other_impl.cancel_token_.reset(); #if defined(ASIO_ENABLE_CANCELIO) impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_; other_impl.safe_cancellation_thread_id_ = 0; #endif // defined(ASIO_ENABLE_CANCELIO) if (this != &other_service) { // Insert implementation into linked list of all implementations. asio::detail::mutex::scoped_lock lock(other_service.mutex_); impl.next_ = other_service.impl_list_; impl.prev_ = 0; if (other_service.impl_list_) other_service.impl_list_->prev_ = &impl; other_service.impl_list_ = &impl; } } void win_iocp_socket_service_base::destroy( win_iocp_socket_service_base::base_implementation_type& impl) { close_for_destruction(impl); // Remove implementation from linked list of all implementations. asio::detail::mutex::scoped_lock lock(mutex_); if (impl_list_ == &impl) impl_list_ = impl.next_; if (impl.prev_) impl.prev_->next_ = impl.next_; if (impl.next_) impl.next_->prev_= impl.prev_; impl.next_ = 0; impl.prev_ = 0; } asio::error_code win_iocp_socket_service_base::close( win_iocp_socket_service_base::base_implementation_type& impl, asio::error_code& ec) { if (is_open(impl)) { ASIO_HANDLER_OPERATION(("socket", &impl, "close")); // Check if the reactor was created, in which case we need to close the // socket on the reactor as well to cancel any operations that might be // running there. reactor* r = static_cast( interlocked_compare_exchange_pointer( reinterpret_cast(&reactor_), 0, 0)); if (r) r->deregister_descriptor(impl.socket_, impl.reactor_data_, true); } socket_ops::close(impl.socket_, impl.state_, false, ec); impl.socket_ = invalid_socket; impl.state_ = 0; impl.cancel_token_.reset(); #if defined(ASIO_ENABLE_CANCELIO) impl.safe_cancellation_thread_id_ = 0; #endif // defined(ASIO_ENABLE_CANCELIO) return ec; } asio::error_code win_iocp_socket_service_base::cancel( win_iocp_socket_service_base::base_implementation_type& impl, asio::error_code& ec) { if (!is_open(impl)) { ec = asio::error::bad_descriptor; return ec; } ASIO_HANDLER_OPERATION(("socket", &impl, "cancel")); if (FARPROC cancel_io_ex_ptr = ::GetProcAddress( ::GetModuleHandleA("KERNEL32"), "CancelIoEx")) { // The version of Windows supports cancellation from any thread. typedef BOOL (WINAPI* cancel_io_ex_t)(HANDLE, LPOVERLAPPED); cancel_io_ex_t cancel_io_ex = (cancel_io_ex_t)cancel_io_ex_ptr; socket_type sock = impl.socket_; HANDLE sock_as_handle = reinterpret_cast(sock); if (!cancel_io_ex(sock_as_handle, 0)) { DWORD last_error = ::GetLastError(); if (last_error == ERROR_NOT_FOUND) { // ERROR_NOT_FOUND means that there were no operations to be // cancelled. 
We swallow this error to match the behaviour on other // platforms. ec = asio::error_code(); } else { ec = asio::error_code(last_error, asio::error::get_system_category()); } } else { ec = asio::error_code(); } } #if defined(ASIO_ENABLE_CANCELIO) else if (impl.safe_cancellation_thread_id_ == 0) { // No operations have been started, so there's nothing to cancel. ec = asio::error_code(); } else if (impl.safe_cancellation_thread_id_ == ::GetCurrentThreadId()) { // Asynchronous operations have been started from the current thread only, // so it is safe to try to cancel them using CancelIo. socket_type sock = impl.socket_; HANDLE sock_as_handle = reinterpret_cast(sock); if (!::CancelIo(sock_as_handle)) { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); } else { ec = asio::error_code(); } } else { // Asynchronous operations have been started from more than one thread, // so cancellation is not safe. ec = asio::error::operation_not_supported; } #else // defined(ASIO_ENABLE_CANCELIO) else { // Cancellation is not supported as CancelIo may not be used. ec = asio::error::operation_not_supported; } #endif // defined(ASIO_ENABLE_CANCELIO) // Cancel any operations started via the reactor. if (!ec) { reactor* r = static_cast( interlocked_compare_exchange_pointer( reinterpret_cast(&reactor_), 0, 0)); if (r) r->cancel_ops(impl.socket_, impl.reactor_data_); } return ec; } asio::error_code win_iocp_socket_service_base::do_open( win_iocp_socket_service_base::base_implementation_type& impl, int family, int type, int protocol, asio::error_code& ec) { if (is_open(impl)) { ec = asio::error::already_open; return ec; } socket_holder sock(socket_ops::socket(family, type, protocol, ec)); if (sock.get() == invalid_socket) return ec; HANDLE sock_as_handle = reinterpret_cast(sock.get()); if (iocp_service_.register_handle(sock_as_handle, ec)) return ec; impl.socket_ = sock.release(); switch (type) { case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break; case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break; default: impl.state_ = 0; break; } impl.cancel_token_.reset(static_cast(0), socket_ops::noop_deleter()); ec = asio::error_code(); return ec; } asio::error_code win_iocp_socket_service_base::do_assign( win_iocp_socket_service_base::base_implementation_type& impl, int type, socket_type native_socket, asio::error_code& ec) { if (is_open(impl)) { ec = asio::error::already_open; return ec; } HANDLE sock_as_handle = reinterpret_cast(native_socket); if (iocp_service_.register_handle(sock_as_handle, ec)) return ec; impl.socket_ = native_socket; switch (type) { case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break; case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break; default: impl.state_ = 0; break; } impl.cancel_token_.reset(static_cast(0), socket_ops::noop_deleter()); ec = asio::error_code(); return ec; } void win_iocp_socket_service_base::start_send_op( win_iocp_socket_service_base::base_implementation_type& impl, WSABUF* buffers, std::size_t buffer_count, socket_base::message_flags flags, bool noop, operation* op) { update_cancellation_thread_id(impl); iocp_service_.work_started(); if (noop) iocp_service_.on_completion(op); else if (!is_open(impl)) iocp_service_.on_completion(op, asio::error::bad_descriptor); else { DWORD bytes_transferred = 0; int result = ::WSASend(impl.socket_, buffers, static_cast(buffer_count), &bytes_transferred, flags, op, 0); DWORD last_error = ::WSAGetLastError(); if (last_error == 
ERROR_PORT_UNREACHABLE) last_error = WSAECONNREFUSED; if (result != 0 && last_error != WSA_IO_PENDING) iocp_service_.on_completion(op, last_error, bytes_transferred); else iocp_service_.on_pending(op); } } void win_iocp_socket_service_base::start_send_to_op( win_iocp_socket_service_base::base_implementation_type& impl, WSABUF* buffers, std::size_t buffer_count, const socket_addr_type* addr, int addrlen, socket_base::message_flags flags, operation* op) { update_cancellation_thread_id(impl); iocp_service_.work_started(); if (!is_open(impl)) iocp_service_.on_completion(op, asio::error::bad_descriptor); else { DWORD bytes_transferred = 0; int result = ::WSASendTo(impl.socket_, buffers, static_cast(buffer_count), &bytes_transferred, flags, addr, addrlen, op, 0); DWORD last_error = ::WSAGetLastError(); if (last_error == ERROR_PORT_UNREACHABLE) last_error = WSAECONNREFUSED; if (result != 0 && last_error != WSA_IO_PENDING) iocp_service_.on_completion(op, last_error, bytes_transferred); else iocp_service_.on_pending(op); } } void win_iocp_socket_service_base::start_receive_op( win_iocp_socket_service_base::base_implementation_type& impl, WSABUF* buffers, std::size_t buffer_count, socket_base::message_flags flags, bool noop, operation* op) { update_cancellation_thread_id(impl); iocp_service_.work_started(); if (noop) iocp_service_.on_completion(op); else if (!is_open(impl)) iocp_service_.on_completion(op, asio::error::bad_descriptor); else { DWORD bytes_transferred = 0; DWORD recv_flags = flags; int result = ::WSARecv(impl.socket_, buffers, static_cast(buffer_count), &bytes_transferred, &recv_flags, op, 0); DWORD last_error = ::WSAGetLastError(); if (last_error == ERROR_NETNAME_DELETED) last_error = WSAECONNRESET; else if (last_error == ERROR_PORT_UNREACHABLE) last_error = WSAECONNREFUSED; if (result != 0 && last_error != WSA_IO_PENDING) iocp_service_.on_completion(op, last_error, bytes_transferred); else iocp_service_.on_pending(op); } } void win_iocp_socket_service_base::start_null_buffers_receive_op( win_iocp_socket_service_base::base_implementation_type& impl, socket_base::message_flags flags, reactor_op* op) { if ((impl.state_ & socket_ops::stream_oriented) != 0) { // For stream sockets on Windows, we may issue a 0-byte overlapped // WSARecv to wait until there is data available on the socket. ::WSABUF buf = { 0, 0 }; start_receive_op(impl, &buf, 1, flags, false, op); } else { start_reactor_op(impl, (flags & socket_base::message_out_of_band) ? 
reactor::except_op : reactor::read_op, op); } } void win_iocp_socket_service_base::start_receive_from_op( win_iocp_socket_service_base::base_implementation_type& impl, WSABUF* buffers, std::size_t buffer_count, socket_addr_type* addr, socket_base::message_flags flags, int* addrlen, operation* op) { update_cancellation_thread_id(impl); iocp_service_.work_started(); if (!is_open(impl)) iocp_service_.on_completion(op, asio::error::bad_descriptor); else { DWORD bytes_transferred = 0; DWORD recv_flags = flags; int result = ::WSARecvFrom(impl.socket_, buffers, static_cast(buffer_count), &bytes_transferred, &recv_flags, addr, addrlen, op, 0); DWORD last_error = ::WSAGetLastError(); if (last_error == ERROR_PORT_UNREACHABLE) last_error = WSAECONNREFUSED; if (result != 0 && last_error != WSA_IO_PENDING) iocp_service_.on_completion(op, last_error, bytes_transferred); else iocp_service_.on_pending(op); } } void win_iocp_socket_service_base::start_accept_op( win_iocp_socket_service_base::base_implementation_type& impl, bool peer_is_open, socket_holder& new_socket, int family, int type, int protocol, void* output_buffer, DWORD address_length, operation* op) { update_cancellation_thread_id(impl); iocp_service_.work_started(); if (!is_open(impl)) iocp_service_.on_completion(op, asio::error::bad_descriptor); else if (peer_is_open) iocp_service_.on_completion(op, asio::error::already_open); else { asio::error_code ec; new_socket.reset(socket_ops::socket(family, type, protocol, ec)); if (new_socket.get() == invalid_socket) iocp_service_.on_completion(op, ec); else { DWORD bytes_read = 0; BOOL result = ::AcceptEx(impl.socket_, new_socket.get(), output_buffer, 0, address_length, address_length, &bytes_read, op); DWORD last_error = ::WSAGetLastError(); if (!result && last_error != WSA_IO_PENDING) iocp_service_.on_completion(op, last_error); else iocp_service_.on_pending(op); } } } void win_iocp_socket_service_base::restart_accept_op( socket_type s, socket_holder& new_socket, int family, int type, int protocol, void* output_buffer, DWORD address_length, operation* op) { new_socket.reset(); iocp_service_.work_started(); asio::error_code ec; new_socket.reset(socket_ops::socket(family, type, protocol, ec)); if (new_socket.get() == invalid_socket) iocp_service_.on_completion(op, ec); else { DWORD bytes_read = 0; BOOL result = ::AcceptEx(s, new_socket.get(), output_buffer, 0, address_length, address_length, &bytes_read, op); DWORD last_error = ::WSAGetLastError(); if (!result && last_error != WSA_IO_PENDING) iocp_service_.on_completion(op, last_error); else iocp_service_.on_pending(op); } } void win_iocp_socket_service_base::start_reactor_op( win_iocp_socket_service_base::base_implementation_type& impl, int op_type, reactor_op* op) { reactor& r = get_reactor(); update_cancellation_thread_id(impl); if (is_open(impl)) { r.start_op(op_type, impl.socket_, impl.reactor_data_, op, false, false); return; } else op->ec_ = asio::error::bad_descriptor; iocp_service_.post_immediate_completion(op, false); } void win_iocp_socket_service_base::start_connect_op( win_iocp_socket_service_base::base_implementation_type& impl, int family, int type, const socket_addr_type* addr, std::size_t addrlen, win_iocp_socket_connect_op_base* op) { // If ConnectEx is available, use that. if (family == ASIO_OS_DEF(AF_INET) || family == ASIO_OS_DEF(AF_INET6)) { if (connect_ex_fn connect_ex = get_connect_ex(impl, type)) { union address_union { socket_addr_type base; sockaddr_in4_type v4; sockaddr_in6_type v6; } a; using namespace std; // For memset. 
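  // ConnectEx requires the socket to be bound first, so bind to the wildcard
  // address (all zeroes, port 0) for this address family before starting the
  // overlapped connect.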
memset(&a, 0, sizeof(a)); a.base.sa_family = family; socket_ops::bind(impl.socket_, &a.base, family == ASIO_OS_DEF(AF_INET) ? sizeof(a.v4) : sizeof(a.v6), op->ec_); if (op->ec_ && op->ec_ != asio::error::invalid_argument) { iocp_service_.post_immediate_completion(op, false); return; } op->connect_ex_ = true; update_cancellation_thread_id(impl); iocp_service_.work_started(); BOOL result = connect_ex(impl.socket_, addr, static_cast(addrlen), 0, 0, 0, op); DWORD last_error = ::WSAGetLastError(); if (!result && last_error != WSA_IO_PENDING) iocp_service_.on_completion(op, last_error); else iocp_service_.on_pending(op); return; } } // Otherwise, fall back to a reactor-based implementation. reactor& r = get_reactor(); update_cancellation_thread_id(impl); if ((impl.state_ & socket_ops::non_blocking) != 0 || socket_ops::set_internal_non_blocking( impl.socket_, impl.state_, true, op->ec_)) { if (socket_ops::connect(impl.socket_, addr, addrlen, op->ec_) != 0) { if (op->ec_ == asio::error::in_progress || op->ec_ == asio::error::would_block) { op->ec_ = asio::error_code(); r.start_op(reactor::connect_op, impl.socket_, impl.reactor_data_, op, false, false); return; } } } r.post_immediate_completion(op, false); } void win_iocp_socket_service_base::close_for_destruction( win_iocp_socket_service_base::base_implementation_type& impl) { if (is_open(impl)) { ASIO_HANDLER_OPERATION(("socket", &impl, "close")); // Check if the reactor was created, in which case we need to close the // socket on the reactor as well to cancel any operations that might be // running there. reactor* r = static_cast( interlocked_compare_exchange_pointer( reinterpret_cast(&reactor_), 0, 0)); if (r) r->deregister_descriptor(impl.socket_, impl.reactor_data_, true); } asio::error_code ignored_ec; socket_ops::close(impl.socket_, impl.state_, true, ignored_ec); impl.socket_ = invalid_socket; impl.state_ = 0; impl.cancel_token_.reset(); #if defined(ASIO_ENABLE_CANCELIO) impl.safe_cancellation_thread_id_ = 0; #endif // defined(ASIO_ENABLE_CANCELIO) } void win_iocp_socket_service_base::update_cancellation_thread_id( win_iocp_socket_service_base::base_implementation_type& impl) { #if defined(ASIO_ENABLE_CANCELIO) if (impl.safe_cancellation_thread_id_ == 0) impl.safe_cancellation_thread_id_ = ::GetCurrentThreadId(); else if (impl.safe_cancellation_thread_id_ != ::GetCurrentThreadId()) impl.safe_cancellation_thread_id_ = ~DWORD(0); #else // defined(ASIO_ENABLE_CANCELIO) (void)impl; #endif // defined(ASIO_ENABLE_CANCELIO) } reactor& win_iocp_socket_service_base::get_reactor() { reactor* r = static_cast( interlocked_compare_exchange_pointer( reinterpret_cast(&reactor_), 0, 0)); if (!r) { r = &(use_service(io_service_)); interlocked_exchange_pointer(reinterpret_cast(&reactor_), r); } return *r; } win_iocp_socket_service_base::connect_ex_fn win_iocp_socket_service_base::get_connect_ex( win_iocp_socket_service_base::base_implementation_type& impl, int type) { #if defined(ASIO_DISABLE_CONNECTEX) (void)impl; (void)type; return 0; #else // defined(ASIO_DISABLE_CONNECTEX) if (type != ASIO_OS_DEF(SOCK_STREAM) && type != ASIO_OS_DEF(SOCK_SEQPACKET)) return 0; void* ptr = interlocked_compare_exchange_pointer(&connect_ex_, 0, 0); if (!ptr) { GUID guid = { 0x25a207b9, 0xddf3, 0x4660, { 0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e } }; DWORD bytes = 0; if (::WSAIoctl(impl.socket_, SIO_GET_EXTENSION_FUNCTION_POINTER, &guid, sizeof(guid), &ptr, sizeof(ptr), &bytes, 0, 0) != 0) { // Set connect_ex_ to a special value to indicate that ConnectEx is // 
unavailable. That way we won't bother trying to look it up again. ptr = this; } interlocked_exchange_pointer(&connect_ex_, ptr); } return reinterpret_cast(ptr == this ? 0 : ptr); #endif // defined(ASIO_DISABLE_CONNECTEX) } void* win_iocp_socket_service_base::interlocked_compare_exchange_pointer( void** dest, void* exch, void* cmp) { #if defined(_M_IX86) return reinterpret_cast(InterlockedCompareExchange( reinterpret_cast(dest), reinterpret_cast(exch), reinterpret_cast(cmp))); #else return InterlockedCompareExchangePointer(dest, exch, cmp); #endif } void* win_iocp_socket_service_base::interlocked_exchange_pointer( void** dest, void* val) { #if defined(_M_IX86) return reinterpret_cast(InterlockedExchange( reinterpret_cast(dest), reinterpret_cast(val))); #else return InterlockedExchangePointer(dest, val); #endif } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_IMPL_WIN_IOCP_SOCKET_SERVICE_BASE_IPP galera-3-25.3.20/asio/asio/detail/impl/winrt_timer_scheduler.hpp0000644000015300001660000000412313042054732024341 0ustar jenkinsjenkins// // detail/impl/winrt_timer_scheduler.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_HPP #define ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template void winrt_timer_scheduler::add_timer_queue(timer_queue& queue) { do_add_timer_queue(queue); } // Remove a timer queue from the reactor. template void winrt_timer_scheduler::remove_timer_queue(timer_queue& queue) { do_remove_timer_queue(queue); } template void winrt_timer_scheduler::schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op) { asio::detail::mutex::scoped_lock lock(mutex_); if (shutdown_) { io_service_.post_immediate_completion(op, false); return; } bool earliest = queue.enqueue_timer(time, timer, op); io_service_.work_started(); if (earliest) event_.signal(lock); } template std::size_t winrt_timer_scheduler::cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled) { asio::detail::mutex::scoped_lock lock(mutex_); op_queue ops; std::size_t n = queue.cancel_timer(timer, ops, max_cancelled); lock.unlock(); io_service_.post_deferred_completions(ops); return n; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_HPP galera-3-25.3.20/asio/asio/detail/impl/win_iocp_serial_port_service.ipp0000644000015300001660000001223313042054732025674 0ustar jenkinsjenkins// // detail/impl/win_iocp_serial_port_service.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_IOCP_SERIAL_PORT_SERVICE_IPP #define ASIO_DETAIL_IMPL_WIN_IOCP_SERIAL_PORT_SERVICE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) && defined(ASIO_HAS_SERIAL_PORT) #include #include "asio/detail/win_iocp_serial_port_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { win_iocp_serial_port_service::win_iocp_serial_port_service( asio::io_service& io_service) : handle_service_(io_service) { } void win_iocp_serial_port_service::shutdown_service() { } asio::error_code win_iocp_serial_port_service::open( win_iocp_serial_port_service::implementation_type& impl, const std::string& device, asio::error_code& ec) { if (is_open(impl)) { ec = asio::error::already_open; return ec; } // For convenience, add a leading \\.\ sequence if not already present. std::string name = (device[0] == '\\') ? device : "\\\\.\\" + device; // Open a handle to the serial port. ::HANDLE handle = ::CreateFileA(name.c_str(), GENERIC_READ | GENERIC_WRITE, 0, 0, OPEN_EXISTING, FILE_FLAG_OVERLAPPED, 0); if (handle == INVALID_HANDLE_VALUE) { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); return ec; } // Determine the initial serial port parameters. using namespace std; // For memset. ::DCB dcb; memset(&dcb, 0, sizeof(DCB)); dcb.DCBlength = sizeof(DCB); if (!::GetCommState(handle, &dcb)) { DWORD last_error = ::GetLastError(); ::CloseHandle(handle); ec = asio::error_code(last_error, asio::error::get_system_category()); return ec; } // Set some default serial port parameters. This implementation does not // support changing these, so they might as well be in a known state. dcb.fBinary = TRUE; // Win32 only supports binary mode. dcb.fDsrSensitivity = FALSE; dcb.fNull = FALSE; // Do not ignore NULL characters. dcb.fAbortOnError = FALSE; // Ignore serial framing errors. if (!::SetCommState(handle, &dcb)) { DWORD last_error = ::GetLastError(); ::CloseHandle(handle); ec = asio::error_code(last_error, asio::error::get_system_category()); return ec; } // Set up timeouts so that the serial port will behave similarly to a // network socket. Reads wait for at least one byte, then return with // whatever they have. Writes return once everything is out the door. ::COMMTIMEOUTS timeouts; timeouts.ReadIntervalTimeout = 1; timeouts.ReadTotalTimeoutMultiplier = 0; timeouts.ReadTotalTimeoutConstant = 0; timeouts.WriteTotalTimeoutMultiplier = 0; timeouts.WriteTotalTimeoutConstant = 0; if (!::SetCommTimeouts(handle, &timeouts)) { DWORD last_error = ::GetLastError(); ::CloseHandle(handle); ec = asio::error_code(last_error, asio::error::get_system_category()); return ec; } // We're done. Take ownership of the serial port handle. if (handle_service_.assign(impl, handle, ec)) ::CloseHandle(handle); return ec; } asio::error_code win_iocp_serial_port_service::do_set_option( win_iocp_serial_port_service::implementation_type& impl, win_iocp_serial_port_service::store_function_type store, const void* option, asio::error_code& ec) { using namespace std; // For memcpy. 
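  // Read-modify-write of the device control block: fetch the current comm
  // state, let the caller-supplied store function apply the option, then write
  // the updated DCB back to the port.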
::DCB dcb; memset(&dcb, 0, sizeof(DCB)); dcb.DCBlength = sizeof(DCB); if (!::GetCommState(handle_service_.native_handle(impl), &dcb)) { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); return ec; } if (store(option, dcb, ec)) return ec; if (!::SetCommState(handle_service_.native_handle(impl), &dcb)) { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); return ec; } ec = asio::error_code(); return ec; } asio::error_code win_iocp_serial_port_service::do_get_option( const win_iocp_serial_port_service::implementation_type& impl, win_iocp_serial_port_service::load_function_type load, void* option, asio::error_code& ec) const { using namespace std; // For memset. ::DCB dcb; memset(&dcb, 0, sizeof(DCB)); dcb.DCBlength = sizeof(DCB); if (!::GetCommState(handle_service_.native_handle(impl), &dcb)) { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); return ec; } return load(option, dcb, ec); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) && defined(ASIO_HAS_SERIAL_PORT) #endif // ASIO_DETAIL_IMPL_WIN_IOCP_SERIAL_PORT_SERVICE_IPP galera-3-25.3.20/asio/asio/detail/impl/win_iocp_io_service.hpp0000644000015300001660000000703413042054732023762 0ustar jenkinsjenkins// // detail/impl/win_iocp_io_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_IOCP_IO_SERVICE_HPP #define ASIO_DETAIL_IMPL_WIN_IOCP_IO_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/addressof.hpp" #include "asio/detail/completion_handler.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template void win_iocp_io_service::dispatch(Handler& handler) { if (thread_call_stack::contains(this)) { fenced_block b(fenced_block::full); asio_handler_invoke_helpers::invoke(handler, handler); } else { // Allocate and construct an operation to wrap the handler. typedef completion_handler op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "io_service", this, "dispatch")); post_immediate_completion(p.p, false); p.v = p.p = 0; } } template void win_iocp_io_service::post(Handler& handler) { // Allocate and construct an operation to wrap the handler. 
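  // Memory for the wrapper comes from the handler's allocation hooks; once
  // post_immediate_completion() has taken ownership, the pointers are cleared
  // so the ptr guard does not free the operation.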
typedef completion_handler op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "io_service", this, "post")); post_immediate_completion(p.p, false); p.v = p.p = 0; } template void win_iocp_io_service::add_timer_queue( timer_queue& queue) { do_add_timer_queue(queue); } template void win_iocp_io_service::remove_timer_queue( timer_queue& queue) { do_remove_timer_queue(queue); } template void win_iocp_io_service::schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op) { // If the service has been shut down we silently discard the timer. if (::InterlockedExchangeAdd(&shutdown_, 0) != 0) { post_immediate_completion(op, false); return; } mutex::scoped_lock lock(dispatch_mutex_); bool earliest = queue.enqueue_timer(time, timer, op); work_started(); if (earliest) update_timeout(); } template std::size_t win_iocp_io_service::cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled) { // If the service has been shut down we silently ignore the cancellation. if (::InterlockedExchangeAdd(&shutdown_, 0) != 0) return 0; mutex::scoped_lock lock(dispatch_mutex_); op_queue ops; std::size_t n = queue.cancel_timer(timer, ops, max_cancelled); post_deferred_completions(ops); return n; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_IMPL_WIN_IOCP_IO_SERVICE_HPP galera-3-25.3.20/asio/asio/detail/impl/winsock_init.ipp0000644000015300001660000000357413042054732022452 0ustar jenkinsjenkins// // detail/impl/winsock_init.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WINSOCK_INIT_IPP #define ASIO_DETAIL_IMPL_WINSOCK_INIT_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) #include "asio/detail/socket_types.hpp" #include "asio/detail/winsock_init.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { void winsock_init_base::startup(data& d, unsigned char major, unsigned char minor) { if (::InterlockedIncrement(&d.init_count_) == 1) { WSADATA wsa_data; long result = ::WSAStartup(MAKEWORD(major, minor), &wsa_data); ::InterlockedExchange(&d.result_, result); } } void winsock_init_base::manual_startup(data& d) { if (::InterlockedIncrement(&d.init_count_) == 1) { ::InterlockedExchange(&d.result_, 0); } } void winsock_init_base::cleanup(data& d) { if (::InterlockedDecrement(&d.init_count_) == 0) { ::WSACleanup(); } } void winsock_init_base::manual_cleanup(data& d) { ::InterlockedDecrement(&d.init_count_); } void winsock_init_base::throw_on_error(data& d) { long result = ::InterlockedExchangeAdd(&d.result_, 0); if (result != 0) { asio::error_code ec(result, asio::error::get_system_category()); asio::detail::throw_error(ec, "winsock"); } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) #endif // ASIO_DETAIL_IMPL_WINSOCK_INIT_IPP galera-3-25.3.20/asio/asio/detail/impl/strand_service.hpp0000644000015300001660000000642313042054732022760 0ustar jenkinsjenkins// // detail/impl/strand_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_STRAND_SERVICE_HPP #define ASIO_DETAIL_IMPL_STRAND_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/addressof.hpp" #include "asio/detail/call_stack.hpp" #include "asio/detail/completion_handler.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { inline strand_service::strand_impl::strand_impl() : operation(&strand_service::do_complete), locked_(false) { } struct strand_service::on_dispatch_exit { io_service_impl* io_service_; strand_impl* impl_; ~on_dispatch_exit() { impl_->mutex_.lock(); impl_->ready_queue_.push(impl_->waiting_queue_); bool more_handlers = impl_->locked_ = !impl_->ready_queue_.empty(); impl_->mutex_.unlock(); if (more_handlers) io_service_->post_immediate_completion(impl_, false); } }; template void strand_service::dispatch(strand_service::implementation_type& impl, Handler& handler) { // If we are already in the strand then the handler can run immediately. if (call_stack::contains(impl)) { fenced_block b(fenced_block::full); asio_handler_invoke_helpers::invoke(handler, handler); return; } // Allocate and construct an operation to wrap the handler. 
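  // As in post() below, the wrapper is allocated through the handler's
  // allocation hooks. If do_dispatch() reports that the handler may run
  // immediately, it is invoked inline right here; otherwise it has been queued
  // on the strand and will run later.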
typedef completion_handler op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "strand", impl, "dispatch")); bool dispatch_immediately = do_dispatch(impl, p.p); operation* o = p.p; p.v = p.p = 0; if (dispatch_immediately) { // Indicate that this strand is executing on the current thread. call_stack::context ctx(impl); // Ensure the next handler, if any, is scheduled on block exit. on_dispatch_exit on_exit = { &io_service_, impl }; (void)on_exit; completion_handler::do_complete( &io_service_, o, asio::error_code(), 0); } } // Request the io_service to invoke the given handler and return immediately. template void strand_service::post(strand_service::implementation_type& impl, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef completion_handler op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "strand", impl, "post")); do_post(impl, p.p, is_continuation); p.v = p.p = 0; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IMPL_STRAND_SERVICE_HPP galera-3-25.3.20/asio/asio/detail/impl/signal_set_service.ipp0000644000015300001660000004427413042054732023624 0ustar jenkinsjenkins// // detail/impl/signal_set_service.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_SIGNAL_SET_SERVICE_IPP #define ASIO_DETAIL_IMPL_SIGNAL_SET_SERVICE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/reactor.hpp" #include "asio/detail/signal_blocker.hpp" #include "asio/detail/signal_set_service.hpp" #include "asio/detail/static_mutex.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct signal_state { // Mutex used for protecting global state. static_mutex mutex_; // The read end of the pipe used for signal notifications. int read_descriptor_; // The write end of the pipe used for signal notifications. int write_descriptor_; // Whether the signal state has been prepared for a fork. bool fork_prepared_; // The head of a linked list of all signal_set_service instances. class signal_set_service* service_list_; // A count of the number of objects that are registered for each signal. 
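  // (Indexed by signal number; add(), remove() and clear() use it to decide
  // when the process-wide handler must be installed or restored to SIG_DFL.)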
std::size_t registration_count_[max_signal_number]; }; signal_state* get_signal_state() { static signal_state state = { ASIO_STATIC_MUTEX_INIT, -1, -1, false, 0, { 0 } }; return &state; } void asio_signal_handler(int signal_number) { #if defined(ASIO_WINDOWS) \ || defined(ASIO_WINDOWS_RUNTIME) \ || defined(__CYGWIN__) signal_set_service::deliver_signal(signal_number); #else // defined(ASIO_WINDOWS) // || defined(ASIO_WINDOWS_RUNTIME) // || defined(__CYGWIN__) int saved_errno = errno; signal_state* state = get_signal_state(); signed_size_type result = ::write(state->write_descriptor_, &signal_number, sizeof(signal_number)); (void)result; errno = saved_errno; #endif // defined(ASIO_WINDOWS) // || defined(ASIO_WINDOWS_RUNTIME) // || defined(__CYGWIN__) #if defined(ASIO_HAS_SIGNAL) && !defined(ASIO_HAS_SIGACTION) ::signal(signal_number, asio_signal_handler); #endif // defined(ASIO_HAS_SIGNAL) && !defined(ASIO_HAS_SIGACTION) } #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) class signal_set_service::pipe_read_op : public reactor_op { public: pipe_read_op() : reactor_op(&pipe_read_op::do_perform, pipe_read_op::do_complete) { } static bool do_perform(reactor_op*) { signal_state* state = get_signal_state(); int fd = state->read_descriptor_; int signal_number = 0; while (::read(fd, &signal_number, sizeof(int)) == sizeof(int)) if (signal_number >= 0 && signal_number < max_signal_number) signal_set_service::deliver_signal(signal_number); return false; } static void do_complete(io_service_impl* /*owner*/, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { pipe_read_op* o(static_cast(base)); delete o; } }; #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) signal_set_service::signal_set_service( asio::io_service& io_service) : io_service_(asio::use_service(io_service)), #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) reactor_(asio::use_service(io_service)), #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) next_(0), prev_(0) { get_signal_state()->mutex_.init(); #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) reactor_.init_task(); #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) for (int i = 0; i < max_signal_number; ++i) registrations_[i] = 0; add_service(this); } signal_set_service::~signal_set_service() { remove_service(this); } void signal_set_service::shutdown_service() { remove_service(this); op_queue ops; for (int i = 0; i < max_signal_number; ++i) { registration* reg = registrations_[i]; while (reg) { ops.push(*reg->queue_); reg = reg->next_in_table_; } } io_service_.abandon_operations(ops); } void signal_set_service::fork_service( asio::io_service::fork_event fork_ev) { #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) signal_state* state = get_signal_state(); static_mutex::scoped_lock lock(state->mutex_); switch (fork_ev) { case asio::io_service::fork_prepare: { int read_descriptor = state->read_descriptor_; state->fork_prepared_ = true; lock.unlock(); reactor_.deregister_internal_descriptor(read_descriptor, reactor_data_); } break; case asio::io_service::fork_parent: if (state->fork_prepared_) { int read_descriptor = state->read_descriptor_; state->fork_prepared_ = false; lock.unlock(); reactor_.register_internal_descriptor(reactor::read_op, read_descriptor, 
reactor_data_, new pipe_read_op); } break; case asio::io_service::fork_child: if (state->fork_prepared_) { asio::detail::signal_blocker blocker; close_descriptors(); open_descriptors(); int read_descriptor = state->read_descriptor_; state->fork_prepared_ = false; lock.unlock(); reactor_.register_internal_descriptor(reactor::read_op, read_descriptor, reactor_data_, new pipe_read_op); } break; default: break; } #else // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) (void)fork_ev; #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) } void signal_set_service::construct( signal_set_service::implementation_type& impl) { impl.signals_ = 0; } void signal_set_service::destroy( signal_set_service::implementation_type& impl) { asio::error_code ignored_ec; clear(impl, ignored_ec); cancel(impl, ignored_ec); } asio::error_code signal_set_service::add( signal_set_service::implementation_type& impl, int signal_number, asio::error_code& ec) { // Check that the signal number is valid. if (signal_number < 0 || signal_number >= max_signal_number) { ec = asio::error::invalid_argument; return ec; } signal_state* state = get_signal_state(); static_mutex::scoped_lock lock(state->mutex_); // Find the appropriate place to insert the registration. registration** insertion_point = &impl.signals_; registration* next = impl.signals_; while (next && next->signal_number_ < signal_number) { insertion_point = &next->next_in_set_; next = next->next_in_set_; } // Only do something if the signal is not already registered. if (next == 0 || next->signal_number_ != signal_number) { registration* new_registration = new registration; #if defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION) // Register for the signal if we're the first. if (state->registration_count_[signal_number] == 0) { # if defined(ASIO_HAS_SIGACTION) using namespace std; // For memset. struct sigaction sa; memset(&sa, 0, sizeof(sa)); sa.sa_handler = asio_signal_handler; sigfillset(&sa.sa_mask); if (::sigaction(signal_number, &sa, 0) == -1) # else // defined(ASIO_HAS_SIGACTION) if (::signal(signal_number, asio_signal_handler) == SIG_ERR) # endif // defined(ASIO_HAS_SIGACTION) { # if defined(ASIO_WINDOWS) || defined(__CYGWIN__) ec = asio::error::invalid_argument; # else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) ec = asio::error_code(errno, asio::error::get_system_category()); # endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) delete new_registration; return ec; } } #endif // defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION) // Record the new registration in the set. new_registration->signal_number_ = signal_number; new_registration->queue_ = &impl.queue_; new_registration->next_in_set_ = next; *insertion_point = new_registration; // Insert registration into the registration table. new_registration->next_in_table_ = registrations_[signal_number]; if (registrations_[signal_number]) registrations_[signal_number]->prev_in_table_ = new_registration; registrations_[signal_number] = new_registration; ++state->registration_count_[signal_number]; } ec = asio::error_code(); return ec; } asio::error_code signal_set_service::remove( signal_set_service::implementation_type& impl, int signal_number, asio::error_code& ec) { // Check that the signal number is valid. 
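  // (Signal numbers index the fixed-size registration tables below, so
  // out-of-range values are rejected up front.)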
if (signal_number < 0 || signal_number >= max_signal_number) { ec = asio::error::invalid_argument; return ec; } signal_state* state = get_signal_state(); static_mutex::scoped_lock lock(state->mutex_); // Find the signal number in the list of registrations. registration** deletion_point = &impl.signals_; registration* reg = impl.signals_; while (reg && reg->signal_number_ < signal_number) { deletion_point = ®->next_in_set_; reg = reg->next_in_set_; } if (reg != 0 && reg->signal_number_ == signal_number) { #if defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION) // Set signal handler back to the default if we're the last. if (state->registration_count_[signal_number] == 1) { # if defined(ASIO_HAS_SIGACTION) using namespace std; // For memset. struct sigaction sa; memset(&sa, 0, sizeof(sa)); sa.sa_handler = SIG_DFL; if (::sigaction(signal_number, &sa, 0) == -1) # else // defined(ASIO_HAS_SIGACTION) if (::signal(signal_number, SIG_DFL) == SIG_ERR) # endif // defined(ASIO_HAS_SIGACTION) { # if defined(ASIO_WINDOWS) || defined(__CYGWIN__) ec = asio::error::invalid_argument; # else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) ec = asio::error_code(errno, asio::error::get_system_category()); # endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) return ec; } } #endif // defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION) // Remove the registration from the set. *deletion_point = reg->next_in_set_; // Remove the registration from the registration table. if (registrations_[signal_number] == reg) registrations_[signal_number] = reg->next_in_table_; if (reg->prev_in_table_) reg->prev_in_table_->next_in_table_ = reg->next_in_table_; if (reg->next_in_table_) reg->next_in_table_->prev_in_table_ = reg->prev_in_table_; --state->registration_count_[signal_number]; delete reg; } ec = asio::error_code(); return ec; } asio::error_code signal_set_service::clear( signal_set_service::implementation_type& impl, asio::error_code& ec) { signal_state* state = get_signal_state(); static_mutex::scoped_lock lock(state->mutex_); while (registration* reg = impl.signals_) { #if defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION) // Set signal handler back to the default if we're the last. if (state->registration_count_[reg->signal_number_] == 1) { # if defined(ASIO_HAS_SIGACTION) using namespace std; // For memset. struct sigaction sa; memset(&sa, 0, sizeof(sa)); sa.sa_handler = SIG_DFL; if (::sigaction(reg->signal_number_, &sa, 0) == -1) # else // defined(ASIO_HAS_SIGACTION) if (::signal(reg->signal_number_, SIG_DFL) == SIG_ERR) # endif // defined(ASIO_HAS_SIGACTION) { # if defined(ASIO_WINDOWS) || defined(__CYGWIN__) ec = asio::error::invalid_argument; # else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) ec = asio::error_code(errno, asio::error::get_system_category()); # endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) return ec; } } #endif // defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION) // Remove the registration from the registration table. 
if (registrations_[reg->signal_number_] == reg) registrations_[reg->signal_number_] = reg->next_in_table_; if (reg->prev_in_table_) reg->prev_in_table_->next_in_table_ = reg->next_in_table_; if (reg->next_in_table_) reg->next_in_table_->prev_in_table_ = reg->prev_in_table_; --state->registration_count_[reg->signal_number_]; impl.signals_ = reg->next_in_set_; delete reg; } ec = asio::error_code(); return ec; } asio::error_code signal_set_service::cancel( signal_set_service::implementation_type& impl, asio::error_code& ec) { ASIO_HANDLER_OPERATION(("signal_set", &impl, "cancel")); op_queue ops; { signal_state* state = get_signal_state(); static_mutex::scoped_lock lock(state->mutex_); while (signal_op* op = impl.queue_.front()) { op->ec_ = asio::error::operation_aborted; impl.queue_.pop(); ops.push(op); } } io_service_.post_deferred_completions(ops); ec = asio::error_code(); return ec; } void signal_set_service::deliver_signal(int signal_number) { signal_state* state = get_signal_state(); static_mutex::scoped_lock lock(state->mutex_); signal_set_service* service = state->service_list_; while (service) { op_queue ops; registration* reg = service->registrations_[signal_number]; while (reg) { if (reg->queue_->empty()) { ++reg->undelivered_; } else { while (signal_op* op = reg->queue_->front()) { op->signal_number_ = signal_number; reg->queue_->pop(); ops.push(op); } } reg = reg->next_in_table_; } service->io_service_.post_deferred_completions(ops); service = service->next_; } } void signal_set_service::add_service(signal_set_service* service) { signal_state* state = get_signal_state(); static_mutex::scoped_lock lock(state->mutex_); #if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) // If this is the first service to be created, open a new pipe. if (state->service_list_ == 0) open_descriptors(); #endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) // Insert service into linked list of all services. service->next_ = state->service_list_; service->prev_ = 0; if (state->service_list_) state->service_list_->prev_ = service; state->service_list_ = service; #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) // Register for pipe readiness notifications. int read_descriptor = state->read_descriptor_; lock.unlock(); service->reactor_.register_internal_descriptor(reactor::read_op, read_descriptor, service->reactor_data_, new pipe_read_op); #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) } void signal_set_service::remove_service(signal_set_service* service) { signal_state* state = get_signal_state(); static_mutex::scoped_lock lock(state->mutex_); if (service->next_ || service->prev_ || state->service_list_ == service) { #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) // Disable the pipe readiness notifications. int read_descriptor = state->read_descriptor_; lock.unlock(); service->reactor_.deregister_descriptor( read_descriptor, service->reactor_data_, false); lock.lock(); #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) // Remove service from linked list of all services. if (state->service_list_ == service) state->service_list_ = service->next_; if (service->prev_) service->prev_->next_ = service->next_; if (service->next_) service->next_->prev_= service->prev_; service->next_ = 0; service->prev_ = 0; #if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) // If this is the last service to be removed, close the pipe. 
if (state->service_list_ == 0) close_descriptors(); #endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) } } void signal_set_service::open_descriptors() { #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) signal_state* state = get_signal_state(); int pipe_fds[2]; if (::pipe(pipe_fds) == 0) { state->read_descriptor_ = pipe_fds[0]; ::fcntl(state->read_descriptor_, F_SETFL, O_NONBLOCK); state->write_descriptor_ = pipe_fds[1]; ::fcntl(state->write_descriptor_, F_SETFL, O_NONBLOCK); #if defined(FD_CLOEXEC) ::fcntl(state->read_descriptor_, F_SETFD, FD_CLOEXEC); ::fcntl(state->write_descriptor_, F_SETFD, FD_CLOEXEC); #endif // defined(FD_CLOEXEC) } else { asio::error_code ec(errno, asio::error::get_system_category()); asio::detail::throw_error(ec, "signal_set_service pipe"); } #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) } void signal_set_service::close_descriptors() { #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) signal_state* state = get_signal_state(); if (state->read_descriptor_ != -1) ::close(state->read_descriptor_); state->read_descriptor_ = -1; if (state->write_descriptor_ != -1) ::close(state->write_descriptor_); state->write_descriptor_ = -1; #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) } void signal_set_service::start_wait_op( signal_set_service::implementation_type& impl, signal_op* op) { io_service_.work_started(); signal_state* state = get_signal_state(); static_mutex::scoped_lock lock(state->mutex_); registration* reg = impl.signals_; while (reg) { if (reg->undelivered_ > 0) { --reg->undelivered_; op->signal_number_ = reg->signal_number_; io_service_.post_deferred_completion(op); return; } reg = reg->next_in_set_; } impl.queue_.push(op); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IMPL_SIGNAL_SET_SERVICE_IPP galera-3-25.3.20/asio/asio/detail/impl/win_mutex.ipp0000644000015300001660000000344113042054732021762 0ustar jenkinsjenkins// // detail/impl/win_mutex.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_MUTEX_IPP #define ASIO_DETAIL_IMPL_WIN_MUTEX_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) #include "asio/detail/throw_error.hpp" #include "asio/detail/win_mutex.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { win_mutex::win_mutex() { int error = do_init(); asio::error_code ec(error, asio::error::get_system_category()); asio::detail::throw_error(ec, "mutex"); } int win_mutex::do_init() { #if defined(__MINGW32__) // Not sure if MinGW supports structured exception handling, so for now // we'll just call the Windows API and hope. 
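// --------------------------------------------------------------------------
// Illustrative sketch (not asio code): win_mutex::do_init(), continued below,
// initializes the critical section and, outside MinGW, guards the call with
// structured exception handling because InitializeCriticalSectionAndSpinCount
// can raise STATUS_NO_MEMORY on some Windows versions. A minimal standalone
// Win32/MSVC version of the same guard; the function name is an assumption.
// --------------------------------------------------------------------------
#include <windows.h>

// Returns 0 on success or a Win32 error code; the raised status is mapped to
// ERROR_OUTOFMEMORY instead of being allowed to escape to the caller.
inline int demo_init_critical_section(CRITICAL_SECTION* cs)
{
  __try
  {
    if (!::InitializeCriticalSectionAndSpinCount(cs, 0x80000000))
      return ::GetLastError();
  }
  __except (GetExceptionCode() == STATUS_NO_MEMORY
      ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH)
  {
    return ERROR_OUTOFMEMORY;
  }
  return 0;
}
// --------------------------------------------------------------------------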
# if defined(UNDER_CE) ::InitializeCriticalSection(&crit_section_); # else if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000)) return ::GetLastError(); # endif return 0; #else __try { # if defined(UNDER_CE) ::InitializeCriticalSection(&crit_section_); # else if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000)) return ::GetLastError(); # endif } __except(GetExceptionCode() == STATUS_NO_MEMORY ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) { return ERROR_OUTOFMEMORY; } return 0; #endif } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS) #endif // ASIO_DETAIL_IMPL_WIN_MUTEX_IPP galera-3-25.3.20/asio/asio/detail/impl/kqueue_reactor.hpp0000644000015300001660000000407713042054732022766 0ustar jenkinsjenkins// // detail/impl/kqueue_reactor.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2005 Stefan Arentz (stefan at soze dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_KQUEUE_REACTOR_HPP #define ASIO_DETAIL_IMPL_KQUEUE_REACTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_KQUEUE) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template void kqueue_reactor::add_timer_queue(timer_queue& queue) { do_add_timer_queue(queue); } // Remove a timer queue from the reactor. template void kqueue_reactor::remove_timer_queue(timer_queue& queue) { do_remove_timer_queue(queue); } template void kqueue_reactor::schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op) { asio::detail::mutex::scoped_lock lock(mutex_); if (shutdown_) { io_service_.post_immediate_completion(op, false); return; } bool earliest = queue.enqueue_timer(time, timer, op); io_service_.work_started(); if (earliest) interrupt(); } template std::size_t kqueue_reactor::cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled) { asio::detail::mutex::scoped_lock lock(mutex_); op_queue ops; std::size_t n = queue.cancel_timer(timer, ops, max_cancelled); lock.unlock(); io_service_.post_deferred_completions(ops); return n; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_KQUEUE) #endif // ASIO_DETAIL_IMPL_KQUEUE_REACTOR_HPP galera-3-25.3.20/asio/asio/detail/impl/socket_select_interrupter.ipp0000644000015300001660000001247113042054732025240 0ustar jenkinsjenkins// // detail/impl/socket_select_interrupter.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_SOCKET_SELECT_INTERRUPTER_IPP #define ASIO_DETAIL_IMPL_SOCKET_SELECT_INTERRUPTER_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS_RUNTIME) #if defined(ASIO_WINDOWS) \ || defined(__CYGWIN__) \ || defined(__SYMBIAN32__) #include #include "asio/detail/socket_holder.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/socket_select_interrupter.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { socket_select_interrupter::socket_select_interrupter() { open_descriptors(); } void socket_select_interrupter::open_descriptors() { asio::error_code ec; socket_holder acceptor(socket_ops::socket( AF_INET, SOCK_STREAM, IPPROTO_TCP, ec)); if (acceptor.get() == invalid_socket) asio::detail::throw_error(ec, "socket_select_interrupter"); int opt = 1; socket_ops::state_type acceptor_state = 0; socket_ops::setsockopt(acceptor.get(), acceptor_state, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt), ec); using namespace std; // For memset. sockaddr_in4_type addr; std::size_t addr_len = sizeof(addr); memset(&addr, 0, sizeof(addr)); addr.sin_family = AF_INET; addr.sin_addr.s_addr = socket_ops::host_to_network_long(INADDR_LOOPBACK); addr.sin_port = 0; if (socket_ops::bind(acceptor.get(), (const socket_addr_type*)&addr, addr_len, ec) == socket_error_retval) asio::detail::throw_error(ec, "socket_select_interrupter"); if (socket_ops::getsockname(acceptor.get(), (socket_addr_type*)&addr, &addr_len, ec) == socket_error_retval) asio::detail::throw_error(ec, "socket_select_interrupter"); // Some broken firewalls on Windows will intermittently cause getsockname to // return 0.0.0.0 when the socket is actually bound to 127.0.0.1. We // explicitly specify the target address here to work around this problem. 
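// --------------------------------------------------------------------------
// Illustrative sketch (not asio code): the interrupter above wakes a blocked
// select() by holding a connected loopback TCP pair and sending a single byte
// over it. The pairing steps are: bind a listener to 127.0.0.1 port 0, read
// back the chosen port with getsockname(), connect a client, accept the
// server end, and make both ends non-blocking. A portable POSIX sketch of
// those steps (error handling trimmed; names are illustrative assumptions).
// --------------------------------------------------------------------------
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <fcntl.h>
#include <unistd.h>
#include <cstring>

// Produces a connected (read_fd, write_fd) pair over the loopback interface.
inline bool demo_make_loopback_pair(int& read_fd, int& write_fd)
{
  int listener = ::socket(AF_INET, SOCK_STREAM, 0);

  sockaddr_in addr;
  std::memset(&addr, 0, sizeof(addr));
  addr.sin_family = AF_INET;
  addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
  addr.sin_port = 0;                              // let the kernel pick a port
  socklen_t len = sizeof(addr);
  if (::bind(listener, (sockaddr*)&addr, len) != 0) return false;
  if (::getsockname(listener, (sockaddr*)&addr, &len) != 0) return false;
  addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);  // don't trust reported addr
  if (::listen(listener, 1) != 0) return false;

  int client = ::socket(AF_INET, SOCK_STREAM, 0);
  if (::connect(client, (sockaddr*)&addr, len) != 0) return false;
  int server = ::accept(listener, 0, 0);
  ::close(listener);                              // the pair keeps the link up

  ::fcntl(client, F_SETFL, O_NONBLOCK);           // interrupt() must not block
  ::fcntl(server, F_SETFL, O_NONBLOCK);
  read_fd = server;                               // select() watches this end
  write_fd = client;                              // one byte sent here wakes it
  return true;
}
// --------------------------------------------------------------------------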
addr.sin_addr.s_addr = socket_ops::host_to_network_long(INADDR_LOOPBACK); if (socket_ops::listen(acceptor.get(), SOMAXCONN, ec) == socket_error_retval) asio::detail::throw_error(ec, "socket_select_interrupter"); socket_holder client(socket_ops::socket( AF_INET, SOCK_STREAM, IPPROTO_TCP, ec)); if (client.get() == invalid_socket) asio::detail::throw_error(ec, "socket_select_interrupter"); if (socket_ops::connect(client.get(), (const socket_addr_type*)&addr, addr_len, ec) == socket_error_retval) asio::detail::throw_error(ec, "socket_select_interrupter"); socket_holder server(socket_ops::accept(acceptor.get(), 0, 0, ec)); if (server.get() == invalid_socket) asio::detail::throw_error(ec, "socket_select_interrupter"); ioctl_arg_type non_blocking = 1; socket_ops::state_type client_state = 0; if (socket_ops::ioctl(client.get(), client_state, FIONBIO, &non_blocking, ec)) asio::detail::throw_error(ec, "socket_select_interrupter"); opt = 1; socket_ops::setsockopt(client.get(), client_state, IPPROTO_TCP, TCP_NODELAY, &opt, sizeof(opt), ec); non_blocking = 1; socket_ops::state_type server_state = 0; if (socket_ops::ioctl(server.get(), server_state, FIONBIO, &non_blocking, ec)) asio::detail::throw_error(ec, "socket_select_interrupter"); opt = 1; socket_ops::setsockopt(server.get(), server_state, IPPROTO_TCP, TCP_NODELAY, &opt, sizeof(opt), ec); read_descriptor_ = server.release(); write_descriptor_ = client.release(); } socket_select_interrupter::~socket_select_interrupter() { close_descriptors(); } void socket_select_interrupter::close_descriptors() { asio::error_code ec; socket_ops::state_type state = socket_ops::internal_non_blocking; if (read_descriptor_ != invalid_socket) socket_ops::close(read_descriptor_, state, true, ec); if (write_descriptor_ != invalid_socket) socket_ops::close(write_descriptor_, state, true, ec); } void socket_select_interrupter::recreate() { close_descriptors(); write_descriptor_ = invalid_socket; read_descriptor_ = invalid_socket; open_descriptors(); } void socket_select_interrupter::interrupt() { char byte = 0; socket_ops::buf b; socket_ops::init_buf(b, &byte, 1); asio::error_code ec; socket_ops::send(write_descriptor_, &b, 1, 0, ec); } bool socket_select_interrupter::reset() { char data[1024]; socket_ops::buf b; socket_ops::init_buf(b, data, sizeof(data)); asio::error_code ec; int bytes_read = socket_ops::recv(read_descriptor_, &b, 1, 0, ec); bool was_interrupted = (bytes_read > 0); while (bytes_read == sizeof(data)) bytes_read = socket_ops::recv(read_descriptor_, &b, 1, 0, ec); return was_interrupted; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) #endif // !defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_IMPL_SOCKET_SELECT_INTERRUPTER_IPP galera-3-25.3.20/asio/asio/detail/impl/win_iocp_io_service.ipp0000644000015300001660000003462413042054732023770 0ustar jenkinsjenkins// // detail/impl/win_iocp_io_service.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_IOCP_IO_SERVICE_IPP #define ASIO_DETAIL_IMPL_WIN_IOCP_IO_SERVICE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/detail/cstdint.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/limits.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/win_iocp_io_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct win_iocp_io_service::work_finished_on_block_exit { ~work_finished_on_block_exit() { io_service_->work_finished(); } win_iocp_io_service* io_service_; }; struct win_iocp_io_service::timer_thread_function { void operator()() { while (::InterlockedExchangeAdd(&io_service_->shutdown_, 0) == 0) { if (::WaitForSingleObject(io_service_->waitable_timer_.handle, INFINITE) == WAIT_OBJECT_0) { ::InterlockedExchange(&io_service_->dispatch_required_, 1); ::PostQueuedCompletionStatus(io_service_->iocp_.handle, 0, wake_for_dispatch, 0); } } } win_iocp_io_service* io_service_; }; win_iocp_io_service::win_iocp_io_service( asio::io_service& io_service, size_t concurrency_hint) : asio::detail::service_base(io_service), iocp_(), outstanding_work_(0), stopped_(0), stop_event_posted_(0), shutdown_(0), gqcs_timeout_(get_gqcs_timeout()), dispatch_required_(0) { ASIO_HANDLER_TRACKING_INIT; iocp_.handle = ::CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, 0, static_cast(concurrency_hint < DWORD(~0) ? concurrency_hint : DWORD(~0))); if (!iocp_.handle) { DWORD last_error = ::GetLastError(); asio::error_code ec(last_error, asio::error::get_system_category()); asio::detail::throw_error(ec, "iocp"); } } void win_iocp_io_service::shutdown_service() { ::InterlockedExchange(&shutdown_, 1); if (timer_thread_.get()) { LARGE_INTEGER timeout; timeout.QuadPart = 1; ::SetWaitableTimer(waitable_timer_.handle, &timeout, 1, 0, 0, FALSE); } while (::InterlockedExchangeAdd(&outstanding_work_, 0) > 0) { op_queue ops; timer_queues_.get_all_timers(ops); ops.push(completed_ops_); if (!ops.empty()) { while (win_iocp_operation* op = ops.front()) { ops.pop(); ::InterlockedDecrement(&outstanding_work_); op->destroy(); } } else { DWORD bytes_transferred = 0; dword_ptr_t completion_key = 0; LPOVERLAPPED overlapped = 0; ::GetQueuedCompletionStatus(iocp_.handle, &bytes_transferred, &completion_key, &overlapped, gqcs_timeout_); if (overlapped) { ::InterlockedDecrement(&outstanding_work_); static_cast(overlapped)->destroy(); } } } if (timer_thread_.get()) timer_thread_->join(); } asio::error_code win_iocp_io_service::register_handle( HANDLE handle, asio::error_code& ec) { if (::CreateIoCompletionPort(handle, iocp_.handle, 0, 0) == 0) { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); } else { ec = asio::error_code(); } return ec; } size_t win_iocp_io_service::run(asio::error_code& ec) { if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0) { stop(); ec = asio::error_code(); return 0; } win_iocp_thread_info this_thread; thread_call_stack::context ctx(this, this_thread); size_t n = 0; while (do_one(true, ec)) if (n != (std::numeric_limits::max)()) ++n; return n; } size_t win_iocp_io_service::run_one(asio::error_code& ec) { if 
(::InterlockedExchangeAdd(&outstanding_work_, 0) == 0) { stop(); ec = asio::error_code(); return 0; } win_iocp_thread_info this_thread; thread_call_stack::context ctx(this, this_thread); return do_one(true, ec); } size_t win_iocp_io_service::poll(asio::error_code& ec) { if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0) { stop(); ec = asio::error_code(); return 0; } win_iocp_thread_info this_thread; thread_call_stack::context ctx(this, this_thread); size_t n = 0; while (do_one(false, ec)) if (n != (std::numeric_limits::max)()) ++n; return n; } size_t win_iocp_io_service::poll_one(asio::error_code& ec) { if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0) { stop(); ec = asio::error_code(); return 0; } win_iocp_thread_info this_thread; thread_call_stack::context ctx(this, this_thread); return do_one(false, ec); } void win_iocp_io_service::stop() { if (::InterlockedExchange(&stopped_, 1) == 0) { if (::InterlockedExchange(&stop_event_posted_, 1) == 0) { if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, 0)) { DWORD last_error = ::GetLastError(); asio::error_code ec(last_error, asio::error::get_system_category()); asio::detail::throw_error(ec, "pqcs"); } } } } void win_iocp_io_service::post_deferred_completion(win_iocp_operation* op) { // Flag the operation as ready. op->ready_ = 1; // Enqueue the operation on the I/O completion port. if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, op)) { // Out of resources. Put on completed queue instead. mutex::scoped_lock lock(dispatch_mutex_); completed_ops_.push(op); ::InterlockedExchange(&dispatch_required_, 1); } } void win_iocp_io_service::post_deferred_completions( op_queue& ops) { while (win_iocp_operation* op = ops.front()) { ops.pop(); // Flag the operation as ready. op->ready_ = 1; // Enqueue the operation on the I/O completion port. if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, op)) { // Out of resources. Put on completed queue instead. mutex::scoped_lock lock(dispatch_mutex_); completed_ops_.push(op); completed_ops_.push(ops); ::InterlockedExchange(&dispatch_required_, 1); } } } void win_iocp_io_service::abandon_operations( op_queue& ops) { while (win_iocp_operation* op = ops.front()) { ops.pop(); ::InterlockedDecrement(&outstanding_work_); op->destroy(); } } void win_iocp_io_service::on_pending(win_iocp_operation* op) { if (::InterlockedCompareExchange(&op->ready_, 1, 0) == 1) { // Enqueue the operation on the I/O completion port. if (!::PostQueuedCompletionStatus(iocp_.handle, 0, overlapped_contains_result, op)) { // Out of resources. Put on completed queue instead. mutex::scoped_lock lock(dispatch_mutex_); completed_ops_.push(op); ::InterlockedExchange(&dispatch_required_, 1); } } } void win_iocp_io_service::on_completion(win_iocp_operation* op, DWORD last_error, DWORD bytes_transferred) { // Flag that the operation is ready for invocation. op->ready_ = 1; // Store results in the OVERLAPPED structure. op->Internal = reinterpret_cast( &asio::error::get_system_category()); op->Offset = last_error; op->OffsetHigh = bytes_transferred; // Enqueue the operation on the I/O completion port. if (!::PostQueuedCompletionStatus(iocp_.handle, 0, overlapped_contains_result, op)) { // Out of resources. Put on completed queue instead. mutex::scoped_lock lock(dispatch_mutex_); completed_ops_.push(op); ::InterlockedExchange(&dispatch_required_, 1); } } void win_iocp_io_service::on_completion(win_iocp_operation* op, const asio::error_code& ec, DWORD bytes_transferred) { // Flag that the operation is ready for invocation. 
op->ready_ = 1; // Store results in the OVERLAPPED structure. op->Internal = reinterpret_cast(&ec.category()); op->Offset = ec.value(); op->OffsetHigh = bytes_transferred; // Enqueue the operation on the I/O completion port. if (!::PostQueuedCompletionStatus(iocp_.handle, 0, overlapped_contains_result, op)) { // Out of resources. Put on completed queue instead. mutex::scoped_lock lock(dispatch_mutex_); completed_ops_.push(op); ::InterlockedExchange(&dispatch_required_, 1); } } size_t win_iocp_io_service::do_one(bool block, asio::error_code& ec) { for (;;) { // Try to acquire responsibility for dispatching timers and completed ops. if (::InterlockedCompareExchange(&dispatch_required_, 0, 1) == 1) { mutex::scoped_lock lock(dispatch_mutex_); // Dispatch pending timers and operations. op_queue ops; ops.push(completed_ops_); timer_queues_.get_ready_timers(ops); post_deferred_completions(ops); update_timeout(); } // Get the next operation from the queue. DWORD bytes_transferred = 0; dword_ptr_t completion_key = 0; LPOVERLAPPED overlapped = 0; ::SetLastError(0); BOOL ok = ::GetQueuedCompletionStatus(iocp_.handle, &bytes_transferred, &completion_key, &overlapped, block ? gqcs_timeout_ : 0); DWORD last_error = ::GetLastError(); if (overlapped) { win_iocp_operation* op = static_cast(overlapped); asio::error_code result_ec(last_error, asio::error::get_system_category()); // We may have been passed the last_error and bytes_transferred in the // OVERLAPPED structure itself. if (completion_key == overlapped_contains_result) { result_ec = asio::error_code(static_cast(op->Offset), *reinterpret_cast(op->Internal)); bytes_transferred = op->OffsetHigh; } // Otherwise ensure any result has been saved into the OVERLAPPED // structure. else { op->Internal = reinterpret_cast(&result_ec.category()); op->Offset = result_ec.value(); op->OffsetHigh = bytes_transferred; } // Dispatch the operation only if ready. The operation may not be ready // if the initiating function (e.g. a call to WSARecv) has not yet // returned. This is because the initiating function still wants access // to the operation's OVERLAPPED structure. if (::InterlockedCompareExchange(&op->ready_, 1, 0) == 1) { // Ensure the count of outstanding work is decremented on block exit. work_finished_on_block_exit on_exit = { this }; (void)on_exit; op->complete(*this, result_ec, bytes_transferred); ec = asio::error_code(); return 1; } } else if (!ok) { if (last_error != WAIT_TIMEOUT) { ec = asio::error_code(last_error, asio::error::get_system_category()); return 0; } // If we're not polling we need to keep going until we get a real handler. if (block) continue; ec = asio::error_code(); return 0; } else if (completion_key == wake_for_dispatch) { // We have been woken up to try to acquire responsibility for dispatching // timers and completed operations. } else { // Indicate that there is no longer an in-flight stop event. ::InterlockedExchange(&stop_event_posted_, 0); // The stopped_ flag is always checked to ensure that any leftover // stop events from a previous run invocation are ignored. if (::InterlockedExchangeAdd(&stopped_, 0) != 0) { // Wake up next thread that is blocked on GetQueuedCompletionStatus. 
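// --------------------------------------------------------------------------
// Illustrative sketch (not asio code): win_iocp_io_service funnels all work
// through one I/O completion port: completions are queued with
// PostQueuedCompletionStatus and worker threads pull them with
// GetQueuedCompletionStatus, with reserved completion keys used to wake
// threads for stop/dispatch. A minimal Win32 sketch of that shape; the key
// values and names are illustrative assumptions.
// --------------------------------------------------------------------------
#include <windows.h>
#include <cstdio>

int main()
{
  // One port; 0 lets the system choose the concurrency value.
  HANDLE iocp = ::CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, 0, 0);

  const ULONG_PTR work_key = 1, stop_key = 2;

  // Producer side: queue two dummy work items and a stop marker.
  ::PostQueuedCompletionStatus(iocp, 0, work_key, 0);
  ::PostQueuedCompletionStatus(iocp, 0, work_key, 0);
  ::PostQueuedCompletionStatus(iocp, 0, stop_key, 0);

  // Consumer loop: block up to 500 ms per wait, run items until stop.
  for (;;)
  {
    DWORD bytes = 0;
    ULONG_PTR key = 0;
    LPOVERLAPPED ov = 0;
    BOOL ok = ::GetQueuedCompletionStatus(iocp, &bytes, &key, &ov, 500);
    if (!ok && ov == 0)
    {
      if (::GetLastError() == WAIT_TIMEOUT)
        continue;                     // nothing queued yet; wait again
      break;                          // port closed or other failure
    }
    if (key == stop_key)
      break;                          // sentinel key posted to stop the loop
    std::printf("work item dequeued (key=%lu)\n", (unsigned long)key);
  }

  ::CloseHandle(iocp);
}
// --------------------------------------------------------------------------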
if (::InterlockedExchange(&stop_event_posted_, 1) == 0) { if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, 0)) { last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); return 0; } } ec = asio::error_code(); return 0; } } } } DWORD win_iocp_io_service::get_gqcs_timeout() { OSVERSIONINFOEX osvi; ZeroMemory(&osvi, sizeof(osvi)); osvi.dwOSVersionInfoSize = sizeof(osvi); osvi.dwMajorVersion = 6ul; const uint64_t condition_mask = ::VerSetConditionMask( 0, VER_MAJORVERSION, VER_GREATER_EQUAL); if (!!::VerifyVersionInfo(&osvi, VER_MAJORVERSION, condition_mask)) return INFINITE; return default_gqcs_timeout; } void win_iocp_io_service::do_add_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(dispatch_mutex_); timer_queues_.insert(&queue); if (!waitable_timer_.handle) { waitable_timer_.handle = ::CreateWaitableTimer(0, FALSE, 0); if (waitable_timer_.handle == 0) { DWORD last_error = ::GetLastError(); asio::error_code ec(last_error, asio::error::get_system_category()); asio::detail::throw_error(ec, "timer"); } LARGE_INTEGER timeout; timeout.QuadPart = -max_timeout_usec; timeout.QuadPart *= 10; ::SetWaitableTimer(waitable_timer_.handle, &timeout, max_timeout_msec, 0, 0, FALSE); } if (!timer_thread_.get()) { timer_thread_function thread_function = { this }; timer_thread_.reset(new thread(thread_function, 65536)); } } void win_iocp_io_service::do_remove_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(dispatch_mutex_); timer_queues_.erase(&queue); } void win_iocp_io_service::update_timeout() { if (timer_thread_.get()) { // There's no point updating the waitable timer if the new timeout period // exceeds the maximum timeout. In that case, we might as well wait for the // existing period of the timer to expire. long timeout_usec = timer_queues_.wait_duration_usec(max_timeout_usec); if (timeout_usec < max_timeout_usec) { LARGE_INTEGER timeout; timeout.QuadPart = -timeout_usec; timeout.QuadPart *= 10; ::SetWaitableTimer(waitable_timer_.handle, &timeout, max_timeout_msec, 0, 0, FALSE); } } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_IMPL_WIN_IOCP_IO_SERVICE_IPP galera-3-25.3.20/asio/asio/detail/impl/reactive_serial_port_service.ipp0000644000015300001660000000766313042054732025702 0ustar jenkinsjenkins// // detail/impl/reactive_serial_port_service.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_REACTIVE_SERIAL_PORT_SERVICE_IPP #define ASIO_DETAIL_IMPL_REACTIVE_SERIAL_PORT_SERVICE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_SERIAL_PORT) #if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #include #include "asio/detail/reactive_serial_port_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { reactive_serial_port_service::reactive_serial_port_service( asio::io_service& io_service) : descriptor_service_(io_service) { } void reactive_serial_port_service::shutdown_service() { descriptor_service_.shutdown_service(); } asio::error_code reactive_serial_port_service::open( reactive_serial_port_service::implementation_type& impl, const std::string& device, asio::error_code& ec) { if (is_open(impl)) { ec = asio::error::already_open; return ec; } descriptor_ops::state_type state = 0; int fd = descriptor_ops::open(device.c_str(), O_RDWR | O_NONBLOCK | O_NOCTTY, ec); if (fd < 0) return ec; int s = descriptor_ops::fcntl(fd, F_GETFL, ec); if (s >= 0) s = descriptor_ops::fcntl(fd, F_SETFL, s | O_NONBLOCK, ec); if (s < 0) { asio::error_code ignored_ec; descriptor_ops::close(fd, state, ignored_ec); return ec; } // Set up default serial port options. termios ios; errno = 0; s = descriptor_ops::error_wrapper(::tcgetattr(fd, &ios), ec); if (s >= 0) { #if defined(_BSD_SOURCE) ::cfmakeraw(&ios); #else ios.c_iflag &= ~(IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON); ios.c_oflag &= ~OPOST; ios.c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN); ios.c_cflag &= ~(CSIZE | PARENB); ios.c_cflag |= CS8; #endif ios.c_iflag |= IGNPAR; ios.c_cflag |= CREAD | CLOCAL; errno = 0; s = descriptor_ops::error_wrapper(::tcsetattr(fd, TCSANOW, &ios), ec); } if (s < 0) { asio::error_code ignored_ec; descriptor_ops::close(fd, state, ignored_ec); return ec; } // We're done. Take ownership of the serial port descriptor. 
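// --------------------------------------------------------------------------
// Illustrative sketch (not asio code): open() above puts the descriptor into
// raw mode either with ::cfmakeraw() (where available, BSD/glibc) or by
// clearing the equivalent termios flags by hand. A self-contained POSIX
// sketch doing the same, plus an explicit baud rate; the device path and all
// names are illustrative assumptions.
// --------------------------------------------------------------------------
#include <fcntl.h>
#include <termios.h>
#include <unistd.h>

// Returns an open raw-mode serial descriptor, or -1 on failure.
inline int demo_open_raw_serial(const char* device)
{
  int fd = ::open(device, O_RDWR | O_NONBLOCK | O_NOCTTY);
  if (fd < 0) return -1;

  termios tio;
  if (::tcgetattr(fd, &tio) != 0) { ::close(fd); return -1; }

  ::cfmakeraw(&tio);                 // no echo, no canonical mode, 8-bit chars
  tio.c_cflag |= CREAD | CLOCAL;     // enable receiver, ignore modem control
  ::cfsetispeed(&tio, B115200);
  ::cfsetospeed(&tio, B115200);

  if (::tcsetattr(fd, TCSANOW, &tio) != 0) { ::close(fd); return -1; }
  return fd;
}
// --------------------------------------------------------------------------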
if (descriptor_service_.assign(impl, fd, ec)) { asio::error_code ignored_ec; descriptor_ops::close(fd, state, ignored_ec); } return ec; } asio::error_code reactive_serial_port_service::do_set_option( reactive_serial_port_service::implementation_type& impl, reactive_serial_port_service::store_function_type store, const void* option, asio::error_code& ec) { termios ios; errno = 0; descriptor_ops::error_wrapper(::tcgetattr( descriptor_service_.native_handle(impl), &ios), ec); if (ec) return ec; if (store(option, ios, ec)) return ec; errno = 0; descriptor_ops::error_wrapper(::tcsetattr( descriptor_service_.native_handle(impl), TCSANOW, &ios), ec); return ec; } asio::error_code reactive_serial_port_service::do_get_option( const reactive_serial_port_service::implementation_type& impl, reactive_serial_port_service::load_function_type load, void* option, asio::error_code& ec) const { termios ios; errno = 0; descriptor_ops::error_wrapper(::tcgetattr( descriptor_service_.native_handle(impl), &ios), ec); if (ec) return ec; return load(option, ios, ec); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #endif // defined(ASIO_HAS_SERIAL_PORT) #endif // ASIO_DETAIL_IMPL_REACTIVE_SERIAL_PORT_SERVICE_IPP galera-3-25.3.20/asio/asio/detail/impl/kqueue_reactor.ipp0000644000015300001660000003344213042054732022765 0ustar jenkinsjenkins// // detail/impl/kqueue_reactor.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2005 Stefan Arentz (stefan at soze dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP #define ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_KQUEUE) #include "asio/detail/kqueue_reactor.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" #if defined(__NetBSD__) # define ASIO_KQUEUE_EV_SET(ev, ident, filt, flags, fflags, data, udata) \ EV_SET(ev, ident, filt, flags, fflags, data, \ reinterpret_cast(static_cast(udata))) #else # define ASIO_KQUEUE_EV_SET(ev, ident, filt, flags, fflags, data, udata) \ EV_SET(ev, ident, filt, flags, fflags, data, udata) #endif namespace asio { namespace detail { kqueue_reactor::kqueue_reactor(asio::io_service& io_service) : asio::detail::service_base(io_service), io_service_(use_service(io_service)), mutex_(), kqueue_fd_(do_kqueue_create()), interrupter_(), shutdown_(false) { struct kevent events[1]; ASIO_KQUEUE_EV_SET(&events[0], interrupter_.read_descriptor(), EVFILT_READ, EV_ADD, 0, 0, &interrupter_); if (::kevent(kqueue_fd_, events, 1, 0, 0, 0) == -1) { asio::error_code error(errno, asio::error::get_system_category()); asio::detail::throw_error(error); } } kqueue_reactor::~kqueue_reactor() { close(kqueue_fd_); } void kqueue_reactor::shutdown_service() { mutex::scoped_lock lock(mutex_); shutdown_ = true; lock.unlock(); op_queue ops; while (descriptor_state* state = registered_descriptors_.first()) { for (int i = 0; i < max_ops; ++i) ops.push(state->op_queue_[i]); state->shutdown_ = true; registered_descriptors_.free(state); } timer_queues_.get_all_timers(ops); io_service_.abandon_operations(ops); } void 
kqueue_reactor::fork_service(asio::io_service::fork_event fork_ev) { if (fork_ev == asio::io_service::fork_child) { // The kqueue descriptor is automatically closed in the child. kqueue_fd_ = -1; kqueue_fd_ = do_kqueue_create(); interrupter_.recreate(); struct kevent events[2]; ASIO_KQUEUE_EV_SET(&events[0], interrupter_.read_descriptor(), EVFILT_READ, EV_ADD, 0, 0, &interrupter_); if (::kevent(kqueue_fd_, events, 1, 0, 0, 0) == -1) { asio::error_code ec(errno, asio::error::get_system_category()); asio::detail::throw_error(ec, "kqueue interrupter registration"); } // Re-register all descriptors with kqueue. mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_); for (descriptor_state* state = registered_descriptors_.first(); state != 0; state = state->next_) { if (state->num_kevents_ > 0) { ASIO_KQUEUE_EV_SET(&events[0], state->descriptor_, EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, state); ASIO_KQUEUE_EV_SET(&events[1], state->descriptor_, EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, state); if (::kevent(kqueue_fd_, events, state->num_kevents_, 0, 0, 0) == -1) { asio::error_code ec(errno, asio::error::get_system_category()); asio::detail::throw_error(ec, "kqueue re-registration"); } } } } } void kqueue_reactor::init_task() { io_service_.init_task(); } int kqueue_reactor::register_descriptor(socket_type descriptor, kqueue_reactor::per_descriptor_data& descriptor_data) { descriptor_data = allocate_descriptor_state(); mutex::scoped_lock lock(descriptor_data->mutex_); descriptor_data->descriptor_ = descriptor; descriptor_data->num_kevents_ = 0; descriptor_data->shutdown_ = false; return 0; } int kqueue_reactor::register_internal_descriptor( int op_type, socket_type descriptor, kqueue_reactor::per_descriptor_data& descriptor_data, reactor_op* op) { descriptor_data = allocate_descriptor_state(); mutex::scoped_lock lock(descriptor_data->mutex_); descriptor_data->descriptor_ = descriptor; descriptor_data->num_kevents_ = 1; descriptor_data->shutdown_ = false; descriptor_data->op_queue_[op_type].push(op); struct kevent events[1]; ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, descriptor_data); if (::kevent(kqueue_fd_, events, 1, 0, 0, 0) == -1) return errno; return 0; } void kqueue_reactor::move_descriptor(socket_type, kqueue_reactor::per_descriptor_data& target_descriptor_data, kqueue_reactor::per_descriptor_data& source_descriptor_data) { target_descriptor_data = source_descriptor_data; source_descriptor_data = 0; } void kqueue_reactor::start_op(int op_type, socket_type descriptor, kqueue_reactor::per_descriptor_data& descriptor_data, reactor_op* op, bool is_continuation, bool allow_speculative) { if (!descriptor_data) { op->ec_ = asio::error::bad_descriptor; post_immediate_completion(op, is_continuation); return; } mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); if (descriptor_data->shutdown_) { post_immediate_completion(op, is_continuation); return; } if (descriptor_data->op_queue_[op_type].empty()) { static const int num_kevents[max_ops] = { 1, 2, 1 }; if (allow_speculative && (op_type != read_op || descriptor_data->op_queue_[except_op].empty())) { if (op->perform()) { descriptor_lock.unlock(); io_service_.post_immediate_completion(op, is_continuation); return; } if (descriptor_data->num_kevents_ < num_kevents[op_type]) { struct kevent events[2]; ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, descriptor_data); ASIO_KQUEUE_EV_SET(&events[1], descriptor, EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, descriptor_data); if 
(::kevent(kqueue_fd_, events, num_kevents[op_type], 0, 0, 0) != -1) { descriptor_data->num_kevents_ = num_kevents[op_type]; } else { op->ec_ = asio::error_code(errno, asio::error::get_system_category()); io_service_.post_immediate_completion(op, is_continuation); return; } } } else { if (descriptor_data->num_kevents_ < num_kevents[op_type]) descriptor_data->num_kevents_ = num_kevents[op_type]; struct kevent events[2]; ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, descriptor_data); ASIO_KQUEUE_EV_SET(&events[1], descriptor, EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, descriptor_data); ::kevent(kqueue_fd_, events, descriptor_data->num_kevents_, 0, 0, 0); } } descriptor_data->op_queue_[op_type].push(op); io_service_.work_started(); } void kqueue_reactor::cancel_ops(socket_type, kqueue_reactor::per_descriptor_data& descriptor_data) { if (!descriptor_data) return; mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); op_queue ops; for (int i = 0; i < max_ops; ++i) { while (reactor_op* op = descriptor_data->op_queue_[i].front()) { op->ec_ = asio::error::operation_aborted; descriptor_data->op_queue_[i].pop(); ops.push(op); } } descriptor_lock.unlock(); io_service_.post_deferred_completions(ops); } void kqueue_reactor::deregister_descriptor(socket_type descriptor, kqueue_reactor::per_descriptor_data& descriptor_data, bool closing) { if (!descriptor_data) return; mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); if (!descriptor_data->shutdown_) { if (closing) { // The descriptor will be automatically removed from the kqueue when it // is closed. } else { struct kevent events[2]; ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ, EV_DELETE, 0, 0, 0); ASIO_KQUEUE_EV_SET(&events[1], descriptor, EVFILT_WRITE, EV_DELETE, 0, 0, 0); ::kevent(kqueue_fd_, events, descriptor_data->num_kevents_, 0, 0, 0); } op_queue ops; for (int i = 0; i < max_ops; ++i) { while (reactor_op* op = descriptor_data->op_queue_[i].front()) { op->ec_ = asio::error::operation_aborted; descriptor_data->op_queue_[i].pop(); ops.push(op); } } descriptor_data->descriptor_ = -1; descriptor_data->shutdown_ = true; descriptor_lock.unlock(); free_descriptor_state(descriptor_data); descriptor_data = 0; io_service_.post_deferred_completions(ops); } } void kqueue_reactor::deregister_internal_descriptor(socket_type descriptor, kqueue_reactor::per_descriptor_data& descriptor_data) { if (!descriptor_data) return; mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); if (!descriptor_data->shutdown_) { struct kevent events[2]; ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ, EV_DELETE, 0, 0, 0); ASIO_KQUEUE_EV_SET(&events[1], descriptor, EVFILT_WRITE, EV_DELETE, 0, 0, 0); ::kevent(kqueue_fd_, events, descriptor_data->num_kevents_, 0, 0, 0); op_queue ops; for (int i = 0; i < max_ops; ++i) ops.push(descriptor_data->op_queue_[i]); descriptor_data->descriptor_ = -1; descriptor_data->shutdown_ = true; descriptor_lock.unlock(); free_descriptor_state(descriptor_data); descriptor_data = 0; } } void kqueue_reactor::run(bool block, op_queue& ops) { mutex::scoped_lock lock(mutex_); // Determine how long to block while waiting for events. timespec timeout_buf = { 0, 0 }; timespec* timeout = block ? get_timeout(timeout_buf) : &timeout_buf; lock.unlock(); // Block on the kqueue descriptor. struct kevent events[128]; int num_events = kevent(kqueue_fd_, 0, 0, events, 128, timeout); // Dispatch the waiting events. 
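// --------------------------------------------------------------------------
// Illustrative sketch (not asio code): the reactor above registers
// descriptors with EV_SET/kevent() using EV_ADD | EV_CLEAR and then blocks in
// a single kevent() call with a capped timespec timeout; the loop that
// follows dispatches whatever fired. A minimal BSD/macOS sketch of that
// register-then-wait shape; names and the 5-second timeout are illustrative.
// --------------------------------------------------------------------------
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <unistd.h>
#include <cstdio>

int main()
{
  int kq = ::kqueue();

  // Watch stdin for readability, edge-triggered style (EV_CLEAR).
  struct kevent change;
  EV_SET(&change, STDIN_FILENO, EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, 0);
  ::kevent(kq, &change, 1, 0, 0, 0);          // apply the change list only

  // Wait up to 5 seconds for events (the reactor caps its wait at 5 minutes).
  struct kevent events[16];
  timespec timeout = { 5, 0 };
  int n = ::kevent(kq, 0, 0, events, 16, &timeout);

  for (int i = 0; i < n; ++i)
    std::printf("fd %d readable, %lld bytes pending\n",
        (int)events[i].ident, (long long)events[i].data);

  ::close(kq);
}
// --------------------------------------------------------------------------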
for (int i = 0; i < num_events; ++i) { void* ptr = reinterpret_cast(events[i].udata); if (ptr == &interrupter_) { interrupter_.reset(); } else { descriptor_state* descriptor_data = static_cast(ptr); mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); if (events[i].filter == EVFILT_WRITE && descriptor_data->num_kevents_ == 2 && descriptor_data->op_queue_[write_op].empty()) { // Some descriptor types, like serial ports, don't seem to support // EV_CLEAR with EVFILT_WRITE. Since we have no pending write // operations we'll remove the EVFILT_WRITE registration here so that // we don't end up in a tight spin. struct kevent delete_events[1]; ASIO_KQUEUE_EV_SET(&delete_events[0], descriptor_data->descriptor_, EVFILT_WRITE, EV_DELETE, 0, 0, 0); ::kevent(kqueue_fd_, delete_events, 1, 0, 0, 0); descriptor_data->num_kevents_ = 1; } // Exception operations must be processed first to ensure that any // out-of-band data is read before normal data. #if defined(__NetBSD__) static const unsigned int filter[max_ops] = #else static const int filter[max_ops] = #endif { EVFILT_READ, EVFILT_WRITE, EVFILT_READ }; for (int j = max_ops - 1; j >= 0; --j) { if (events[i].filter == filter[j]) { if (j != except_op || events[i].flags & EV_OOBAND) { while (reactor_op* op = descriptor_data->op_queue_[j].front()) { if (events[i].flags & EV_ERROR) { op->ec_ = asio::error_code( static_cast(events[i].data), asio::error::get_system_category()); descriptor_data->op_queue_[j].pop(); ops.push(op); } if (op->perform()) { descriptor_data->op_queue_[j].pop(); ops.push(op); } else break; } } } } } } lock.lock(); timer_queues_.get_ready_timers(ops); } void kqueue_reactor::interrupt() { interrupter_.interrupt(); } int kqueue_reactor::do_kqueue_create() { int fd = ::kqueue(); if (fd == -1) { asio::error_code ec(errno, asio::error::get_system_category()); asio::detail::throw_error(ec, "kqueue"); } return fd; } kqueue_reactor::descriptor_state* kqueue_reactor::allocate_descriptor_state() { mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_); return registered_descriptors_.alloc(); } void kqueue_reactor::free_descriptor_state(kqueue_reactor::descriptor_state* s) { mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_); registered_descriptors_.free(s); } void kqueue_reactor::do_add_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(mutex_); timer_queues_.insert(&queue); } void kqueue_reactor::do_remove_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(mutex_); timer_queues_.erase(&queue); } timespec* kqueue_reactor::get_timeout(timespec& ts) { // By default we will wait no longer than 5 minutes. This will ensure that // any changes to the system clock are detected after no longer than this. long usec = timer_queues_.wait_duration_usec(5 * 60 * 1000 * 1000); ts.tv_sec = usec / 1000000; ts.tv_nsec = (usec % 1000000) * 1000; return &ts; } } // namespace detail } // namespace asio #undef ASIO_KQUEUE_EV_SET #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_KQUEUE) #endif // ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP galera-3-25.3.20/asio/asio/detail/impl/dev_poll_reactor.ipp0000644000015300001660000003037013042054732023267 0ustar jenkinsjenkins// // detail/impl/dev_poll_reactor.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_IPP #define ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_DEV_POLL) #include "asio/detail/dev_poll_reactor.hpp" #include "asio/detail/assert.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { dev_poll_reactor::dev_poll_reactor(asio::io_service& io_service) : asio::detail::service_base(io_service), io_service_(use_service(io_service)), mutex_(), dev_poll_fd_(do_dev_poll_create()), interrupter_(), shutdown_(false) { // Add the interrupter's descriptor to /dev/poll. ::pollfd ev = { 0, 0, 0 }; ev.fd = interrupter_.read_descriptor(); ev.events = POLLIN | POLLERR; ev.revents = 0; ::write(dev_poll_fd_, &ev, sizeof(ev)); } dev_poll_reactor::~dev_poll_reactor() { shutdown_service(); ::close(dev_poll_fd_); } void dev_poll_reactor::shutdown_service() { asio::detail::mutex::scoped_lock lock(mutex_); shutdown_ = true; lock.unlock(); op_queue ops; for (int i = 0; i < max_ops; ++i) op_queue_[i].get_all_operations(ops); timer_queues_.get_all_timers(ops); io_service_.abandon_operations(ops); } void dev_poll_reactor::fork_service(asio::io_service::fork_event fork_ev) { if (fork_ev == asio::io_service::fork_child) { detail::mutex::scoped_lock lock(mutex_); if (dev_poll_fd_ != -1) ::close(dev_poll_fd_); dev_poll_fd_ = -1; dev_poll_fd_ = do_dev_poll_create(); interrupter_.recreate(); // Add the interrupter's descriptor to /dev/poll. ::pollfd ev = { 0, 0, 0 }; ev.fd = interrupter_.read_descriptor(); ev.events = POLLIN | POLLERR; ev.revents = 0; ::write(dev_poll_fd_, &ev, sizeof(ev)); // Re-register all descriptors with /dev/poll. The changes will be written // to the /dev/poll descriptor the next time the reactor is run. 
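// --------------------------------------------------------------------------
// Illustrative sketch (not asio code, Solaris /dev/poll only): the reactor
// registers interest by write()-ing ::pollfd records to the /dev/poll
// descriptor and harvests ready descriptors with ioctl(DP_POLL); POLLREMOVE
// drops a registration. The sketch below mirrors the calls used in this file
// rather than independent documentation, so treat the details as assumptions.
// --------------------------------------------------------------------------
#include <sys/devpoll.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>
#include <cstdio>

int main()
{
  int dp = ::open("/dev/poll", O_RDWR);

  // Register stdin (fd 0) for readability plus error/hangup.
  ::pollfd reg = { 0, 0, 0 };
  reg.fd = 0;
  reg.events = POLLIN | POLLERR | POLLHUP;
  ::write(dp, &reg, sizeof(reg));

  // Wait up to 5 seconds for ready descriptors.
  ::pollfd ready[8];
  ::dvpoll dvp;
  dvp.dp_fds = ready;
  dvp.dp_nfds = 8;
  dvp.dp_timeout = 5000;              // milliseconds
  int n = ::ioctl(dp, DP_POLL, &dvp);

  for (int i = 0; i < n; ++i)
    std::printf("fd %d ready, revents=0x%x\n", ready[i].fd, ready[i].revents);

  // Drop the registration and clean up.
  reg.events = POLLREMOVE;
  ::write(dp, &reg, sizeof(reg));
  ::close(dp);
}
// --------------------------------------------------------------------------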
for (int i = 0; i < max_ops; ++i) { reactor_op_queue::iterator iter = op_queue_[i].begin(); reactor_op_queue::iterator end = op_queue_[i].end(); for (; iter != end; ++iter) { ::pollfd& pending_ev = add_pending_event_change(iter->first); pending_ev.events |= POLLERR | POLLHUP; switch (i) { case read_op: pending_ev.events |= POLLIN; break; case write_op: pending_ev.events |= POLLOUT; break; case except_op: pending_ev.events |= POLLPRI; break; default: break; } } } interrupter_.interrupt(); } } void dev_poll_reactor::init_task() { io_service_.init_task(); } int dev_poll_reactor::register_descriptor(socket_type, per_descriptor_data&) { return 0; } int dev_poll_reactor::register_internal_descriptor(int op_type, socket_type descriptor, per_descriptor_data&, reactor_op* op) { asio::detail::mutex::scoped_lock lock(mutex_); op_queue_[op_type].enqueue_operation(descriptor, op); ::pollfd& ev = add_pending_event_change(descriptor); ev.events = POLLERR | POLLHUP; switch (op_type) { case read_op: ev.events |= POLLIN; break; case write_op: ev.events |= POLLOUT; break; case except_op: ev.events |= POLLPRI; break; default: break; } interrupter_.interrupt(); return 0; } void dev_poll_reactor::move_descriptor(socket_type, dev_poll_reactor::per_descriptor_data&, dev_poll_reactor::per_descriptor_data&) { } void dev_poll_reactor::start_op(int op_type, socket_type descriptor, dev_poll_reactor::per_descriptor_data&, reactor_op* op, bool is_continuation, bool allow_speculative) { asio::detail::mutex::scoped_lock lock(mutex_); if (shutdown_) { post_immediate_completion(op, is_continuation); return; } if (allow_speculative) { if (op_type != read_op || !op_queue_[except_op].has_operation(descriptor)) { if (!op_queue_[op_type].has_operation(descriptor)) { if (op->perform()) { lock.unlock(); io_service_.post_immediate_completion(op, is_continuation); return; } } } } bool first = op_queue_[op_type].enqueue_operation(descriptor, op); io_service_.work_started(); if (first) { ::pollfd& ev = add_pending_event_change(descriptor); ev.events = POLLERR | POLLHUP; if (op_type == read_op || op_queue_[read_op].has_operation(descriptor)) ev.events |= POLLIN; if (op_type == write_op || op_queue_[write_op].has_operation(descriptor)) ev.events |= POLLOUT; if (op_type == except_op || op_queue_[except_op].has_operation(descriptor)) ev.events |= POLLPRI; interrupter_.interrupt(); } } void dev_poll_reactor::cancel_ops(socket_type descriptor, dev_poll_reactor::per_descriptor_data&) { asio::detail::mutex::scoped_lock lock(mutex_); cancel_ops_unlocked(descriptor, asio::error::operation_aborted); } void dev_poll_reactor::deregister_descriptor(socket_type descriptor, dev_poll_reactor::per_descriptor_data&, bool) { asio::detail::mutex::scoped_lock lock(mutex_); // Remove the descriptor from /dev/poll. ::pollfd& ev = add_pending_event_change(descriptor); ev.events = POLLREMOVE; interrupter_.interrupt(); // Cancel any outstanding operations associated with the descriptor. cancel_ops_unlocked(descriptor, asio::error::operation_aborted); } void dev_poll_reactor::deregister_internal_descriptor( socket_type descriptor, dev_poll_reactor::per_descriptor_data&) { asio::detail::mutex::scoped_lock lock(mutex_); // Remove the descriptor from /dev/poll. Since this function is only called // during a fork, we can apply the change immediately. ::pollfd ev = { 0, 0, 0 }; ev.fd = descriptor; ev.events = POLLREMOVE; ev.revents = 0; ::write(dev_poll_fd_, &ev, sizeof(ev)); // Destroy all operations associated with the descriptor. 
op_queue ops; asio::error_code ec; for (int i = 0; i < max_ops; ++i) op_queue_[i].cancel_operations(descriptor, ops, ec); } void dev_poll_reactor::run(bool block, op_queue& ops) { asio::detail::mutex::scoped_lock lock(mutex_); // We can return immediately if there's no work to do and the reactor is // not supposed to block. if (!block && op_queue_[read_op].empty() && op_queue_[write_op].empty() && op_queue_[except_op].empty() && timer_queues_.all_empty()) return; // Write the pending event registration changes to the /dev/poll descriptor. std::size_t events_size = sizeof(::pollfd) * pending_event_changes_.size(); if (events_size > 0) { errno = 0; int result = ::write(dev_poll_fd_, &pending_event_changes_[0], events_size); if (result != static_cast(events_size)) { asio::error_code ec = asio::error_code( errno, asio::error::get_system_category()); for (std::size_t i = 0; i < pending_event_changes_.size(); ++i) { int descriptor = pending_event_changes_[i].fd; for (int j = 0; j < max_ops; ++j) op_queue_[j].cancel_operations(descriptor, ops, ec); } } pending_event_changes_.clear(); pending_event_change_index_.clear(); } int timeout = block ? get_timeout() : 0; lock.unlock(); // Block on the /dev/poll descriptor. ::pollfd events[128] = { { 0, 0, 0 } }; ::dvpoll dp = { 0, 0, 0 }; dp.dp_fds = events; dp.dp_nfds = 128; dp.dp_timeout = timeout; int num_events = ::ioctl(dev_poll_fd_, DP_POLL, &dp); lock.lock(); // Dispatch the waiting events. for (int i = 0; i < num_events; ++i) { int descriptor = events[i].fd; if (descriptor == interrupter_.read_descriptor()) { interrupter_.reset(); } else { bool more_reads = false; bool more_writes = false; bool more_except = false; // Exception operations must be processed first to ensure that any // out-of-band data is read before normal data. if (events[i].events & (POLLPRI | POLLERR | POLLHUP)) more_except = op_queue_[except_op].perform_operations(descriptor, ops); else more_except = op_queue_[except_op].has_operation(descriptor); if (events[i].events & (POLLIN | POLLERR | POLLHUP)) more_reads = op_queue_[read_op].perform_operations(descriptor, ops); else more_reads = op_queue_[read_op].has_operation(descriptor); if (events[i].events & (POLLOUT | POLLERR | POLLHUP)) more_writes = op_queue_[write_op].perform_operations(descriptor, ops); else more_writes = op_queue_[write_op].has_operation(descriptor); if ((events[i].events & (POLLERR | POLLHUP)) != 0 && !more_except && !more_reads && !more_writes) { // If we have an event and no operations associated with the // descriptor then we need to delete the descriptor from /dev/poll. // The poll operation can produce POLLHUP or POLLERR events when there // is no operation pending, so if we do not remove the descriptor we // can end up in a tight polling loop. 
::pollfd ev = { 0, 0, 0 }; ev.fd = descriptor; ev.events = POLLREMOVE; ev.revents = 0; ::write(dev_poll_fd_, &ev, sizeof(ev)); } else { ::pollfd ev = { 0, 0, 0 }; ev.fd = descriptor; ev.events = POLLERR | POLLHUP; if (more_reads) ev.events |= POLLIN; if (more_writes) ev.events |= POLLOUT; if (more_except) ev.events |= POLLPRI; ev.revents = 0; int result = ::write(dev_poll_fd_, &ev, sizeof(ev)); if (result != sizeof(ev)) { asio::error_code ec(errno, asio::error::get_system_category()); for (int j = 0; j < max_ops; ++j) op_queue_[j].cancel_operations(descriptor, ops, ec); } } } } timer_queues_.get_ready_timers(ops); } void dev_poll_reactor::interrupt() { interrupter_.interrupt(); } int dev_poll_reactor::do_dev_poll_create() { int fd = ::open("/dev/poll", O_RDWR); if (fd == -1) { asio::error_code ec(errno, asio::error::get_system_category()); asio::detail::throw_error(ec, "/dev/poll"); } return fd; } void dev_poll_reactor::do_add_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(mutex_); timer_queues_.insert(&queue); } void dev_poll_reactor::do_remove_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(mutex_); timer_queues_.erase(&queue); } int dev_poll_reactor::get_timeout() { // By default we will wait no longer than 5 minutes. This will ensure that // any changes to the system clock are detected after no longer than this. return timer_queues_.wait_duration_msec(5 * 60 * 1000); } void dev_poll_reactor::cancel_ops_unlocked(socket_type descriptor, const asio::error_code& ec) { bool need_interrupt = false; op_queue ops; for (int i = 0; i < max_ops; ++i) need_interrupt = op_queue_[i].cancel_operations( descriptor, ops, ec) || need_interrupt; io_service_.post_deferred_completions(ops); if (need_interrupt) interrupter_.interrupt(); } ::pollfd& dev_poll_reactor::add_pending_event_change(int descriptor) { hash_map::iterator iter = pending_event_change_index_.find(descriptor); if (iter == pending_event_change_index_.end()) { std::size_t index = pending_event_changes_.size(); pending_event_changes_.reserve(pending_event_changes_.size() + 1); pending_event_change_index_.insert(std::make_pair(descriptor, index)); pending_event_changes_.push_back(::pollfd()); pending_event_changes_[index].fd = descriptor; pending_event_changes_[index].revents = 0; return pending_event_changes_[index]; } else { return pending_event_changes_[iter->second]; } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_DEV_POLL) #endif // ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_IPP galera-3-25.3.20/asio/asio/detail/impl/pipe_select_interrupter.ipp0000644000015300001660000000565313042054732024711 0ustar jenkinsjenkins// // detail/impl/pipe_select_interrupter.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_PIPE_SELECT_INTERRUPTER_IPP #define ASIO_DETAIL_IMPL_PIPE_SELECT_INTERRUPTER_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS_RUNTIME) #if !defined(ASIO_WINDOWS) #if !defined(__CYGWIN__) #if !defined(__SYMBIAN32__) #if !defined(ASIO_HAS_EVENTFD) #include #include #include #include #include "asio/detail/pipe_select_interrupter.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { pipe_select_interrupter::pipe_select_interrupter() { open_descriptors(); } void pipe_select_interrupter::open_descriptors() { int pipe_fds[2]; if (pipe(pipe_fds) == 0) { read_descriptor_ = pipe_fds[0]; ::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK); write_descriptor_ = pipe_fds[1]; ::fcntl(write_descriptor_, F_SETFL, O_NONBLOCK); #if defined(FD_CLOEXEC) ::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC); ::fcntl(write_descriptor_, F_SETFD, FD_CLOEXEC); #endif // defined(FD_CLOEXEC) } else { asio::error_code ec(errno, asio::error::get_system_category()); asio::detail::throw_error(ec, "pipe_select_interrupter"); } } pipe_select_interrupter::~pipe_select_interrupter() { close_descriptors(); } void pipe_select_interrupter::close_descriptors() { if (read_descriptor_ != -1) ::close(read_descriptor_); if (write_descriptor_ != -1) ::close(write_descriptor_); } void pipe_select_interrupter::recreate() { close_descriptors(); write_descriptor_ = -1; read_descriptor_ = -1; open_descriptors(); } void pipe_select_interrupter::interrupt() { char byte = 0; signed_size_type result = ::write(write_descriptor_, &byte, 1); (void)result; } bool pipe_select_interrupter::reset() { for (;;) { char data[1024]; signed_size_type bytes_read = ::read(read_descriptor_, data, sizeof(data)); if (bytes_read < 0 && errno == EINTR) continue; bool was_interrupted = (bytes_read > 0); while (bytes_read == sizeof(data)) bytes_read = ::read(read_descriptor_, data, sizeof(data)); return was_interrupted; } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_HAS_EVENTFD) #endif // !defined(__SYMBIAN32__) #endif // !defined(__CYGWIN__) #endif // !defined(ASIO_WINDOWS) #endif // !defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_IMPL_PIPE_SELECT_INTERRUPTER_IPP galera-3-25.3.20/asio/asio/detail/impl/winrt_timer_scheduler.ipp0000644000015300001660000000546713042054732024356 0ustar jenkinsjenkins// // detail/impl/winrt_timer_scheduler.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_IPP #define ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include "asio/detail/bind_handler.hpp" #include "asio/detail/winrt_timer_scheduler.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { winrt_timer_scheduler::winrt_timer_scheduler( asio::io_service& io_service) : asio::detail::service_base(io_service), io_service_(use_service(io_service)), mutex_(), event_(), timer_queues_(), thread_(0), stop_thread_(false), shutdown_(false) { thread_ = new asio::detail::thread( bind_handler(&winrt_timer_scheduler::call_run_thread, this)); } winrt_timer_scheduler::~winrt_timer_scheduler() { shutdown_service(); } void winrt_timer_scheduler::shutdown_service() { asio::detail::mutex::scoped_lock lock(mutex_); shutdown_ = true; stop_thread_ = true; event_.signal(lock); lock.unlock(); if (thread_) { thread_->join(); delete thread_; thread_ = 0; } op_queue ops; timer_queues_.get_all_timers(ops); io_service_.abandon_operations(ops); } void winrt_timer_scheduler::fork_service(asio::io_service::fork_event) { } void winrt_timer_scheduler::init_task() { } void winrt_timer_scheduler::run_thread() { asio::detail::mutex::scoped_lock lock(mutex_); while (!stop_thread_) { const long max_wait_duration = 5 * 60 * 1000000; long wait_duration = timer_queues_.wait_duration_usec(max_wait_duration); event_.wait_for_usec(lock, wait_duration); event_.clear(lock); op_queue ops; timer_queues_.get_ready_timers(ops); if (!ops.empty()) { lock.unlock(); io_service_.post_deferred_completions(ops); lock.lock(); } } } void winrt_timer_scheduler::call_run_thread(winrt_timer_scheduler* scheduler) { scheduler->run_thread(); } void winrt_timer_scheduler::do_add_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(mutex_); timer_queues_.insert(&queue); } void winrt_timer_scheduler::do_remove_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(mutex_); timer_queues_.erase(&queue); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_IPP galera-3-25.3.20/asio/asio/detail/impl/epoll_reactor.hpp0000644000015300001660000000362313042054732022576 0ustar jenkinsjenkins// // detail/impl/epoll_reactor.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_EPOLL_REACTOR_HPP #define ASIO_DETAIL_IMPL_EPOLL_REACTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #if defined(ASIO_HAS_EPOLL) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template void epoll_reactor::add_timer_queue(timer_queue& queue) { do_add_timer_queue(queue); } template void epoll_reactor::remove_timer_queue(timer_queue& queue) { do_remove_timer_queue(queue); } template void epoll_reactor::schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op) { mutex::scoped_lock lock(mutex_); if (shutdown_) { io_service_.post_immediate_completion(op, false); return; } bool earliest = queue.enqueue_timer(time, timer, op); io_service_.work_started(); if (earliest) update_timeout(); } template std::size_t epoll_reactor::cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled) { mutex::scoped_lock lock(mutex_); op_queue ops; std::size_t n = queue.cancel_timer(timer, ops, max_cancelled); lock.unlock(); io_service_.post_deferred_completions(ops); return n; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_EPOLL) #endif // ASIO_DETAIL_IMPL_EPOLL_REACTOR_HPP galera-3-25.3.20/asio/asio/detail/impl/service_registry.hpp0000644000015300001660000000427213042054732023335 0ustar jenkinsjenkins// // detail/impl/service_registry.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_SERVICE_REGISTRY_HPP #define ASIO_DETAIL_IMPL_SERVICE_REGISTRY_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template service_registry::service_registry( asio::io_service& o, Service*, Arg arg) : owner_(o), first_service_(new Service(o, arg)) { asio::io_service::service::key key; init_key(key, Service::id); first_service_->key_ = key; first_service_->next_ = 0; } template Service& service_registry::first_service() { return *static_cast(first_service_); } template Service& service_registry::use_service() { asio::io_service::service::key key; init_key(key, Service::id); factory_type factory = &service_registry::create; return *static_cast(do_use_service(key, factory)); } template void service_registry::add_service(Service* new_service) { asio::io_service::service::key key; init_key(key, Service::id); return do_add_service(key, new_service); } template bool service_registry::has_service() const { asio::io_service::service::key key; init_key(key, Service::id); return do_has_service(key); } #if !defined(ASIO_NO_TYPEID) template void service_registry::init_key(asio::io_service::service::key& key, const asio::detail::service_id& /*id*/) { key.type_info_ = &typeid(typeid_wrapper); key.id_ = 0; } #endif // !defined(ASIO_NO_TYPEID) template asio::io_service::service* service_registry::create( asio::io_service& owner) { return new Service(owner); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IMPL_SERVICE_REGISTRY_HPP galera-3-25.3.20/asio/asio/detail/impl/service_registry.ipp0000644000015300001660000001233513042054732023335 0ustar jenkinsjenkins// // detail/impl/service_registry.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_SERVICE_REGISTRY_IPP #define ASIO_DETAIL_IMPL_SERVICE_REGISTRY_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/service_registry.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { service_registry::~service_registry() { // Shutdown all services. This must be done in a separate loop before the // services are destroyed since the destructors of user-defined handler // objects may try to access other service objects. asio::io_service::service* service = first_service_; while (service) { service->shutdown_service(); service = service->next_; } // Destroy all services. while (first_service_) { asio::io_service::service* next_service = first_service_->next_; destroy(first_service_); first_service_ = next_service; } } void service_registry::notify_fork(asio::io_service::fork_event fork_ev) { // Make a copy of all of the services while holding the lock. We don't want // to hold the lock while calling into each service, as it may try to call // back into this class. 
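// This function is reached through the public io_service::notify_fork()
// call. A minimal usage sketch of that API around a POSIX fork() is shown
// below (illustrative only; error handling omitted):
#if 0
#include <unistd.h>
#include "asio/io_service.hpp"

void run_forked(asio::io_service& io)
{
  // Let every registered service release fork-unsafe state first.
  io.notify_fork(asio::io_service::fork_prepare);

  if (::fork() == 0)
  {
    // Child: services recreate internal descriptors before handlers run.
    io.notify_fork(asio::io_service::fork_child);
    io.run();
  }
  else
  {
    // Parent: services resume with their original descriptors.
    io.notify_fork(asio::io_service::fork_parent);
    io.run();
  }
}
#endif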
std::vector services; { asio::detail::mutex::scoped_lock lock(mutex_); asio::io_service::service* service = first_service_; while (service) { services.push_back(service); service = service->next_; } } // If processing the fork_prepare event, we want to go in reverse order of // service registration, which happens to be the existing order of the // services in the vector. For the other events we want to go in the other // direction. std::size_t num_services = services.size(); if (fork_ev == asio::io_service::fork_prepare) for (std::size_t i = 0; i < num_services; ++i) services[i]->fork_service(fork_ev); else for (std::size_t i = num_services; i > 0; --i) services[i - 1]->fork_service(fork_ev); } void service_registry::init_key(asio::io_service::service::key& key, const asio::io_service::id& id) { key.type_info_ = 0; key.id_ = &id; } bool service_registry::keys_match( const asio::io_service::service::key& key1, const asio::io_service::service::key& key2) { if (key1.id_ && key2.id_) if (key1.id_ == key2.id_) return true; if (key1.type_info_ && key2.type_info_) if (*key1.type_info_ == *key2.type_info_) return true; return false; } void service_registry::destroy(asio::io_service::service* service) { delete service; } asio::io_service::service* service_registry::do_use_service( const asio::io_service::service::key& key, factory_type factory) { asio::detail::mutex::scoped_lock lock(mutex_); // First see if there is an existing service object with the given key. asio::io_service::service* service = first_service_; while (service) { if (keys_match(service->key_, key)) return service; service = service->next_; } // Create a new service object. The service registry's mutex is not locked // at this time to allow for nested calls into this function from the new // service's constructor. lock.unlock(); auto_service_ptr new_service = { factory(owner_) }; new_service.ptr_->key_ = key; lock.lock(); // Check that nobody else created another service object of the same type // while the lock was released. service = first_service_; while (service) { if (keys_match(service->key_, key)) return service; service = service->next_; } // Service was successfully initialised, pass ownership to registry. new_service.ptr_->next_ = first_service_; first_service_ = new_service.ptr_; new_service.ptr_ = 0; return first_service_; } void service_registry::do_add_service( const asio::io_service::service::key& key, asio::io_service::service* new_service) { if (&owner_ != &new_service->get_io_service()) asio::detail::throw_exception(invalid_service_owner()); asio::detail::mutex::scoped_lock lock(mutex_); // Check if there is an existing service object with the given key. asio::io_service::service* service = first_service_; while (service) { if (keys_match(service->key_, key)) asio::detail::throw_exception(service_already_exists()); service = service->next_; } // Take ownership of the service object. 
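// The keyed lookups above back the public asio::use_service<>,
// asio::add_service<> and asio::has_service<> helpers. A minimal sketch of a
// user-defined service that can be obtained through them (the name
// logger_service and its log() member are made up for illustration):
#if 0
#include <iostream>
#include "asio/io_service.hpp"

class logger_service : public asio::io_service::service
{
public:
  static asio::io_service::id id;      // identity used as the registry key

  explicit logger_service(asio::io_service& owner)
    : asio::io_service::service(owner) {}

  void log(const char* msg) { std::clog << msg << std::endl; }

private:
  void shutdown_service() {}           // invoked from ~service_registry()
};

asio::io_service::id logger_service::id;

void example(asio::io_service& io)
{
  // The first call creates and registers the service; later calls return
  // the same instance.
  asio::use_service<logger_service>(io).log("hello");
  bool present = asio::has_service<logger_service>(io); // now true
  (void)present;
}
#endif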
new_service->key_ = key; new_service->next_ = first_service_; first_service_ = new_service; } bool service_registry::do_has_service( const asio::io_service::service::key& key) const { asio::detail::mutex::scoped_lock lock(mutex_); asio::io_service::service* service = first_service_; while (service) { if (keys_match(service->key_, key)) return true; service = service->next_; } return false; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IMPL_SERVICE_REGISTRY_IPP galera-3-25.3.20/asio/asio/detail/impl/eventfd_select_interrupter.ipp0000644000015300001660000001060113042054732025374 0ustar jenkinsjenkins// // detail/impl/eventfd_select_interrupter.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Roelof Naude (roelof.naude at gmail dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_EVENTFD_SELECT_INTERRUPTER_IPP #define ASIO_DETAIL_IMPL_EVENTFD_SELECT_INTERRUPTER_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_EVENTFD) #include #include #include #if __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 # include #else // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 # include #endif // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 #include "asio/detail/cstdint.hpp" #include "asio/detail/eventfd_select_interrupter.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { eventfd_select_interrupter::eventfd_select_interrupter() { open_descriptors(); } void eventfd_select_interrupter::open_descriptors() { #if __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 write_descriptor_ = read_descriptor_ = syscall(__NR_eventfd, 0); if (read_descriptor_ != -1) { ::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK); ::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC); } #else // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 # if defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK) write_descriptor_ = read_descriptor_ = ::eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); # else // defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK) errno = EINVAL; write_descriptor_ = read_descriptor_ = -1; # endif // defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK) if (read_descriptor_ == -1 && errno == EINVAL) { write_descriptor_ = read_descriptor_ = ::eventfd(0, 0); if (read_descriptor_ != -1) { ::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK); ::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC); } } #endif // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 if (read_descriptor_ == -1) { int pipe_fds[2]; if (pipe(pipe_fds) == 0) { read_descriptor_ = pipe_fds[0]; ::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK); ::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC); write_descriptor_ = pipe_fds[1]; ::fcntl(write_descriptor_, F_SETFL, O_NONBLOCK); ::fcntl(write_descriptor_, F_SETFD, FD_CLOEXEC); } else { asio::error_code ec(errno, asio::error::get_system_category()); asio::detail::throw_error(ec, "eventfd_select_interrupter"); } } } eventfd_select_interrupter::~eventfd_select_interrupter() { close_descriptors(); } void eventfd_select_interrupter::close_descriptors() { if (write_descriptor_ != -1 && write_descriptor_ != read_descriptor_) ::close(write_descriptor_); if (read_descriptor_ != -1) ::close(read_descriptor_); } void 
eventfd_select_interrupter::recreate() { close_descriptors(); write_descriptor_ = -1; read_descriptor_ = -1; open_descriptors(); } void eventfd_select_interrupter::interrupt() { uint64_t counter(1UL); int result = ::write(write_descriptor_, &counter, sizeof(uint64_t)); (void)result; } bool eventfd_select_interrupter::reset() { if (write_descriptor_ == read_descriptor_) { for (;;) { // Only perform one read. The kernel maintains an atomic counter. uint64_t counter(0); errno = 0; int bytes_read = ::read(read_descriptor_, &counter, sizeof(uint64_t)); if (bytes_read < 0 && errno == EINTR) continue; bool was_interrupted = (bytes_read > 0); return was_interrupted; } } else { for (;;) { // Clear all data from the pipe. char data[1024]; int bytes_read = ::read(read_descriptor_, data, sizeof(data)); if (bytes_read < 0 && errno == EINTR) continue; bool was_interrupted = (bytes_read > 0); while (bytes_read == sizeof(data)) bytes_read = ::read(read_descriptor_, data, sizeof(data)); return was_interrupted; } } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_EVENTFD) #endif // ASIO_DETAIL_IMPL_EVENTFD_SELECT_INTERRUPTER_IPP galera-3-25.3.20/asio/asio/detail/impl/posix_event.ipp0000644000015300001660000000213713042054732022307 0ustar jenkinsjenkins// // detail/impl/posix_event.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_POSIX_EVENT_IPP #define ASIO_DETAIL_IMPL_POSIX_EVENT_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_PTHREADS) #include "asio/detail/posix_event.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { posix_event::posix_event() : state_(0) { int error = ::pthread_cond_init(&cond_, 0); asio::error_code ec(error, asio::error::get_system_category()); asio::detail::throw_error(ec, "event"); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_PTHREADS) #endif // ASIO_DETAIL_IMPL_POSIX_EVENT_IPP galera-3-25.3.20/asio/asio/detail/impl/posix_thread.ipp0000644000015300001660000000310013042054732022424 0ustar jenkinsjenkins// // detail/impl/posix_thread.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_POSIX_THREAD_IPP #define ASIO_DETAIL_IMPL_POSIX_THREAD_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_PTHREADS) #include "asio/detail/posix_thread.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { posix_thread::~posix_thread() { if (!joined_) ::pthread_detach(thread_); } void posix_thread::join() { if (!joined_) { ::pthread_join(thread_, 0); joined_ = true; } } void posix_thread::start_thread(func_base* arg) { int error = ::pthread_create(&thread_, 0, asio_detail_posix_thread_function, arg); if (error != 0) { delete arg; asio::error_code ec(error, asio::error::get_system_category()); asio::detail::throw_error(ec, "thread"); } } void* asio_detail_posix_thread_function(void* arg) { posix_thread::auto_func_base_ptr func = { static_cast(arg) }; func.ptr->run(); return 0; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_PTHREADS) #endif // ASIO_DETAIL_IMPL_POSIX_THREAD_IPP galera-3-25.3.20/asio/asio/detail/reactive_socket_service_base.hpp0000644000015300001660000003664213042054732024676 0ustar jenkinsjenkins// // detail/reactive_socket_service_base.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_SOCKET_SERVICE_BASE_HPP #define ASIO_DETAIL_REACTIVE_SOCKET_SERVICE_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_IOCP) \ && !defined(ASIO_WINDOWS_RUNTIME) #include "asio/buffer.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/socket_base.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/reactive_null_buffers_op.hpp" #include "asio/detail/reactive_socket_recv_op.hpp" #include "asio/detail/reactive_socket_recvmsg_op.hpp" #include "asio/detail/reactive_socket_send_op.hpp" #include "asio/detail/reactor.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_holder.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class reactive_socket_service_base { public: // The native type of a socket. typedef socket_type native_handle_type; // The implementation type of the socket. struct base_implementation_type { // The native socket representation. socket_type socket_; // The current state of the socket. socket_ops::state_type state_; // Per-descriptor data used by the reactor. reactor::per_descriptor_data reactor_data_; }; // Constructor. ASIO_DECL reactive_socket_service_base( asio::io_service& io_service); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Construct a new socket implementation. ASIO_DECL void construct(base_implementation_type& impl); // Move-construct a new socket implementation. 
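// These move hooks are what make the public socket objects movable. A
// user-level sketch (illustrative only; requires a C++11 compiler so that
// ASIO_HAS_MOVE is enabled):
#if 0
#include <utility>
#include "asio/io_service.hpp"
#include "asio/ip/tcp.hpp"

void hand_off(asio::io_service& io)
{
  asio::ip::tcp::acceptor acceptor(io,
      asio::ip::tcp::endpoint(asio::ip::tcp::v4(), 0));
  asio::ip::tcp::socket accepted(io);
  acceptor.accept(accepted);

  // The move transfers the native descriptor and its associated state;
  // `accepted` is left closed but assignable.
  asio::ip::tcp::socket owner(std::move(accepted));
  (void)owner;
}
#endif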
ASIO_DECL void base_move_construct(base_implementation_type& impl, base_implementation_type& other_impl); // Move-assign from another socket implementation. ASIO_DECL void base_move_assign(base_implementation_type& impl, reactive_socket_service_base& other_service, base_implementation_type& other_impl); // Destroy a socket implementation. ASIO_DECL void destroy(base_implementation_type& impl); // Determine whether the socket is open. bool is_open(const base_implementation_type& impl) const { return impl.socket_ != invalid_socket; } // Destroy a socket implementation. ASIO_DECL asio::error_code close( base_implementation_type& impl, asio::error_code& ec); // Get the native socket representation. native_handle_type native_handle(base_implementation_type& impl) { return impl.socket_; } // Cancel all operations associated with the socket. ASIO_DECL asio::error_code cancel( base_implementation_type& impl, asio::error_code& ec); // Determine whether the socket is at the out-of-band data mark. bool at_mark(const base_implementation_type& impl, asio::error_code& ec) const { return socket_ops::sockatmark(impl.socket_, ec); } // Determine the number of bytes available for reading. std::size_t available(const base_implementation_type& impl, asio::error_code& ec) const { return socket_ops::available(impl.socket_, ec); } // Place the socket into the state where it will listen for new connections. asio::error_code listen(base_implementation_type& impl, int backlog, asio::error_code& ec) { socket_ops::listen(impl.socket_, backlog, ec); return ec; } // Perform an IO control command on the socket. template asio::error_code io_control(base_implementation_type& impl, IO_Control_Command& command, asio::error_code& ec) { socket_ops::ioctl(impl.socket_, impl.state_, command.name(), static_cast(command.data()), ec); return ec; } // Gets the non-blocking mode of the socket. bool non_blocking(const base_implementation_type& impl) const { return (impl.state_ & socket_ops::user_set_non_blocking) != 0; } // Sets the non-blocking mode of the socket. asio::error_code non_blocking(base_implementation_type& impl, bool mode, asio::error_code& ec) { socket_ops::set_user_non_blocking(impl.socket_, impl.state_, mode, ec); return ec; } // Gets the non-blocking mode of the native socket implementation. bool native_non_blocking(const base_implementation_type& impl) const { return (impl.state_ & socket_ops::internal_non_blocking) != 0; } // Sets the non-blocking mode of the native socket implementation. asio::error_code native_non_blocking(base_implementation_type& impl, bool mode, asio::error_code& ec) { socket_ops::set_internal_non_blocking(impl.socket_, impl.state_, mode, ec); return ec; } // Disable sends or receives on the socket. asio::error_code shutdown(base_implementation_type& impl, socket_base::shutdown_type what, asio::error_code& ec) { socket_ops::shutdown(impl.socket_, what, ec); return ec; } // Send the given data to the peer. template size_t send(base_implementation_type& impl, const ConstBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); return socket_ops::sync_send(impl.socket_, impl.state_, bufs.buffers(), bufs.count(), flags, bufs.all_empty(), ec); } // Wait until data can be sent without blocking. size_t send(base_implementation_type& impl, const null_buffers&, socket_base::message_flags, asio::error_code& ec) { // Wait for socket to become ready. 
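// From the caller's side this overload is reached by passing null_buffers to
// send(): the call blocks until the socket is writable and transfers no data,
// so the application can then write through the native handle itself. A
// POSIX-only sketch (illustrative; raw_send() is a made-up helper):
#if 0
#include <cstddef>
#include <sys/socket.h>
#include "asio/ip/tcp.hpp"

std::size_t raw_send(asio::ip::tcp::socket& s,
    const char* data, std::size_t len)
{
  s.send(asio::null_buffers());   // waits in poll_write() below, sends nothing
  return ::send(s.native_handle(), data, len, 0); // caller does the real I/O
}
#endif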
socket_ops::poll_write(impl.socket_, impl.state_, ec); return 0; } // Start an asynchronous send. The data being sent must be valid for the // lifetime of the asynchronous operation. template void async_send(base_implementation_type& impl, const ConstBufferSequence& buffers, socket_base::message_flags flags, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_socket_send_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.socket_, buffers, flags, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_send")); start_op(impl, reactor::write_op, p.p, is_continuation, true, ((impl.state_ & socket_ops::stream_oriented) && buffer_sequence_adapter::all_empty(buffers))); p.v = p.p = 0; } // Start an asynchronous wait until data can be sent without blocking. template void async_send(base_implementation_type& impl, const null_buffers&, socket_base::message_flags, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_send(null_buffers)")); start_op(impl, reactor::write_op, p.p, is_continuation, false, false); p.v = p.p = 0; } // Receive some data from the peer. Returns the number of bytes received. template size_t receive(base_implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); return socket_ops::sync_recv(impl.socket_, impl.state_, bufs.buffers(), bufs.count(), flags, bufs.all_empty(), ec); } // Wait until data can be received without blocking. size_t receive(base_implementation_type& impl, const null_buffers&, socket_base::message_flags, asio::error_code& ec) { // Wait for socket to become ready. socket_ops::poll_read(impl.socket_, impl.state_, ec); return 0; } // Start an asynchronous receive. The buffer for the data being received // must be valid for the lifetime of the asynchronous operation. template void async_receive(base_implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags flags, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_socket_recv_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.socket_, impl.state_, buffers, flags, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_receive")); start_op(impl, (flags & socket_base::message_out_of_band) ? reactor::except_op : reactor::read_op, p.p, is_continuation, (flags & socket_base::message_out_of_band) == 0, ((impl.state_ & socket_ops::stream_oriented) && buffer_sequence_adapter::all_empty(buffers))); p.v = p.p = 0; } // Wait until data can be received without blocking. 
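// The asynchronous form of the same readiness idiom, as seen by the caller
// (illustrative sketch; the lambda requires C++11 and simply performs the
// real read once the socket is reported readable):
#if 0
#include <cstddef>
#include "asio/ip/tcp.hpp"

void wait_then_read(asio::ip::tcp::socket& s, char* buf, std::size_t len)
{
  s.async_receive(asio::null_buffers(),
      [&s, buf, len](const asio::error_code& ec, std::size_t /*bytes*/)
      {
        if (!ec)
          s.read_some(asio::buffer(buf, len)); // data is waiting, read it now
      });
}
#endif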
template void async_receive(base_implementation_type& impl, const null_buffers&, socket_base::message_flags flags, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_receive(null_buffers)")); start_op(impl, (flags & socket_base::message_out_of_band) ? reactor::except_op : reactor::read_op, p.p, is_continuation, false, false); p.v = p.p = 0; } // Receive some data with associated flags. Returns the number of bytes // received. template size_t receive_with_flags(base_implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags in_flags, socket_base::message_flags& out_flags, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); return socket_ops::sync_recvmsg(impl.socket_, impl.state_, bufs.buffers(), bufs.count(), in_flags, out_flags, ec); } // Wait until data can be received without blocking. size_t receive_with_flags(base_implementation_type& impl, const null_buffers&, socket_base::message_flags, socket_base::message_flags& out_flags, asio::error_code& ec) { // Wait for socket to become ready. socket_ops::poll_read(impl.socket_, impl.state_, ec); // Clear out_flags, since we cannot give it any other sensible value when // performing a null_buffers operation. out_flags = 0; return 0; } // Start an asynchronous receive. The buffer for the data being received // must be valid for the lifetime of the asynchronous operation. template void async_receive_with_flags(base_implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags in_flags, socket_base::message_flags& out_flags, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_socket_recvmsg_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.socket_, buffers, in_flags, out_flags, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_receive_with_flags")); start_op(impl, (in_flags & socket_base::message_out_of_band) ? reactor::except_op : reactor::read_op, p.p, is_continuation, (in_flags & socket_base::message_out_of_band) == 0, false); p.v = p.p = 0; } // Wait until data can be received without blocking. template void async_receive_with_flags(base_implementation_type& impl, const null_buffers&, socket_base::message_flags in_flags, socket_base::message_flags& out_flags, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_receive_with_flags(null_buffers)")); // Clear out_flags, since we cannot give it any other sensible value when // performing a null_buffers operation. out_flags = 0; start_op(impl, (in_flags & socket_base::message_out_of_band) ? 
reactor::except_op : reactor::read_op, p.p, is_continuation, false, false); p.v = p.p = 0; } protected: // Open a new socket implementation. ASIO_DECL asio::error_code do_open( base_implementation_type& impl, int af, int type, int protocol, asio::error_code& ec); // Assign a native socket to a socket implementation. ASIO_DECL asio::error_code do_assign( base_implementation_type& impl, int type, const native_handle_type& native_socket, asio::error_code& ec); // Start the asynchronous read or write operation. ASIO_DECL void start_op(base_implementation_type& impl, int op_type, reactor_op* op, bool is_continuation, bool is_non_blocking, bool noop); // Start the asynchronous accept operation. ASIO_DECL void start_accept_op(base_implementation_type& impl, reactor_op* op, bool is_continuation, bool peer_is_open); // Start the asynchronous connect operation. ASIO_DECL void start_connect_op(base_implementation_type& impl, reactor_op* op, bool is_continuation, const socket_addr_type* addr, size_t addrlen); // The selector that performs event demultiplexing for the service. reactor& reactor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/reactive_socket_service_base.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // !defined(ASIO_HAS_IOCP) // && !defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_REACTIVE_SOCKET_SERVICE_BASE_HPP galera-3-25.3.20/asio/asio/detail/assert.hpp0000644000015300001660000000160013042054732020275 0ustar jenkinsjenkins// // detail/assert.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_ASSERT_HPP #define ASIO_DETAIL_ASSERT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_BOOST_ASSERT) # include #else // defined(ASIO_HAS_BOOST_ASSERT) # include #endif // defined(ASIO_HAS_BOOST_ASSERT) #if defined(ASIO_HAS_BOOST_ASSERT) # define ASIO_ASSERT(expr) BOOST_ASSERT(expr) #else // defined(ASIO_HAS_BOOST_ASSERT) # define ASIO_ASSERT(expr) assert(expr) #endif // defined(ASIO_HAS_BOOST_ASSERT) #endif // ASIO_DETAIL_ASSERT_HPP galera-3-25.3.20/asio/asio/detail/resolve_endpoint_op.hpp0000644000015300001660000000777513042054732023074 0ustar jenkinsjenkins// // detail/resolve_endpoint_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_RESOLVER_ENDPOINT_OP_HPP #define ASIO_DETAIL_RESOLVER_ENDPOINT_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/ip/basic_resolver_iterator.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class resolve_endpoint_op : public operation { public: ASIO_DEFINE_HANDLER_PTR(resolve_endpoint_op); typedef typename Protocol::endpoint endpoint_type; typedef asio::ip::basic_resolver_iterator iterator_type; resolve_endpoint_op(socket_ops::weak_cancel_token_type cancel_token, const endpoint_type& endpoint, io_service_impl& ios, Handler& handler) : operation(&resolve_endpoint_op::do_complete), cancel_token_(cancel_token), endpoint_(endpoint), io_service_impl_(ios), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the operation object. resolve_endpoint_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; if (owner && owner != &o->io_service_impl_) { // The operation is being run on the worker io_service. Time to perform // the resolver operation. // Perform the blocking endpoint resolution operation. char host_name[NI_MAXHOST]; char service_name[NI_MAXSERV]; socket_ops::background_getnameinfo(o->cancel_token_, o->endpoint_.data(), o->endpoint_.size(), host_name, NI_MAXHOST, service_name, NI_MAXSERV, o->endpoint_.protocol().type(), o->ec_); o->iter_ = iterator_type::create(o->endpoint_, host_name, service_name); // Pass operation back to main io_service for completion. o->io_service_impl_.post_deferred_completion(o); p.v = p.p = 0; } else { // The operation has been returned to the main io_service. The completion // handler is ready to be delivered. ASIO_HANDLER_COMPLETION((o)); // Make a copy of the handler so that the memory can be deallocated // before the upcall is made. Even if we're not about to make an upcall, // a sub-object of the handler may be the true owner of the memory // associated with the handler. Consequently, a local copy of the handler // is required to ensure that any owning sub-object remains valid until // after we have deallocated the memory here. 
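// For reference, this operation completes the endpoint overload of the
// resolver's async_resolve(), i.e. a reverse lookup via getnameinfo(). A
// caller-side sketch (illustrative; requires C++11 lambdas and keeps the
// resolver alive for the duration of the operation):
#if 0
#include <iostream>
#include "asio/io_service.hpp"
#include "asio/ip/tcp.hpp"

void reverse_lookup(asio::io_service& io, const asio::ip::tcp::endpoint& ep)
{
  static asio::ip::tcp::resolver resolver(io);
  resolver.async_resolve(ep,
      [](const asio::error_code& ec, asio::ip::tcp::resolver::iterator it)
      {
        if (!ec)
          std::cout << it->host_name() << ":" << it->service_name() << "\n";
      });
}
#endif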
detail::binder2 handler(o->handler_, o->ec_, o->iter_); p.h = asio::detail::addressof(handler.handler_); p.reset(); if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, "...")); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } } private: socket_ops::weak_cancel_token_type cancel_token_; endpoint_type endpoint_; io_service_impl& io_service_impl_; Handler handler_; asio::error_code ec_; iterator_type iter_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_RESOLVER_ENDPOINT_OP_HPP galera-3-25.3.20/asio/asio/detail/null_static_mutex.hpp0000644000015300001660000000220113042054732022535 0ustar jenkinsjenkins// // detail/null_static_mutex.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_NULL_STATIC_MUTEX_HPP #define ASIO_DETAIL_NULL_STATIC_MUTEX_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) #include "asio/detail/scoped_lock.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct null_static_mutex { typedef asio::detail::scoped_lock scoped_lock; // Initialise the mutex. void init() { } // Lock the mutex. void lock() { } // Unlock the mutex. void unlock() { } int unused_; }; #define ASIO_NULL_STATIC_MUTEX_INIT { 0 } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_HAS_THREADS) #endif // ASIO_DETAIL_NULL_STATIC_MUTEX_HPP galera-3-25.3.20/asio/asio/detail/throw_error.hpp0000644000015300001660000000231413042054732021353 0ustar jenkinsjenkins// // detail/throw_error.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_THROW_ERROR_HPP #define ASIO_DETAIL_THROW_ERROR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/error_code.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { ASIO_DECL void do_throw_error(const asio::error_code& err); ASIO_DECL void do_throw_error(const asio::error_code& err, const char* location); inline void throw_error(const asio::error_code& err) { if (err) do_throw_error(err); } inline void throw_error(const asio::error_code& err, const char* location) { if (err) do_throw_error(err, location); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/throw_error.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_DETAIL_THROW_ERROR_HPP galera-3-25.3.20/asio/asio/detail/object_pool.hpp0000644000015300001660000000546013042054732021303 0ustar jenkinsjenkins// // detail/object_pool.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_OBJECT_POOL_HPP #define ASIO_DETAIL_OBJECT_POOL_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class object_pool; class object_pool_access { public: template static Object* create() { return new Object; } template static void destroy(Object* o) { delete o; } template static Object*& next(Object* o) { return o->next_; } template static Object*& prev(Object* o) { return o->prev_; } }; template class object_pool : private noncopyable { public: // Constructor. object_pool() : live_list_(0), free_list_(0) { } // Destructor destroys all objects. ~object_pool() { destroy_list(live_list_); destroy_list(free_list_); } // Get the object at the start of the live list. Object* first() { return live_list_; } // Allocate a new object. Object* alloc() { Object* o = free_list_; if (o) free_list_ = object_pool_access::next(free_list_); else o = object_pool_access::create(); object_pool_access::next(o) = live_list_; object_pool_access::prev(o) = 0; if (live_list_) object_pool_access::prev(live_list_) = o; live_list_ = o; return o; } // Free an object. Moves it to the free list. No destructors are run. void free(Object* o) { if (live_list_ == o) live_list_ = object_pool_access::next(o); if (object_pool_access::prev(o)) { object_pool_access::next(object_pool_access::prev(o)) = object_pool_access::next(o); } if (object_pool_access::next(o)) { object_pool_access::prev(object_pool_access::next(o)) = object_pool_access::prev(o); } object_pool_access::next(o) = free_list_; object_pool_access::prev(o) = 0; free_list_ = o; } private: // Helper function to destroy all elements in a list. void destroy_list(Object* list) { while (list) { Object* o = list; list = object_pool_access::next(o); object_pool_access::destroy(o); } } // The list of live objects. Object* live_list_; // The free list. Object* free_list_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_OBJECT_POOL_HPP galera-3-25.3.20/asio/asio/detail/signal_op.hpp0000644000015300001660000000204013042054732020746 0ustar jenkinsjenkins// // detail/signal_op.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SIGNAL_OP_HPP #define ASIO_DETAIL_SIGNAL_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class signal_op : public operation { public: // The error code to be passed to the completion handler. asio::error_code ec_; // The signal number to be passed to the completion handler. 
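// For reference, this error code and signal number are the two arguments
// eventually delivered to an asio::signal_set completion handler. A usage
// sketch of that public API (illustrative only; requires C++11 lambdas):
#if 0
#include <csignal>
#include <iostream>
#include "asio/io_service.hpp"
#include "asio/signal_set.hpp"

void wait_for_shutdown(asio::io_service& io)
{
  static asio::signal_set signals(io, SIGINT, SIGTERM);
  signals.async_wait(
      [](const asio::error_code& ec, int signal_number)
      {
        if (!ec)
          std::cout << "caught signal " << signal_number << "\n";
      });
}
#endif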
int signal_number_; protected: signal_op(func_type func) : operation(func), signal_number_(0) { } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_SIGNAL_OP_HPP galera-3-25.3.20/asio/asio/detail/win_object_handle_service.hpp0000644000015300001660000001275513042054732024167 0ustar jenkinsjenkins// // detail/win_object_handle_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2011 Boris Schaeling (boris@highscore.de) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_OBJECT_HANDLE_SERVICE_HPP #define ASIO_DETAIL_WIN_OBJECT_HANDLE_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) #include "asio/detail/addressof.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/wait_handler.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class win_object_handle_service { public: // The native type of an object handle. typedef HANDLE native_handle_type; // The implementation type of the object handle. class implementation_type { public: // Default constructor. implementation_type() : handle_(INVALID_HANDLE_VALUE), wait_handle_(INVALID_HANDLE_VALUE), owner_(0), next_(0), prev_(0) { } private: // Only this service will have access to the internal values. friend class win_object_handle_service; // The native object handle representation. May be accessed or modified // without locking the mutex. native_handle_type handle_; // The handle used to unregister the wait operation. The mutex must be // locked when accessing or modifying this member. HANDLE wait_handle_; // The operations waiting on the object handle. If there is a registered // wait then the mutex must be locked when accessing or modifying this // member op_queue op_queue_; // The service instance that owns the object handle implementation. win_object_handle_service* owner_; // Pointers to adjacent handle implementations in linked list. The mutex // must be locked when accessing or modifying these members. implementation_type* next_; implementation_type* prev_; }; // Constructor. ASIO_DECL win_object_handle_service( asio::io_service& io_service); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Construct a new handle implementation. ASIO_DECL void construct(implementation_type& impl); // Move-construct a new handle implementation. ASIO_DECL void move_construct(implementation_type& impl, implementation_type& other_impl); // Move-assign from another handle implementation. ASIO_DECL void move_assign(implementation_type& impl, win_object_handle_service& other_service, implementation_type& other_impl); // Destroy a handle implementation. ASIO_DECL void destroy(implementation_type& impl); // Assign a native handle to a handle implementation. ASIO_DECL asio::error_code assign(implementation_type& impl, const native_handle_type& handle, asio::error_code& ec); // Determine whether the handle is open. bool is_open(const implementation_type& impl) const { return impl.handle_ != INVALID_HANDLE_VALUE && impl.handle_ != 0; } // Destroy a handle implementation. 
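// This service backs the public asio::windows::object_handle class. A
// Windows-only usage sketch, waiting for a kernel event object to become
// signalled (illustrative only; requires C++11 lambdas):
#if 0
#include <windows.h>
#include "asio/io_service.hpp"
#include "asio/windows/object_handle.hpp"

void wait_for_event(asio::io_service& io)
{
  HANDLE ev = ::CreateEventA(0, TRUE, FALSE, 0); // manual-reset, unsignalled
  asio::windows::object_handle handle(io, ev);   // ownership passes to asio
  handle.async_wait(
      [](const asio::error_code& ec)
      {
        (void)ec; // runs once the HANDLE is signalled or the wait is cancelled
      });
  ::SetEvent(ev);  // signal the HANDLE
  io.run();        // dispatches the completion; `handle` outlives the wait
}
#endif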
ASIO_DECL asio::error_code close(implementation_type& impl, asio::error_code& ec); // Get the native handle representation. native_handle_type native_handle(const implementation_type& impl) const { return impl.handle_; } // Cancel all operations associated with the handle. ASIO_DECL asio::error_code cancel(implementation_type& impl, asio::error_code& ec); // Perform a synchronous wait for the object to enter a signalled state. ASIO_DECL void wait(implementation_type& impl, asio::error_code& ec); /// Start an asynchronous wait. template void async_wait(implementation_type& impl, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef wait_handler op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "object_handle", &impl, "async_wait")); start_wait_op(impl, p.p); p.v = p.p = 0; } private: // Helper function to start an asynchronous wait operation. ASIO_DECL void start_wait_op(implementation_type& impl, wait_op* op); // Helper function to register a wait operation. ASIO_DECL void register_wait_callback( implementation_type& impl, mutex::scoped_lock& lock); // Callback function invoked when the registered wait completes. static ASIO_DECL VOID CALLBACK wait_callback( PVOID param, BOOLEAN timeout); // The io_service implementation used to post completions. io_service_impl& io_service_; // Mutex to protect access to internal state. mutex mutex_; // The head of a linked list of all implementations. implementation_type* impl_list_; // Flag to indicate that the dispatcher has been shut down. bool shutdown_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/win_object_handle_service.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) #endif // ASIO_DETAIL_WIN_OBJECT_HANDLE_SERVICE_HPP galera-3-25.3.20/asio/asio/coroutine.hpp0000644000015300001660000002267413042054732017557 0ustar jenkinsjenkins// // coroutine.hpp // ~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_COROUTINE_HPP #define ASIO_COROUTINE_HPP namespace asio { namespace detail { class coroutine_ref; } // namespace detail /// Provides support for implementing stackless coroutines. /** * The @c coroutine class may be used to implement stackless coroutines. The * class itself is used to store the current state of the coroutine. * * Coroutines are copy-constructible and assignable, and the space overhead is * a single int. They can be used as a base class: * * @code class session : coroutine * { * ... * }; @endcode * * or as a data member: * * @code class session * { * ... * coroutine coro_; * }; @endcode * * or even bound in as a function argument using lambdas or @c bind(). The * important thing is that as the application maintains a copy of the object * for as long as the coroutine must be kept alive. * * @par Pseudo-keywords * * A coroutine is used in conjunction with certain "pseudo-keywords", which * are implemented as macros. 
These macros are defined by a header file: * * @code #include @endcode * * and may conversely be undefined as follows: * * @code #include @endcode * * reenter * * The @c reenter macro is used to define the body of a coroutine. It takes a * single argument: a pointer or reference to a coroutine object. For example, * if the base class is a coroutine object you may write: * * @code reenter (this) * { * ... coroutine body ... * } @endcode * * and if a data member or other variable you can write: * * @code reenter (coro_) * { * ... coroutine body ... * } @endcode * * When @c reenter is executed at runtime, control jumps to the location of the * last @c yield or @c fork. * * The coroutine body may also be a single statement, such as: * * @code reenter (this) for (;;) * { * ... * } @endcode * * @b Limitation: The @c reenter macro is implemented using a switch. This * means that you must take care when using local variables within the * coroutine body. The local variable is not allowed in a position where * reentering the coroutine could bypass the variable definition. * * yield statement * * This form of the @c yield keyword is often used with asynchronous operations: * * @code yield socket_->async_read_some(buffer(*buffer_), *this); @endcode * * This divides into four logical steps: * * @li @c yield saves the current state of the coroutine. * @li The statement initiates the asynchronous operation. * @li The resume point is defined immediately following the statement. * @li Control is transferred to the end of the coroutine body. * * When the asynchronous operation completes, the function object is invoked * and @c reenter causes control to transfer to the resume point. It is * important to remember to carry the coroutine state forward with the * asynchronous operation. In the above snippet, the current class is a * function object object with a coroutine object as base class or data member. * * The statement may also be a compound statement, and this permits us to * define local variables with limited scope: * * @code yield * { * mutable_buffers_1 b = buffer(*buffer_); * socket_->async_read_some(b, *this); * } @endcode * * yield return expression ; * * This form of @c yield is often used in generators or coroutine-based parsers. * For example, the function object: * * @code struct interleave : coroutine * { * istream& is1; * istream& is2; * char operator()(char c) * { * reenter (this) for (;;) * { * yield return is1.get(); * yield return is2.get(); * } * } * }; @endcode * * defines a trivial coroutine that interleaves the characters from two input * streams. * * This type of @c yield divides into three logical steps: * * @li @c yield saves the current state of the coroutine. * @li The resume point is defined immediately following the semicolon. * @li The value of the expression is returned from the function. * * yield ; * * This form of @c yield is equivalent to the following steps: * * @li @c yield saves the current state of the coroutine. * @li The resume point is defined immediately following the semicolon. * @li Control is transferred to the end of the coroutine body. * * This form might be applied when coroutines are used for cooperative * threading and scheduling is explicitly managed. For example: * * @code struct task : coroutine * { * ... * void operator()() * { * reenter (this) * { * while (... not finished ...) * { * ... do something ... * yield; * ... do some more ... * yield; * } * } * } * ... * }; * ... 
* task t1, t2; * for (;;) * { * t1(); * t2(); * } @endcode * * yield break ; * * The final form of @c yield is used to explicitly terminate the coroutine. * This form is comprised of two steps: * * @li @c yield sets the coroutine state to indicate termination. * @li Control is transferred to the end of the coroutine body. * * Once terminated, calls to is_complete() return true and the coroutine cannot * be reentered. * * Note that a coroutine may also be implicitly terminated if the coroutine * body is exited without a yield, e.g. by return, throw or by running to the * end of the body. * * fork statement * * The @c fork pseudo-keyword is used when "forking" a coroutine, i.e. splitting * it into two (or more) copies. One use of @c fork is in a server, where a new * coroutine is created to handle each client connection: * * @code reenter (this) * { * do * { * socket_.reset(new tcp::socket(io_service_)); * yield acceptor->async_accept(*socket_, *this); * fork server(*this)(); * } while (is_parent()); * ... client-specific handling follows ... * } @endcode * * The logical steps involved in a @c fork are: * * @li @c fork saves the current state of the coroutine. * @li The statement creates a copy of the coroutine and either executes it * immediately or schedules it for later execution. * @li The resume point is defined immediately following the semicolon. * @li For the "parent", control immediately continues from the next line. * * The functions is_parent() and is_child() can be used to differentiate * between parent and child. You would use these functions to alter subsequent * control flow. * * Note that @c fork doesn't do the actual forking by itself. It is the * application's responsibility to create a clone of the coroutine and call it. * The clone can be called immediately, as above, or scheduled for delayed * execution using something like io_service::post(). * * @par Alternate macro names * * If preferred, an application can use macro names that follow a more typical * naming convention, rather than the pseudo-keywords. These are: * * @li @c ASIO_CORO_REENTER instead of @c reenter * @li @c ASIO_CORO_YIELD instead of @c yield * @li @c ASIO_CORO_FORK instead of @c fork */ class coroutine { public: /// Constructs a coroutine in its initial state. coroutine() : value_(0) {} /// Returns true if the coroutine is the child of a fork. bool is_child() const { return value_ < 0; } /// Returns true if the coroutine is the parent of a fork. bool is_parent() const { return !is_child(); } /// Returns true if the coroutine has reached its terminal state. 
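/// For example, a caller can drive a coroutine until it terminates. The
/// sketch below uses the alternate ASIO_CORO_* macro names described above
/// (illustrative only; the generator and its values are made up):
///
/// @code #include "asio/coroutine.hpp"
///
/// struct counter : asio::coroutine
/// {
///   int operator()()
///   {
///     ASIO_CORO_REENTER (this)
///     {
///       ASIO_CORO_YIELD return 1;
///       ASIO_CORO_YIELD return 2;
///     }
///     return 0; // running off the end terminates the coroutine
///   }
/// };
///
/// // ...
/// counter c;
/// while (!c.is_complete())
///   c(); // yields 1, then 2, then 0 as it terminates
/// @endcode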
bool is_complete() const { return value_ == -1; } private: friend class detail::coroutine_ref; int value_; }; namespace detail { class coroutine_ref { public: coroutine_ref(coroutine& c) : value_(c.value_), modified_(false) {} coroutine_ref(coroutine* c) : value_(c->value_), modified_(false) {} ~coroutine_ref() { if (!modified_) value_ = -1; } operator int() const { return value_; } int& operator=(int v) { modified_ = true; return value_ = v; } private: void operator=(const coroutine_ref&); int& value_; bool modified_; }; } // namespace detail } // namespace asio #define ASIO_CORO_REENTER(c) \ switch (::asio::detail::coroutine_ref _coro_value = c) \ case -1: if (_coro_value) \ { \ goto terminate_coroutine; \ terminate_coroutine: \ _coro_value = -1; \ goto bail_out_of_coroutine; \ bail_out_of_coroutine: \ break; \ } \ else case 0: #define ASIO_CORO_YIELD_IMPL(n) \ for (_coro_value = (n);;) \ if (_coro_value == 0) \ { \ case (n): ; \ break; \ } \ else \ switch (_coro_value ? 0 : 1) \ for (;;) \ case -1: if (_coro_value) \ goto terminate_coroutine; \ else for (;;) \ case 1: if (_coro_value) \ goto bail_out_of_coroutine; \ else case 0: #define ASIO_CORO_FORK_IMPL(n) \ for (_coro_value = -(n);; _coro_value = (n)) \ if (_coro_value == (n)) \ { \ case -(n): ; \ break; \ } \ else #if defined(_MSC_VER) # define ASIO_CORO_YIELD ASIO_CORO_YIELD_IMPL(__COUNTER__ + 1) # define ASIO_CORO_FORK ASIO_CORO_FORK_IMPL(__COUNTER__ + 1) #else // defined(_MSC_VER) # define ASIO_CORO_YIELD ASIO_CORO_YIELD_IMPL(__LINE__) # define ASIO_CORO_FORK ASIO_CORO_FORK_IMPL(__LINE__) #endif // defined(_MSC_VER) #endif // ASIO_COROUTINE_HPP galera-3-25.3.20/asio/asio/strand.hpp0000644000015300001660000002104013042054732017025 0ustar jenkinsjenkins// // strand.hpp // ~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_STRAND_HPP #define ASIO_STRAND_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/async_result.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/strand_service.hpp" #include "asio/detail/wrapped_handler.hpp" #include "asio/io_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Provides serialised handler execution. /** * The io_service::strand class provides the ability to post and dispatch * handlers with the guarantee that none of those handlers will execute * concurrently. * * @par Order of handler invocation * Given: * * @li a strand object @c s * * @li an object @c a meeting completion handler requirements * * @li an object @c a1 which is an arbitrary copy of @c a made by the * implementation * * @li an object @c b meeting completion handler requirements * * @li an object @c b1 which is an arbitrary copy of @c b made by the * implementation * * if any of the following conditions are true: * * @li @c s.post(a) happens-before @c s.post(b) * * @li @c s.post(a) happens-before @c s.dispatch(b), where the latter is * performed outside the strand * * @li @c s.dispatch(a) happens-before @c s.post(b), where the former is * performed outside the strand * * @li @c s.dispatch(a) happens-before @c s.dispatch(b), where both are * performed outside the strand * * then @c asio_handler_invoke(a1, &a1) happens-before * @c asio_handler_invoke(b1, &b1). 
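 *
 * As an illustrative sketch (handler_a and handler_b stand for hypothetical
 * completion handlers), posting both through the same strand means they will
 * never run concurrently, and because @c s.post(a) happens-before
 * @c s.post(b), the invocation of handler_a happens-before that of
 * handler_b, even when several threads call io_service::run():
 *
 * @code asio::io_service io_service;
 * asio::io_service::strand s(io_service);
 * s.post(handler_a);
 * s.post(handler_b);
 * io_service.run(); @endcode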
* * Note that in the following case: * @code async_op_1(..., s.wrap(a)); * async_op_2(..., s.wrap(b)); @endcode * the completion of the first async operation will perform @c s.dispatch(a), * and the second will perform @c s.dispatch(b), but the order in which those * are performed is unspecified. That is, you cannot state whether one * happens-before the other. Therefore none of the above conditions are met and * no ordering guarantee is made. * * @note The implementation makes no guarantee that handlers posted or * dispatched through different @c strand objects will be invoked concurrently. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Safe. * * @par Concepts: * Dispatcher. */ class io_service::strand { public: /// Constructor. /** * Constructs the strand. * * @param io_service The io_service object that the strand will use to * dispatch handlers that are ready to be run. */ explicit strand(asio::io_service& io_service) : service_(asio::use_service< asio::detail::strand_service>(io_service)) { service_.construct(impl_); } /// Destructor. /** * Destroys a strand. * * Handlers posted through the strand that have not yet been invoked will * still be dispatched in a way that meets the guarantee of non-concurrency. */ ~strand() { } /// Get the io_service associated with the strand. /** * This function may be used to obtain the io_service object that the strand * uses to dispatch handlers for asynchronous operations. * * @return A reference to the io_service object that the strand will use to * dispatch handlers. Ownership is not transferred to the caller. */ asio::io_service& get_io_service() { return service_.get_io_service(); } /// Request the strand to invoke the given handler. /** * This function is used to ask the strand to execute the given handler. * * The strand object guarantees that handlers posted or dispatched through * the strand will not be executed concurrently. The handler may be executed * inside this function if the guarantee can be met. If this function is * called from within a handler that was posted or dispatched through the same * strand, then the new handler will be executed immediately. * * The strand's guarantee is in addition to the guarantee provided by the * underlying io_service. The io_service guarantees that the handler will only * be called in a thread in which the io_service's run member function is * currently being invoked. * * @param handler The handler to be called. The strand will make a copy of the * handler object as required. The function signature of the handler must be: * @code void handler(); @endcode */ template ASIO_INITFN_RESULT_TYPE(CompletionHandler, void ()) dispatch(ASIO_MOVE_ARG(CompletionHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a CompletionHandler. ASIO_COMPLETION_HANDLER_CHECK(CompletionHandler, handler) type_check; detail::async_result_init< CompletionHandler, void ()> init( ASIO_MOVE_CAST(CompletionHandler)(handler)); service_.dispatch(impl_, init.handler); return init.result.get(); } /// Request the strand to invoke the given handler and return /// immediately. /** * This function is used to ask the strand to execute the given handler, but * without allowing the strand to call the handler from inside this function. * * The strand object guarantees that handlers posted or dispatched through * the strand will not be executed concurrently. 
The strand's guarantee is in * addition to the guarantee provided by the underlying io_service. The * io_service guarantees that the handler will only be called in a thread in * which the io_service's run member function is currently being invoked. * * @param handler The handler to be called. The strand will make a copy of the * handler object as required. The function signature of the handler must be: * @code void handler(); @endcode */ template ASIO_INITFN_RESULT_TYPE(CompletionHandler, void ()) post(ASIO_MOVE_ARG(CompletionHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a CompletionHandler. ASIO_COMPLETION_HANDLER_CHECK(CompletionHandler, handler) type_check; detail::async_result_init< CompletionHandler, void ()> init( ASIO_MOVE_CAST(CompletionHandler)(handler)); service_.post(impl_, init.handler); return init.result.get(); } /// Create a new handler that automatically dispatches the wrapped handler /// on the strand. /** * This function is used to create a new handler function object that, when * invoked, will automatically pass the wrapped handler to the strand's * dispatch function. * * @param handler The handler to be wrapped. The strand will make a copy of * the handler object as required. The function signature of the handler must * be: @code void handler(A1 a1, ... An an); @endcode * * @return A function object that, when invoked, passes the wrapped handler to * the strand's dispatch function. Given a function object with the signature: * @code R f(A1 a1, ... An an); @endcode * If this function object is passed to the wrap function like so: * @code strand.wrap(f); @endcode * then the return value is a function object with the signature * @code void g(A1 a1, ... An an); @endcode * that, when invoked, executes code equivalent to: * @code strand.dispatch(boost::bind(f, a1, ... an)); @endcode */ template #if defined(GENERATING_DOCUMENTATION) unspecified #else detail::wrapped_handler #endif wrap(Handler handler) { return detail::wrapped_handler(*this, handler); } /// Determine whether the strand is running in the current thread. /** * @return @c true if the current thread is executing a handler that was * submitted to the strand using post(), dispatch() or wrap(). Otherwise * returns @c false. */ bool running_in_this_thread() const { return service_.running_in_this_thread(impl_); } private: asio::detail::strand_service& service_; asio::detail::strand_service::implementation_type impl_; }; /// (Deprecated: Use asio::io_service::strand.) Typedef for backwards /// compatibility. typedef asio::io_service::strand strand; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_STRAND_HPP galera-3-25.3.20/asio/asio/seq_packet_socket_service.hpp0000644000015300001660000002671013042054732022752 0ustar jenkinsjenkins// // seq_packet_socket_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SEQ_PACKET_SOCKET_SERVICE_HPP #define ASIO_SEQ_PACKET_SOCKET_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/async_result.hpp" #include "asio/detail/type_traits.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #if defined(ASIO_WINDOWS_RUNTIME) # include "asio/detail/null_socket_service.hpp" #elif defined(ASIO_HAS_IOCP) # include "asio/detail/win_iocp_socket_service.hpp" #else # include "asio/detail/reactive_socket_service.hpp" #endif #include "asio/detail/push_options.hpp" namespace asio { /// Default service implementation for a sequenced packet socket. template class seq_packet_socket_service #if defined(GENERATING_DOCUMENTATION) : public asio::io_service::service #else : public asio::detail::service_base< seq_packet_socket_service > #endif { public: #if defined(GENERATING_DOCUMENTATION) /// The unique service identifier. static asio::io_service::id id; #endif /// The protocol type. typedef Protocol protocol_type; /// The endpoint type. typedef typename Protocol::endpoint endpoint_type; private: // The type of the platform-specific implementation. #if defined(ASIO_WINDOWS_RUNTIME) typedef detail::null_socket_service service_impl_type; #elif defined(ASIO_HAS_IOCP) typedef detail::win_iocp_socket_service service_impl_type; #else typedef detail::reactive_socket_service service_impl_type; #endif public: /// The type of a sequenced packet socket implementation. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined implementation_type; #else typedef typename service_impl_type::implementation_type implementation_type; #endif /// (Deprecated: Use native_handle_type.) The native socket type. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined native_type; #else typedef typename service_impl_type::native_handle_type native_type; #endif /// The native socket type. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined native_handle_type; #else typedef typename service_impl_type::native_handle_type native_handle_type; #endif /// Construct a new sequenced packet socket service for the specified /// io_service. explicit seq_packet_socket_service(asio::io_service& io_service) : asio::detail::service_base< seq_packet_socket_service >(io_service), service_impl_(io_service) { } /// Construct a new sequenced packet socket implementation. void construct(implementation_type& impl) { service_impl_.construct(impl); } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a new sequenced packet socket implementation. void move_construct(implementation_type& impl, implementation_type& other_impl) { service_impl_.move_construct(impl, other_impl); } /// Move-assign from another sequenced packet socket implementation. void move_assign(implementation_type& impl, seq_packet_socket_service& other_service, implementation_type& other_impl) { service_impl_.move_assign(impl, other_service.service_impl_, other_impl); } /// Move-construct a new sequenced packet socket implementation from another /// protocol type. 
template void converting_move_construct(implementation_type& impl, typename seq_packet_socket_service< Protocol1>::implementation_type& other_impl, typename enable_if::value>::type* = 0) { service_impl_.template converting_move_construct( impl, other_impl); } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Destroy a sequenced packet socket implementation. void destroy(implementation_type& impl) { service_impl_.destroy(impl); } /// Open a sequenced packet socket. asio::error_code open(implementation_type& impl, const protocol_type& protocol, asio::error_code& ec) { if (protocol.type() == ASIO_OS_DEF(SOCK_SEQPACKET)) service_impl_.open(impl, protocol, ec); else ec = asio::error::invalid_argument; return ec; } /// Assign an existing native socket to a sequenced packet socket. asio::error_code assign(implementation_type& impl, const protocol_type& protocol, const native_handle_type& native_socket, asio::error_code& ec) { return service_impl_.assign(impl, protocol, native_socket, ec); } /// Determine whether the socket is open. bool is_open(const implementation_type& impl) const { return service_impl_.is_open(impl); } /// Close a sequenced packet socket implementation. asio::error_code close(implementation_type& impl, asio::error_code& ec) { return service_impl_.close(impl, ec); } /// (Deprecated: Use native_handle().) Get the native socket implementation. native_type native(implementation_type& impl) { return service_impl_.native_handle(impl); } /// Get the native socket implementation. native_handle_type native_handle(implementation_type& impl) { return service_impl_.native_handle(impl); } /// Cancel all asynchronous operations associated with the socket. asio::error_code cancel(implementation_type& impl, asio::error_code& ec) { return service_impl_.cancel(impl, ec); } /// Determine whether the socket is at the out-of-band data mark. bool at_mark(const implementation_type& impl, asio::error_code& ec) const { return service_impl_.at_mark(impl, ec); } /// Determine the number of bytes available for reading. std::size_t available(const implementation_type& impl, asio::error_code& ec) const { return service_impl_.available(impl, ec); } /// Bind the sequenced packet socket to the specified local endpoint. asio::error_code bind(implementation_type& impl, const endpoint_type& endpoint, asio::error_code& ec) { return service_impl_.bind(impl, endpoint, ec); } /// Connect the sequenced packet socket to the specified endpoint. asio::error_code connect(implementation_type& impl, const endpoint_type& peer_endpoint, asio::error_code& ec) { return service_impl_.connect(impl, peer_endpoint, ec); } /// Start an asynchronous connect. template ASIO_INITFN_RESULT_TYPE(ConnectHandler, void (asio::error_code)) async_connect(implementation_type& impl, const endpoint_type& peer_endpoint, ASIO_MOVE_ARG(ConnectHandler) handler) { detail::async_result_init< ConnectHandler, void (asio::error_code)> init( ASIO_MOVE_CAST(ConnectHandler)(handler)); service_impl_.async_connect(impl, peer_endpoint, init.handler); return init.result.get(); } /// Set a socket option. template asio::error_code set_option(implementation_type& impl, const SettableSocketOption& option, asio::error_code& ec) { return service_impl_.set_option(impl, option, ec); } /// Get a socket option. template asio::error_code get_option(const implementation_type& impl, GettableSocketOption& option, asio::error_code& ec) const { return service_impl_.get_option(impl, option, ec); } /// Perform an IO control command on the socket. 
template asio::error_code io_control(implementation_type& impl, IoControlCommand& command, asio::error_code& ec) { return service_impl_.io_control(impl, command, ec); } /// Gets the non-blocking mode of the socket. bool non_blocking(const implementation_type& impl) const { return service_impl_.non_blocking(impl); } /// Sets the non-blocking mode of the socket. asio::error_code non_blocking(implementation_type& impl, bool mode, asio::error_code& ec) { return service_impl_.non_blocking(impl, mode, ec); } /// Gets the non-blocking mode of the native socket implementation. bool native_non_blocking(const implementation_type& impl) const { return service_impl_.native_non_blocking(impl); } /// Sets the non-blocking mode of the native socket implementation. asio::error_code native_non_blocking(implementation_type& impl, bool mode, asio::error_code& ec) { return service_impl_.native_non_blocking(impl, mode, ec); } /// Get the local endpoint. endpoint_type local_endpoint(const implementation_type& impl, asio::error_code& ec) const { return service_impl_.local_endpoint(impl, ec); } /// Get the remote endpoint. endpoint_type remote_endpoint(const implementation_type& impl, asio::error_code& ec) const { return service_impl_.remote_endpoint(impl, ec); } /// Disable sends or receives on the socket. asio::error_code shutdown(implementation_type& impl, socket_base::shutdown_type what, asio::error_code& ec) { return service_impl_.shutdown(impl, what, ec); } /// Send the given data to the peer. template std::size_t send(implementation_type& impl, const ConstBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { return service_impl_.send(impl, buffers, flags, ec); } /// Start an asynchronous send. template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_send(implementation_type& impl, const ConstBufferSequence& buffers, socket_base::message_flags flags, ASIO_MOVE_ARG(WriteHandler) handler) { detail::async_result_init< WriteHandler, void (asio::error_code, std::size_t)> init( ASIO_MOVE_CAST(WriteHandler)(handler)); service_impl_.async_send(impl, buffers, flags, init.handler); return init.result.get(); } /// Receive some data from the peer. template std::size_t receive(implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags in_flags, socket_base::message_flags& out_flags, asio::error_code& ec) { return service_impl_.receive_with_flags(impl, buffers, in_flags, out_flags, ec); } /// Start an asynchronous receive. template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive(implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags in_flags, socket_base::message_flags& out_flags, ASIO_MOVE_ARG(ReadHandler) handler) { detail::async_result_init< ReadHandler, void (asio::error_code, std::size_t)> init( ASIO_MOVE_CAST(ReadHandler)(handler)); service_impl_.async_receive_with_flags(impl, buffers, in_flags, out_flags, init.handler); return init.result.get(); } private: // Destroy all user-defined handler objects owned by the service. void shutdown_service() { service_impl_.shutdown_service(); } // The platform-specific implementation. 
service_impl_type service_impl_; }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SEQ_PACKET_SOCKET_SERVICE_HPP galera-3-25.3.20/asio/asio/placeholders.hpp0000644000015300001660000000576313042054732020215 0ustar jenkinsjenkins// // placeholders.hpp // ~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_PLACEHOLDERS_HPP #define ASIO_PLACEHOLDERS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_BOOST_BIND) # include #endif // defined(ASIO_HAS_BOOST_BIND) #include "asio/detail/push_options.hpp" namespace asio { namespace placeholders { #if defined(GENERATING_DOCUMENTATION) /// An argument placeholder, for use with boost::bind(), that corresponds to /// the error argument of a handler for any of the asynchronous functions. unspecified error; /// An argument placeholder, for use with boost::bind(), that corresponds to /// the bytes_transferred argument of a handler for asynchronous functions such /// as asio::basic_stream_socket::async_write_some or /// asio::async_write. unspecified bytes_transferred; /// An argument placeholder, for use with boost::bind(), that corresponds to /// the iterator argument of a handler for asynchronous functions such as /// asio::basic_resolver::async_resolve. unspecified iterator; /// An argument placeholder, for use with boost::bind(), that corresponds to /// the signal_number argument of a handler for asynchronous functions such as /// asio::signal_set::async_wait. unspecified signal_number; #elif defined(ASIO_HAS_BOOST_BIND) # if defined(__BORLANDC__) || defined(__GNUC__) inline boost::arg<1> error() { return boost::arg<1>(); } inline boost::arg<2> bytes_transferred() { return boost::arg<2>(); } inline boost::arg<2> iterator() { return boost::arg<2>(); } inline boost::arg<2> signal_number() { return boost::arg<2>(); } # else namespace detail { template struct placeholder { static boost::arg& get() { static boost::arg result; return result; } }; } # if defined(ASIO_MSVC) && (ASIO_MSVC < 1400) static boost::arg<1>& error = asio::placeholders::detail::placeholder<1>::get(); static boost::arg<2>& bytes_transferred = asio::placeholders::detail::placeholder<2>::get(); static boost::arg<2>& iterator = asio::placeholders::detail::placeholder<2>::get(); static boost::arg<2>& signal_number = asio::placeholders::detail::placeholder<2>::get(); # else namespace { boost::arg<1>& error = asio::placeholders::detail::placeholder<1>::get(); boost::arg<2>& bytes_transferred = asio::placeholders::detail::placeholder<2>::get(); boost::arg<2>& iterator = asio::placeholders::detail::placeholder<2>::get(); boost::arg<2>& signal_number = asio::placeholders::detail::placeholder<2>::get(); } // namespace # endif # endif #endif } // namespace placeholders } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_PLACEHOLDERS_HPP galera-3-25.3.20/asio/asio/local/0000755000015300001660000000000013042054732016116 5ustar jenkinsjenkinsgalera-3-25.3.20/asio/asio/local/connect_pair.hpp0000644000015300001660000000610513042054732021275 0ustar jenkinsjenkins// // local/connect_pair.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_LOCAL_CONNECT_PAIR_HPP #define ASIO_LOCAL_CONNECT_PAIR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_LOCAL_SOCKETS) \ || defined(GENERATING_DOCUMENTATION) #include "asio/basic_socket.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/local/basic_endpoint.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace local { /// Create a pair of connected sockets. template void connect_pair( basic_socket& socket1, basic_socket& socket2); /// Create a pair of connected sockets. template asio::error_code connect_pair( basic_socket& socket1, basic_socket& socket2, asio::error_code& ec); template inline void connect_pair( basic_socket& socket1, basic_socket& socket2) { asio::error_code ec; connect_pair(socket1, socket2, ec); asio::detail::throw_error(ec, "connect_pair"); } template inline asio::error_code connect_pair( basic_socket& socket1, basic_socket& socket2, asio::error_code& ec) { // Check that this function is only being used with a UNIX domain socket. asio::local::basic_endpoint* tmp = static_cast(0); (void)tmp; Protocol protocol; asio::detail::socket_type sv[2]; if (asio::detail::socket_ops::socketpair(protocol.family(), protocol.type(), protocol.protocol(), sv, ec) == asio::detail::socket_error_retval) return ec; if (socket1.assign(protocol, sv[0], ec)) { asio::error_code temp_ec; asio::detail::socket_ops::state_type state[2] = { 0, 0 }; asio::detail::socket_ops::close(sv[0], state[0], true, temp_ec); asio::detail::socket_ops::close(sv[1], state[1], true, temp_ec); return ec; } if (socket2.assign(protocol, sv[1], ec)) { asio::error_code temp_ec; socket1.close(temp_ec); asio::detail::socket_ops::state_type state = 0; asio::detail::socket_ops::close(sv[1], state, true, temp_ec); return ec; } return ec; } } // namespace local } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_LOCAL_SOCKETS) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_LOCAL_CONNECT_PAIR_HPP galera-3-25.3.20/asio/asio/local/detail/0000755000015300001660000000000013042054732017360 5ustar jenkinsjenkinsgalera-3-25.3.20/asio/asio/local/detail/endpoint.hpp0000644000015300001660000000642713042054732021722 0ustar jenkinsjenkins// // local/detail/endpoint.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Derived from a public domain implementation written by Daniel Casimiro. // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_LOCAL_DETAIL_ENDPOINT_HPP #define ASIO_LOCAL_DETAIL_ENDPOINT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_LOCAL_SOCKETS) #include #include #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace local { namespace detail { // Helper class for implementing a UNIX domain endpoint. class endpoint { public: // Default constructor. ASIO_DECL endpoint(); // Construct an endpoint using the specified path name. 
ASIO_DECL endpoint(const char* path_name); // Construct an endpoint using the specified path name. ASIO_DECL endpoint(const std::string& path_name); // Copy constructor. endpoint(const endpoint& other) : data_(other.data_), path_length_(other.path_length_) { } // Assign from another endpoint. endpoint& operator=(const endpoint& other) { data_ = other.data_; path_length_ = other.path_length_; return *this; } // Get the underlying endpoint in the native type. asio::detail::socket_addr_type* data() { return &data_.base; } // Get the underlying endpoint in the native type. const asio::detail::socket_addr_type* data() const { return &data_.base; } // Get the underlying size of the endpoint in the native type. std::size_t size() const { return path_length_ + offsetof(asio::detail::sockaddr_un_type, sun_path); } // Set the underlying size of the endpoint in the native type. ASIO_DECL void resize(std::size_t size); // Get the capacity of the endpoint in the native type. std::size_t capacity() const { return sizeof(asio::detail::sockaddr_un_type); } // Get the path associated with the endpoint. ASIO_DECL std::string path() const; // Set the path associated with the endpoint. ASIO_DECL void path(const char* p); // Set the path associated with the endpoint. ASIO_DECL void path(const std::string& p); // Compare two endpoints for equality. ASIO_DECL friend bool operator==( const endpoint& e1, const endpoint& e2); // Compare endpoints for ordering. ASIO_DECL friend bool operator<( const endpoint& e1, const endpoint& e2); private: // The underlying UNIX socket address. union data_union { asio::detail::socket_addr_type base; asio::detail::sockaddr_un_type local; } data_; // The length of the path associated with the endpoint. std::size_t path_length_; // Initialise with a specified path. ASIO_DECL void init(const char* path, std::size_t path_length); }; } // namespace detail } // namespace local } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/local/detail/impl/endpoint.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_LOCAL_SOCKETS) #endif // ASIO_LOCAL_DETAIL_ENDPOINT_HPP galera-3-25.3.20/asio/asio/local/detail/impl/0000755000015300001660000000000013042054732020321 5ustar jenkinsjenkinsgalera-3-25.3.20/asio/asio/local/detail/impl/endpoint.ipp0000644000015300001660000000605513042054732022661 0ustar jenkinsjenkins// // local/detail/impl/endpoint.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Derived from a public domain implementation written by Daniel Casimiro. // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_LOCAL_DETAIL_IMPL_ENDPOINT_IPP #define ASIO_LOCAL_DETAIL_IMPL_ENDPOINT_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_LOCAL_SOCKETS) #include #include "asio/detail/socket_ops.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/local/detail/endpoint.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace local { namespace detail { endpoint::endpoint() { init("", 0); } endpoint::endpoint(const char* path_name) { using namespace std; // For strlen. 
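  // init() (below) validates the length against the sockaddr_un buffer,
  // copies the bytes and NUL-terminates ordinary (non-abstract) path names.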
init(path_name, strlen(path_name)); } endpoint::endpoint(const std::string& path_name) { init(path_name.data(), path_name.length()); } void endpoint::resize(std::size_t new_size) { if (new_size > sizeof(asio::detail::sockaddr_un_type)) { asio::error_code ec(asio::error::invalid_argument); asio::detail::throw_error(ec); } else if (new_size == 0) { path_length_ = 0; } else { path_length_ = new_size - offsetof(asio::detail::sockaddr_un_type, sun_path); // The path returned by the operating system may be NUL-terminated. if (path_length_ > 0 && data_.local.sun_path[path_length_ - 1] == 0) --path_length_; } } std::string endpoint::path() const { return std::string(data_.local.sun_path, path_length_); } void endpoint::path(const char* p) { using namespace std; // For strlen. init(p, strlen(p)); } void endpoint::path(const std::string& p) { init(p.data(), p.length()); } bool operator==(const endpoint& e1, const endpoint& e2) { return e1.path() == e2.path(); } bool operator<(const endpoint& e1, const endpoint& e2) { return e1.path() < e2.path(); } void endpoint::init(const char* path_name, std::size_t path_length) { if (path_length > sizeof(data_.local.sun_path) - 1) { // The buffer is not large enough to store this address. asio::error_code ec(asio::error::name_too_long); asio::detail::throw_error(ec); } using namespace std; // For memcpy. data_.local = asio::detail::sockaddr_un_type(); data_.local.sun_family = AF_UNIX; memcpy(data_.local.sun_path, path_name, path_length); path_length_ = path_length; // NUL-terminate normal path names. Names that start with a NUL are in the // UNIX domain protocol's "abstract namespace" and are not NUL-terminated. if (path_length > 0 && data_.local.sun_path[0] == 0) data_.local.sun_path[path_length] = 0; } } // namespace detail } // namespace local } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_LOCAL_SOCKETS) #endif // ASIO_LOCAL_DETAIL_IMPL_ENDPOINT_IPP galera-3-25.3.20/asio/asio/local/basic_endpoint.hpp0000644000015300001660000001266713042054732021624 0ustar jenkinsjenkins// // local/basic_endpoint.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Derived from a public domain implementation written by Daniel Casimiro. // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_LOCAL_BASIC_ENDPOINT_HPP #define ASIO_LOCAL_BASIC_ENDPOINT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_LOCAL_SOCKETS) \ || defined(GENERATING_DOCUMENTATION) #include "asio/local/detail/endpoint.hpp" #if !defined(ASIO_NO_IOSTREAM) # include #endif // !defined(ASIO_NO_IOSTREAM) #include "asio/detail/push_options.hpp" namespace asio { namespace local { /// Describes an endpoint for a UNIX socket. /** * The asio::local::basic_endpoint class template describes an endpoint * that may be associated with a particular UNIX socket. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. * * @par Concepts: * Endpoint. */ template class basic_endpoint { public: /// The protocol type associated with the endpoint. typedef Protocol protocol_type; /// The type of the endpoint structure. This type is dependent on the /// underlying implementation of the socket layer. 
#if defined(GENERATING_DOCUMENTATION) typedef implementation_defined data_type; #else typedef asio::detail::socket_addr_type data_type; #endif /// Default constructor. basic_endpoint() { } /// Construct an endpoint using the specified path name. basic_endpoint(const char* path_name) : impl_(path_name) { } /// Construct an endpoint using the specified path name. basic_endpoint(const std::string& path_name) : impl_(path_name) { } /// Copy constructor. basic_endpoint(const basic_endpoint& other) : impl_(other.impl_) { } #if defined(ASIO_HAS_MOVE) /// Move constructor. basic_endpoint(basic_endpoint&& other) : impl_(other.impl_) { } #endif // defined(ASIO_HAS_MOVE) /// Assign from another endpoint. basic_endpoint& operator=(const basic_endpoint& other) { impl_ = other.impl_; return *this; } #if defined(ASIO_HAS_MOVE) /// Move-assign from another endpoint. basic_endpoint& operator=(basic_endpoint&& other) { impl_ = other.impl_; return *this; } #endif // defined(ASIO_HAS_MOVE) /// The protocol associated with the endpoint. protocol_type protocol() const { return protocol_type(); } /// Get the underlying endpoint in the native type. data_type* data() { return impl_.data(); } /// Get the underlying endpoint in the native type. const data_type* data() const { return impl_.data(); } /// Get the underlying size of the endpoint in the native type. std::size_t size() const { return impl_.size(); } /// Set the underlying size of the endpoint in the native type. void resize(std::size_t new_size) { impl_.resize(new_size); } /// Get the capacity of the endpoint in the native type. std::size_t capacity() const { return impl_.capacity(); } /// Get the path associated with the endpoint. std::string path() const { return impl_.path(); } /// Set the path associated with the endpoint. void path(const char* p) { impl_.path(p); } /// Set the path associated with the endpoint. void path(const std::string& p) { impl_.path(p); } /// Compare two endpoints for equality. friend bool operator==(const basic_endpoint& e1, const basic_endpoint& e2) { return e1.impl_ == e2.impl_; } /// Compare two endpoints for inequality. friend bool operator!=(const basic_endpoint& e1, const basic_endpoint& e2) { return !(e1.impl_ == e2.impl_); } /// Compare endpoints for ordering. friend bool operator<(const basic_endpoint& e1, const basic_endpoint& e2) { return e1.impl_ < e2.impl_; } /// Compare endpoints for ordering. friend bool operator>(const basic_endpoint& e1, const basic_endpoint& e2) { return e2.impl_ < e1.impl_; } /// Compare endpoints for ordering. friend bool operator<=(const basic_endpoint& e1, const basic_endpoint& e2) { return !(e2 < e1); } /// Compare endpoints for ordering. friend bool operator>=(const basic_endpoint& e1, const basic_endpoint& e2) { return !(e1 < e2); } private: // The underlying UNIX domain endpoint. asio::local::detail::endpoint impl_; }; /// Output an endpoint as a string. /** * Used to output a human-readable string for a specified endpoint. * * @param os The output stream to which the string will be written. * * @param endpoint The endpoint to be written. * * @return The output stream. 
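 *
 * @par Example
 * A minimal sketch (the socket path shown is arbitrary):
 * @code asio::local::stream_protocol::endpoint ep("/tmp/example.sock");
 * std::cout << ep << std::endl; // writes "/tmp/example.sock" @endcode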
* * @relates asio::local::basic_endpoint */ template std::basic_ostream& operator<<( std::basic_ostream& os, const basic_endpoint& endpoint) { os << endpoint.path(); return os; } } // namespace local } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_LOCAL_SOCKETS) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_LOCAL_BASIC_ENDPOINT_HPP galera-3-25.3.20/asio/asio/local/datagram_protocol.hpp0000644000015300001660000000352613042054732022336 0ustar jenkinsjenkins// // local/datagram_protocol.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_LOCAL_DATAGRAM_PROTOCOL_HPP #define ASIO_LOCAL_DATAGRAM_PROTOCOL_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_LOCAL_SOCKETS) \ || defined(GENERATING_DOCUMENTATION) #include "asio/basic_datagram_socket.hpp" #include "asio/detail/socket_types.hpp" #include "asio/local/basic_endpoint.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace local { /// Encapsulates the flags needed for datagram-oriented UNIX sockets. /** * The asio::local::datagram_protocol class contains flags necessary for * datagram-oriented UNIX domain sockets. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Safe. * * @par Concepts: * Protocol. */ class datagram_protocol { public: /// Obtain an identifier for the type of the protocol. int type() const { return SOCK_DGRAM; } /// Obtain an identifier for the protocol. int protocol() const { return 0; } /// Obtain an identifier for the protocol family. int family() const { return AF_UNIX; } /// The type of a UNIX domain endpoint. typedef basic_endpoint endpoint; /// The UNIX domain socket type. typedef basic_datagram_socket socket; }; } // namespace local } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_LOCAL_SOCKETS) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_LOCAL_DATAGRAM_PROTOCOL_HPP galera-3-25.3.20/asio/asio/local/stream_protocol.hpp0000644000015300001660000000422713042054732022050 0ustar jenkinsjenkins// // local/stream_protocol.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_LOCAL_STREAM_PROTOCOL_HPP #define ASIO_LOCAL_STREAM_PROTOCOL_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_LOCAL_SOCKETS) \ || defined(GENERATING_DOCUMENTATION) #include "asio/basic_socket_acceptor.hpp" #include "asio/basic_socket_iostream.hpp" #include "asio/basic_stream_socket.hpp" #include "asio/detail/socket_types.hpp" #include "asio/local/basic_endpoint.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace local { /// Encapsulates the flags needed for stream-oriented UNIX sockets. /** * The asio::local::stream_protocol class contains flags necessary for * stream-oriented UNIX domain sockets. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Safe. 
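 *
 * @par Example
 * A brief sketch of accepting a connection on a UNIX domain stream socket
 * (the socket path is arbitrary):
 * @code asio::io_service io_service;
 * asio::local::stream_protocol::endpoint ep("/tmp/example.sock");
 * asio::local::stream_protocol::acceptor acceptor(io_service, ep);
 * asio::local::stream_protocol::socket socket(io_service);
 * acceptor.accept(socket); @endcode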
* * @par Concepts: * Protocol. */ class stream_protocol { public: /// Obtain an identifier for the type of the protocol. int type() const { return SOCK_STREAM; } /// Obtain an identifier for the protocol. int protocol() const { return 0; } /// Obtain an identifier for the protocol family. int family() const { return AF_UNIX; } /// The type of a UNIX domain endpoint. typedef basic_endpoint endpoint; /// The UNIX domain socket type. typedef basic_stream_socket socket; /// The UNIX domain acceptor type. typedef basic_socket_acceptor acceptor; #if !defined(ASIO_NO_IOSTREAM) /// The UNIX domain iostream type. typedef basic_socket_iostream iostream; #endif // !defined(ASIO_NO_IOSTREAM) }; } // namespace local } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_LOCAL_SOCKETS) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_LOCAL_STREAM_PROTOCOL_HPP galera-3-25.3.20/asio/asio/basic_socket.hpp0000644000015300001660000014265113042054732020177 0ustar jenkinsjenkins// // basic_socket.hpp // ~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BASIC_SOCKET_HPP #define ASIO_BASIC_SOCKET_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/async_result.hpp" #include "asio/basic_io_object.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/type_traits.hpp" #include "asio/error.hpp" #include "asio/socket_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Provides socket functionality. /** * The basic_socket class template provides functionality that is common to both * stream-oriented and datagram-oriented sockets. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ template class basic_socket : public basic_io_object, public socket_base { public: /// (Deprecated: Use native_handle_type.) The native representation of a /// socket. typedef typename SocketService::native_handle_type native_type; /// The native representation of a socket. typedef typename SocketService::native_handle_type native_handle_type; /// The protocol type. typedef Protocol protocol_type; /// The endpoint type. typedef typename Protocol::endpoint endpoint_type; /// A basic_socket is always the lowest layer. typedef basic_socket lowest_layer_type; /// Construct a basic_socket without opening it. /** * This constructor creates a socket without opening it. * * @param io_service The io_service object that the socket will use to * dispatch handlers for any asynchronous operations performed on the socket. */ explicit basic_socket(asio::io_service& io_service) : basic_io_object(io_service) { } /// Construct and open a basic_socket. /** * This constructor creates and opens a socket. * * @param io_service The io_service object that the socket will use to * dispatch handlers for any asynchronous operations performed on the socket. * * @param protocol An object specifying protocol parameters to be used. * * @throws asio::system_error Thrown on failure. 
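 *
 * @par Example
 * For illustration only:
 * @code asio::ip::tcp::socket socket(io_service, asio::ip::tcp::v4()); @endcode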
*/ basic_socket(asio::io_service& io_service, const protocol_type& protocol) : basic_io_object(io_service) { asio::error_code ec; this->get_service().open(this->get_implementation(), protocol, ec); asio::detail::throw_error(ec, "open"); } /// Construct a basic_socket, opening it and binding it to the given local /// endpoint. /** * This constructor creates a socket and automatically opens it bound to the * specified endpoint on the local machine. The protocol used is the protocol * associated with the given endpoint. * * @param io_service The io_service object that the socket will use to * dispatch handlers for any asynchronous operations performed on the socket. * * @param endpoint An endpoint on the local machine to which the socket will * be bound. * * @throws asio::system_error Thrown on failure. */ basic_socket(asio::io_service& io_service, const endpoint_type& endpoint) : basic_io_object(io_service) { asio::error_code ec; const protocol_type protocol = endpoint.protocol(); this->get_service().open(this->get_implementation(), protocol, ec); asio::detail::throw_error(ec, "open"); this->get_service().bind(this->get_implementation(), endpoint, ec); asio::detail::throw_error(ec, "bind"); } /// Construct a basic_socket on an existing native socket. /** * This constructor creates a socket object to hold an existing native socket. * * @param io_service The io_service object that the socket will use to * dispatch handlers for any asynchronous operations performed on the socket. * * @param protocol An object specifying protocol parameters to be used. * * @param native_socket A native socket. * * @throws asio::system_error Thrown on failure. */ basic_socket(asio::io_service& io_service, const protocol_type& protocol, const native_handle_type& native_socket) : basic_io_object(io_service) { asio::error_code ec; this->get_service().assign(this->get_implementation(), protocol, native_socket, ec); asio::detail::throw_error(ec, "assign"); } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a basic_socket from another. /** * This constructor moves a socket from one object to another. * * @param other The other basic_socket object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_socket(io_service&) constructor. */ basic_socket(basic_socket&& other) : basic_io_object( ASIO_MOVE_CAST(basic_socket)(other)) { } /// Move-assign a basic_socket from another. /** * This assignment operator moves a socket from one object to another. * * @param other The other basic_socket object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_socket(io_service&) constructor. */ basic_socket& operator=(basic_socket&& other) { basic_io_object::operator=( ASIO_MOVE_CAST(basic_socket)(other)); return *this; } // All sockets have access to each other's implementations. template friend class basic_socket; /// Move-construct a basic_socket from a socket of another protocol type. /** * This constructor moves a socket from one object to another. * * @param other The other basic_socket object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_socket(io_service&) constructor. 
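 *
 * @par Example
 * A sketch of the intended use, assuming move support (ASIO_HAS_MOVE) is
 * enabled and asio/generic/stream_protocol.hpp is included:
 * @code asio::ip::tcp::socket tcp_socket(io_service);
 * asio::generic::stream_protocol::socket any_socket(std::move(tcp_socket)); @endcode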
*/ template basic_socket(basic_socket&& other, typename enable_if::value>::type* = 0) : basic_io_object(other.get_io_service()) { this->get_service().template converting_move_construct( this->get_implementation(), other.get_implementation()); } /// Move-assign a basic_socket from a socket of another protocol type. /** * This assignment operator moves a socket from one object to another. * * @param other The other basic_socket object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_socket(io_service&) constructor. */ template typename enable_if::value, basic_socket>::type& operator=( basic_socket&& other) { basic_socket tmp(ASIO_MOVE_CAST2(basic_socket< Protocol1, SocketService1>)(other)); basic_io_object::operator=( ASIO_MOVE_CAST(basic_socket)(tmp)); return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Get a reference to the lowest layer. /** * This function returns a reference to the lowest layer in a stack of * layers. Since a basic_socket cannot contain any further layers, it simply * returns a reference to itself. * * @return A reference to the lowest layer in the stack of layers. Ownership * is not transferred to the caller. */ lowest_layer_type& lowest_layer() { return *this; } /// Get a const reference to the lowest layer. /** * This function returns a const reference to the lowest layer in a stack of * layers. Since a basic_socket cannot contain any further layers, it simply * returns a reference to itself. * * @return A const reference to the lowest layer in the stack of layers. * Ownership is not transferred to the caller. */ const lowest_layer_type& lowest_layer() const { return *this; } /// Open the socket using the specified protocol. /** * This function opens the socket so that it will use the specified protocol. * * @param protocol An object specifying protocol parameters to be used. * * @throws asio::system_error Thrown on failure. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * socket.open(asio::ip::tcp::v4()); * @endcode */ void open(const protocol_type& protocol = protocol_type()) { asio::error_code ec; this->get_service().open(this->get_implementation(), protocol, ec); asio::detail::throw_error(ec, "open"); } /// Open the socket using the specified protocol. /** * This function opens the socket so that it will use the specified protocol. * * @param protocol An object specifying which protocol is to be used. * * @param ec Set to indicate what error occurred, if any. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * asio::error_code ec; * socket.open(asio::ip::tcp::v4(), ec); * if (ec) * { * // An error occurred. * } * @endcode */ asio::error_code open(const protocol_type& protocol, asio::error_code& ec) { return this->get_service().open(this->get_implementation(), protocol, ec); } /// Assign an existing native socket to the socket. /* * This function opens the socket to hold an existing native socket. * * @param protocol An object specifying which protocol is to be used. * * @param native_socket A native socket. * * @throws asio::system_error Thrown on failure. */ void assign(const protocol_type& protocol, const native_handle_type& native_socket) { asio::error_code ec; this->get_service().assign(this->get_implementation(), protocol, native_socket, ec); asio::detail::throw_error(ec, "assign"); } /// Assign an existing native socket to the socket. 
/* * This function opens the socket to hold an existing native socket. * * @param protocol An object specifying which protocol is to be used. * * @param native_socket A native socket. * * @param ec Set to indicate what error occurred, if any. */ asio::error_code assign(const protocol_type& protocol, const native_handle_type& native_socket, asio::error_code& ec) { return this->get_service().assign(this->get_implementation(), protocol, native_socket, ec); } /// Determine whether the socket is open. bool is_open() const { return this->get_service().is_open(this->get_implementation()); } /// Close the socket. /** * This function is used to close the socket. Any asynchronous send, receive * or connect operations will be cancelled immediately, and will complete * with the asio::error::operation_aborted error. * * @throws asio::system_error Thrown on failure. Note that, even if * the function indicates an error, the underlying descriptor is closed. * * @note For portable behaviour with respect to graceful closure of a * connected socket, call shutdown() before closing the socket. */ void close() { asio::error_code ec; this->get_service().close(this->get_implementation(), ec); asio::detail::throw_error(ec, "close"); } /// Close the socket. /** * This function is used to close the socket. Any asynchronous send, receive * or connect operations will be cancelled immediately, and will complete * with the asio::error::operation_aborted error. * * @param ec Set to indicate what error occurred, if any. Note that, even if * the function indicates an error, the underlying descriptor is closed. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::error_code ec; * socket.close(ec); * if (ec) * { * // An error occurred. * } * @endcode * * @note For portable behaviour with respect to graceful closure of a * connected socket, call shutdown() before closing the socket. */ asio::error_code close(asio::error_code& ec) { return this->get_service().close(this->get_implementation(), ec); } /// (Deprecated: Use native_handle().) Get the native socket representation. /** * This function may be used to obtain the underlying representation of the * socket. This is intended to allow access to native socket functionality * that is not otherwise provided. */ native_type native() { return this->get_service().native_handle(this->get_implementation()); } /// Get the native socket representation. /** * This function may be used to obtain the underlying representation of the * socket. This is intended to allow access to native socket functionality * that is not otherwise provided. */ native_handle_type native_handle() { return this->get_service().native_handle(this->get_implementation()); } /// Cancel all asynchronous operations associated with the socket. /** * This function causes all outstanding asynchronous connect, send and receive * operations to finish immediately, and the handlers for cancelled operations * will be passed the asio::error::operation_aborted error. * * @throws asio::system_error Thrown on failure. * * @note Calls to cancel() will always fail with * asio::error::operation_not_supported when run on Windows XP, Windows * Server 2003, and earlier versions of Windows, unless * ASIO_ENABLE_CANCELIO is defined. However, the CancelIo function has * two issues that should be considered before enabling its use: * * @li It will only cancel asynchronous operations that were initiated in the * current thread. 
* * @li It can appear to complete without error, but the request to cancel the * unfinished operations may be silently ignored by the operating system. * Whether it works or not seems to depend on the drivers that are installed. * * For portable cancellation, consider using one of the following * alternatives: * * @li Disable asio's I/O completion port backend by defining * ASIO_DISABLE_IOCP. * * @li Use the close() function to simultaneously cancel the outstanding * operations and close the socket. * * When running on Windows Vista, Windows Server 2008, and later, the * CancelIoEx function is always used. This function does not have the * problems described above. */ #if defined(ASIO_MSVC) && (ASIO_MSVC >= 1400) \ && (!defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0600) \ && !defined(ASIO_ENABLE_CANCELIO) __declspec(deprecated("By default, this function always fails with " "operation_not_supported when used on Windows XP, Windows Server 2003, " "or earlier. Consult documentation for details.")) #endif void cancel() { asio::error_code ec; this->get_service().cancel(this->get_implementation(), ec); asio::detail::throw_error(ec, "cancel"); } /// Cancel all asynchronous operations associated with the socket. /** * This function causes all outstanding asynchronous connect, send and receive * operations to finish immediately, and the handlers for cancelled operations * will be passed the asio::error::operation_aborted error. * * @param ec Set to indicate what error occurred, if any. * * @note Calls to cancel() will always fail with * asio::error::operation_not_supported when run on Windows XP, Windows * Server 2003, and earlier versions of Windows, unless * ASIO_ENABLE_CANCELIO is defined. However, the CancelIo function has * two issues that should be considered before enabling its use: * * @li It will only cancel asynchronous operations that were initiated in the * current thread. * * @li It can appear to complete without error, but the request to cancel the * unfinished operations may be silently ignored by the operating system. * Whether it works or not seems to depend on the drivers that are installed. * * For portable cancellation, consider using one of the following * alternatives: * * @li Disable asio's I/O completion port backend by defining * ASIO_DISABLE_IOCP. * * @li Use the close() function to simultaneously cancel the outstanding * operations and close the socket. * * When running on Windows Vista, Windows Server 2008, and later, the * CancelIoEx function is always used. This function does not have the * problems described above. */ #if defined(ASIO_MSVC) && (ASIO_MSVC >= 1400) \ && (!defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0600) \ && !defined(ASIO_ENABLE_CANCELIO) __declspec(deprecated("By default, this function always fails with " "operation_not_supported when used on Windows XP, Windows Server 2003, " "or earlier. Consult documentation for details.")) #endif asio::error_code cancel(asio::error_code& ec) { return this->get_service().cancel(this->get_implementation(), ec); } /// Determine whether the socket is at the out-of-band data mark. /** * This function is used to check whether the socket input is currently * positioned at the out-of-band data mark. * * @return A bool indicating whether the socket is at the out-of-band data * mark. * * @throws asio::system_error Thrown on failure. 
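 *
 * @par Example
 * For illustration only:
 * @code if (socket.at_mark())
 * {
 *   // The read stream has reached the out-of-band data mark.
 * } @endcode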
*/ bool at_mark() const { asio::error_code ec; bool b = this->get_service().at_mark(this->get_implementation(), ec); asio::detail::throw_error(ec, "at_mark"); return b; } /// Determine whether the socket is at the out-of-band data mark. /** * This function is used to check whether the socket input is currently * positioned at the out-of-band data mark. * * @param ec Set to indicate what error occurred, if any. * * @return A bool indicating whether the socket is at the out-of-band data * mark. */ bool at_mark(asio::error_code& ec) const { return this->get_service().at_mark(this->get_implementation(), ec); } /// Determine the number of bytes available for reading. /** * This function is used to determine the number of bytes that may be read * without blocking. * * @return The number of bytes that may be read without blocking, or 0 if an * error occurs. * * @throws asio::system_error Thrown on failure. */ std::size_t available() const { asio::error_code ec; std::size_t s = this->get_service().available( this->get_implementation(), ec); asio::detail::throw_error(ec, "available"); return s; } /// Determine the number of bytes available for reading. /** * This function is used to determine the number of bytes that may be read * without blocking. * * @param ec Set to indicate what error occurred, if any. * * @return The number of bytes that may be read without blocking, or 0 if an * error occurs. */ std::size_t available(asio::error_code& ec) const { return this->get_service().available(this->get_implementation(), ec); } /// Bind the socket to the given local endpoint. /** * This function binds the socket to the specified endpoint on the local * machine. * * @param endpoint An endpoint on the local machine to which the socket will * be bound. * * @throws asio::system_error Thrown on failure. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * socket.open(asio::ip::tcp::v4()); * socket.bind(asio::ip::tcp::endpoint( * asio::ip::tcp::v4(), 12345)); * @endcode */ void bind(const endpoint_type& endpoint) { asio::error_code ec; this->get_service().bind(this->get_implementation(), endpoint, ec); asio::detail::throw_error(ec, "bind"); } /// Bind the socket to the given local endpoint. /** * This function binds the socket to the specified endpoint on the local * machine. * * @param endpoint An endpoint on the local machine to which the socket will * be bound. * * @param ec Set to indicate what error occurred, if any. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * socket.open(asio::ip::tcp::v4()); * asio::error_code ec; * socket.bind(asio::ip::tcp::endpoint( * asio::ip::tcp::v4(), 12345), ec); * if (ec) * { * // An error occurred. * } * @endcode */ asio::error_code bind(const endpoint_type& endpoint, asio::error_code& ec) { return this->get_service().bind(this->get_implementation(), endpoint, ec); } /// Connect the socket to the specified endpoint. /** * This function is used to connect a socket to the specified remote endpoint. * The function call will block until the connection is successfully made or * an error occurs. * * The socket is automatically opened if it is not already open. If the * connect fails, and the socket was automatically opened, the socket is * not returned to the closed state. * * @param peer_endpoint The remote endpoint to which the socket will be * connected. * * @throws asio::system_error Thrown on failure. 
* * @par Example * @code * asio::ip::tcp::socket socket(io_service); * asio::ip::tcp::endpoint endpoint( * asio::ip::address::from_string("1.2.3.4"), 12345); * socket.connect(endpoint); * @endcode */ void connect(const endpoint_type& peer_endpoint) { asio::error_code ec; if (!is_open()) { this->get_service().open(this->get_implementation(), peer_endpoint.protocol(), ec); asio::detail::throw_error(ec, "connect"); } this->get_service().connect(this->get_implementation(), peer_endpoint, ec); asio::detail::throw_error(ec, "connect"); } /// Connect the socket to the specified endpoint. /** * This function is used to connect a socket to the specified remote endpoint. * The function call will block until the connection is successfully made or * an error occurs. * * The socket is automatically opened if it is not already open. If the * connect fails, and the socket was automatically opened, the socket is * not returned to the closed state. * * @param peer_endpoint The remote endpoint to which the socket will be * connected. * * @param ec Set to indicate what error occurred, if any. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * asio::ip::tcp::endpoint endpoint( * asio::ip::address::from_string("1.2.3.4"), 12345); * asio::error_code ec; * socket.connect(endpoint, ec); * if (ec) * { * // An error occurred. * } * @endcode */ asio::error_code connect(const endpoint_type& peer_endpoint, asio::error_code& ec) { if (!is_open()) { if (this->get_service().open(this->get_implementation(), peer_endpoint.protocol(), ec)) { return ec; } } return this->get_service().connect( this->get_implementation(), peer_endpoint, ec); } /// Start an asynchronous connect. /** * This function is used to asynchronously connect a socket to the specified * remote endpoint. The function call always returns immediately. * * The socket is automatically opened if it is not already open. If the * connect fails, and the socket was automatically opened, the socket is * not returned to the closed state. * * @param peer_endpoint The remote endpoint to which the socket will be * connected. Copies will be made of the endpoint object as required. * * @param handler The handler to be called when the connection operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error // Result of operation * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @par Example * @code * void connect_handler(const asio::error_code& error) * { * if (!error) * { * // Connect succeeded. * } * } * * ... * * asio::ip::tcp::socket socket(io_service); * asio::ip::tcp::endpoint endpoint( * asio::ip::address::from_string("1.2.3.4"), 12345); * socket.async_connect(endpoint, connect_handler); * @endcode */ template ASIO_INITFN_RESULT_TYPE(ConnectHandler, void (asio::error_code)) async_connect(const endpoint_type& peer_endpoint, ASIO_MOVE_ARG(ConnectHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ConnectHandler. 
ASIO_CONNECT_HANDLER_CHECK(ConnectHandler, handler) type_check; if (!is_open()) { asio::error_code ec; const protocol_type protocol = peer_endpoint.protocol(); if (this->get_service().open(this->get_implementation(), protocol, ec)) { detail::async_result_init< ConnectHandler, void (asio::error_code)> init( ASIO_MOVE_CAST(ConnectHandler)(handler)); this->get_io_service().post( asio::detail::bind_handler( ASIO_MOVE_CAST(ASIO_HANDLER_TYPE( ConnectHandler, void (asio::error_code)))( init.handler), ec)); return init.result.get(); } } return this->get_service().async_connect(this->get_implementation(), peer_endpoint, ASIO_MOVE_CAST(ConnectHandler)(handler)); } /// Set an option on the socket. /** * This function is used to set an option on the socket. * * @param option The new option value to be set on the socket. * * @throws asio::system_error Thrown on failure. * * @sa SettableSocketOption @n * asio::socket_base::broadcast @n * asio::socket_base::do_not_route @n * asio::socket_base::keep_alive @n * asio::socket_base::linger @n * asio::socket_base::receive_buffer_size @n * asio::socket_base::receive_low_watermark @n * asio::socket_base::reuse_address @n * asio::socket_base::send_buffer_size @n * asio::socket_base::send_low_watermark @n * asio::ip::multicast::join_group @n * asio::ip::multicast::leave_group @n * asio::ip::multicast::enable_loopback @n * asio::ip::multicast::outbound_interface @n * asio::ip::multicast::hops @n * asio::ip::tcp::no_delay * * @par Example * Setting the IPPROTO_TCP/TCP_NODELAY option: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::ip::tcp::no_delay option(true); * socket.set_option(option); * @endcode */ template void set_option(const SettableSocketOption& option) { asio::error_code ec; this->get_service().set_option(this->get_implementation(), option, ec); asio::detail::throw_error(ec, "set_option"); } /// Set an option on the socket. /** * This function is used to set an option on the socket. * * @param option The new option value to be set on the socket. * * @param ec Set to indicate what error occurred, if any. * * @sa SettableSocketOption @n * asio::socket_base::broadcast @n * asio::socket_base::do_not_route @n * asio::socket_base::keep_alive @n * asio::socket_base::linger @n * asio::socket_base::receive_buffer_size @n * asio::socket_base::receive_low_watermark @n * asio::socket_base::reuse_address @n * asio::socket_base::send_buffer_size @n * asio::socket_base::send_low_watermark @n * asio::ip::multicast::join_group @n * asio::ip::multicast::leave_group @n * asio::ip::multicast::enable_loopback @n * asio::ip::multicast::outbound_interface @n * asio::ip::multicast::hops @n * asio::ip::tcp::no_delay * * @par Example * Setting the IPPROTO_TCP/TCP_NODELAY option: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::ip::tcp::no_delay option(true); * asio::error_code ec; * socket.set_option(option, ec); * if (ec) * { * // An error occurred. * } * @endcode */ template asio::error_code set_option(const SettableSocketOption& option, asio::error_code& ec) { return this->get_service().set_option( this->get_implementation(), option, ec); } /// Get an option from the socket. /** * This function is used to get the current value of an option on the socket. * * @param option The option value to be obtained from the socket. * * @throws asio::system_error Thrown on failure. 
* * @sa GettableSocketOption @n * asio::socket_base::broadcast @n * asio::socket_base::do_not_route @n * asio::socket_base::keep_alive @n * asio::socket_base::linger @n * asio::socket_base::receive_buffer_size @n * asio::socket_base::receive_low_watermark @n * asio::socket_base::reuse_address @n * asio::socket_base::send_buffer_size @n * asio::socket_base::send_low_watermark @n * asio::ip::multicast::join_group @n * asio::ip::multicast::leave_group @n * asio::ip::multicast::enable_loopback @n * asio::ip::multicast::outbound_interface @n * asio::ip::multicast::hops @n * asio::ip::tcp::no_delay * * @par Example * Getting the value of the SOL_SOCKET/SO_KEEPALIVE option: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::ip::tcp::socket::keep_alive option; * socket.get_option(option); * bool is_set = option.value(); * @endcode */ template void get_option(GettableSocketOption& option) const { asio::error_code ec; this->get_service().get_option(this->get_implementation(), option, ec); asio::detail::throw_error(ec, "get_option"); } /// Get an option from the socket. /** * This function is used to get the current value of an option on the socket. * * @param option The option value to be obtained from the socket. * * @param ec Set to indicate what error occurred, if any. * * @sa GettableSocketOption @n * asio::socket_base::broadcast @n * asio::socket_base::do_not_route @n * asio::socket_base::keep_alive @n * asio::socket_base::linger @n * asio::socket_base::receive_buffer_size @n * asio::socket_base::receive_low_watermark @n * asio::socket_base::reuse_address @n * asio::socket_base::send_buffer_size @n * asio::socket_base::send_low_watermark @n * asio::ip::multicast::join_group @n * asio::ip::multicast::leave_group @n * asio::ip::multicast::enable_loopback @n * asio::ip::multicast::outbound_interface @n * asio::ip::multicast::hops @n * asio::ip::tcp::no_delay * * @par Example * Getting the value of the SOL_SOCKET/SO_KEEPALIVE option: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::ip::tcp::socket::keep_alive option; * asio::error_code ec; * socket.get_option(option, ec); * if (ec) * { * // An error occurred. * } * bool is_set = option.value(); * @endcode */ template asio::error_code get_option(GettableSocketOption& option, asio::error_code& ec) const { return this->get_service().get_option( this->get_implementation(), option, ec); } /// Perform an IO control command on the socket. /** * This function is used to execute an IO control command on the socket. * * @param command The IO control command to be performed on the socket. * * @throws asio::system_error Thrown on failure. * * @sa IoControlCommand @n * asio::socket_base::bytes_readable @n * asio::socket_base::non_blocking_io * * @par Example * Getting the number of bytes ready to read: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::ip::tcp::socket::bytes_readable command; * socket.io_control(command); * std::size_t bytes_readable = command.get(); * @endcode */ template void io_control(IoControlCommand& command) { asio::error_code ec; this->get_service().io_control(this->get_implementation(), command, ec); asio::detail::throw_error(ec, "io_control"); } /// Perform an IO control command on the socket. /** * This function is used to execute an IO control command on the socket. * * @param command The IO control command to be performed on the socket. * * @param ec Set to indicate what error occurred, if any. 
* * @sa IoControlCommand @n * asio::socket_base::bytes_readable @n * asio::socket_base::non_blocking_io * * @par Example * Getting the number of bytes ready to read: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::ip::tcp::socket::bytes_readable command; * asio::error_code ec; * socket.io_control(command, ec); * if (ec) * { * // An error occurred. * } * std::size_t bytes_readable = command.get(); * @endcode */ template asio::error_code io_control(IoControlCommand& command, asio::error_code& ec) { return this->get_service().io_control( this->get_implementation(), command, ec); } /// Gets the non-blocking mode of the socket. /** * @returns @c true if the socket's synchronous operations will fail with * asio::error::would_block if they are unable to perform the requested * operation immediately. If @c false, synchronous operations will block * until complete. * * @note The non-blocking mode has no effect on the behaviour of asynchronous * operations. Asynchronous operations will never fail with the error * asio::error::would_block. */ bool non_blocking() const { return this->get_service().non_blocking(this->get_implementation()); } /// Sets the non-blocking mode of the socket. /** * @param mode If @c true, the socket's synchronous operations will fail with * asio::error::would_block if they are unable to perform the requested * operation immediately. If @c false, synchronous operations will block * until complete. * * @throws asio::system_error Thrown on failure. * * @note The non-blocking mode has no effect on the behaviour of asynchronous * operations. Asynchronous operations will never fail with the error * asio::error::would_block. */ void non_blocking(bool mode) { asio::error_code ec; this->get_service().non_blocking(this->get_implementation(), mode, ec); asio::detail::throw_error(ec, "non_blocking"); } /// Sets the non-blocking mode of the socket. /** * @param mode If @c true, the socket's synchronous operations will fail with * asio::error::would_block if they are unable to perform the requested * operation immediately. If @c false, synchronous operations will block * until complete. * * @param ec Set to indicate what error occurred, if any. * * @note The non-blocking mode has no effect on the behaviour of asynchronous * operations. Asynchronous operations will never fail with the error * asio::error::would_block. */ asio::error_code non_blocking( bool mode, asio::error_code& ec) { return this->get_service().non_blocking( this->get_implementation(), mode, ec); } /// Gets the non-blocking mode of the native socket implementation. /** * This function is used to retrieve the non-blocking mode of the underlying * native socket. This mode has no effect on the behaviour of the socket * object's synchronous operations. * * @returns @c true if the underlying socket is in non-blocking mode and * direct system calls may fail with asio::error::would_block (or the * equivalent system error). * * @note The current non-blocking mode is cached by the socket object. * Consequently, the return value may be incorrect if the non-blocking mode * was set directly on the native socket. * * @par Example * This function is intended to allow the encapsulation of arbitrary * non-blocking system calls as asynchronous operations, in a way that is * transparent to the user of the socket object. 
The following example * illustrates how Linux's @c sendfile system call might be encapsulated: * @code template * struct sendfile_op * { * tcp::socket& sock_; * int fd_; * Handler handler_; * off_t offset_; * std::size_t total_bytes_transferred_; * * // Function call operator meeting WriteHandler requirements. * // Used as the handler for the async_write_some operation. * void operator()(asio::error_code ec, std::size_t) * { * // Put the underlying socket into non-blocking mode. * if (!ec) * if (!sock_.native_non_blocking()) * sock_.native_non_blocking(true, ec); * * if (!ec) * { * for (;;) * { * // Try the system call. * errno = 0; * int n = ::sendfile(sock_.native_handle(), fd_, &offset_, 65536); * ec = asio::error_code(n < 0 ? errno : 0, * asio::error::get_system_category()); * total_bytes_transferred_ += ec ? 0 : n; * * // Retry operation immediately if interrupted by signal. * if (ec == asio::error::interrupted) * continue; * * // Check if we need to run the operation again. * if (ec == asio::error::would_block * || ec == asio::error::try_again) * { * // We have to wait for the socket to become ready again. * sock_.async_write_some(asio::null_buffers(), *this); * return; * } * * if (ec || n == 0) * { * // An error occurred, or we have reached the end of the file. * // Either way we must exit the loop so we can call the handler. * break; * } * * // Loop around to try calling sendfile again. * } * } * * // Pass result back to user's handler. * handler_(ec, total_bytes_transferred_); * } * }; * * template * void async_sendfile(tcp::socket& sock, int fd, Handler h) * { * sendfile_op op = { sock, fd, h, 0, 0 }; * sock.async_write_some(asio::null_buffers(), op); * } @endcode */ bool native_non_blocking() const { return this->get_service().native_non_blocking(this->get_implementation()); } /// Sets the non-blocking mode of the native socket implementation. /** * This function is used to modify the non-blocking mode of the underlying * native socket. It has no effect on the behaviour of the socket object's * synchronous operations. * * @param mode If @c true, the underlying socket is put into non-blocking * mode and direct system calls may fail with asio::error::would_block * (or the equivalent system error). * * @throws asio::system_error Thrown on failure. If the @c mode is * @c false, but the current value of @c non_blocking() is @c true, this * function fails with asio::error::invalid_argument, as the * combination does not make sense. * * @par Example * This function is intended to allow the encapsulation of arbitrary * non-blocking system calls as asynchronous operations, in a way that is * transparent to the user of the socket object. The following example * illustrates how Linux's @c sendfile system call might be encapsulated: * @code template * struct sendfile_op * { * tcp::socket& sock_; * int fd_; * Handler handler_; * off_t offset_; * std::size_t total_bytes_transferred_; * * // Function call operator meeting WriteHandler requirements. * // Used as the handler for the async_write_some operation. * void operator()(asio::error_code ec, std::size_t) * { * // Put the underlying socket into non-blocking mode. * if (!ec) * if (!sock_.native_non_blocking()) * sock_.native_non_blocking(true, ec); * * if (!ec) * { * for (;;) * { * // Try the system call. * errno = 0; * int n = ::sendfile(sock_.native_handle(), fd_, &offset_, 65536); * ec = asio::error_code(n < 0 ? errno : 0, * asio::error::get_system_category()); * total_bytes_transferred_ += ec ? 
0 : n; * * // Retry operation immediately if interrupted by signal. * if (ec == asio::error::interrupted) * continue; * * // Check if we need to run the operation again. * if (ec == asio::error::would_block * || ec == asio::error::try_again) * { * // We have to wait for the socket to become ready again. * sock_.async_write_some(asio::null_buffers(), *this); * return; * } * * if (ec || n == 0) * { * // An error occurred, or we have reached the end of the file. * // Either way we must exit the loop so we can call the handler. * break; * } * * // Loop around to try calling sendfile again. * } * } * * // Pass result back to user's handler. * handler_(ec, total_bytes_transferred_); * } * }; * * template * void async_sendfile(tcp::socket& sock, int fd, Handler h) * { * sendfile_op op = { sock, fd, h, 0, 0 }; * sock.async_write_some(asio::null_buffers(), op); * } @endcode */ void native_non_blocking(bool mode) { asio::error_code ec; this->get_service().native_non_blocking( this->get_implementation(), mode, ec); asio::detail::throw_error(ec, "native_non_blocking"); } /// Sets the non-blocking mode of the native socket implementation. /** * This function is used to modify the non-blocking mode of the underlying * native socket. It has no effect on the behaviour of the socket object's * synchronous operations. * * @param mode If @c true, the underlying socket is put into non-blocking * mode and direct system calls may fail with asio::error::would_block * (or the equivalent system error). * * @param ec Set to indicate what error occurred, if any. If the @c mode is * @c false, but the current value of @c non_blocking() is @c true, this * function fails with asio::error::invalid_argument, as the * combination does not make sense. * * @par Example * This function is intended to allow the encapsulation of arbitrary * non-blocking system calls as asynchronous operations, in a way that is * transparent to the user of the socket object. The following example * illustrates how Linux's @c sendfile system call might be encapsulated: * @code template * struct sendfile_op * { * tcp::socket& sock_; * int fd_; * Handler handler_; * off_t offset_; * std::size_t total_bytes_transferred_; * * // Function call operator meeting WriteHandler requirements. * // Used as the handler for the async_write_some operation. * void operator()(asio::error_code ec, std::size_t) * { * // Put the underlying socket into non-blocking mode. * if (!ec) * if (!sock_.native_non_blocking()) * sock_.native_non_blocking(true, ec); * * if (!ec) * { * for (;;) * { * // Try the system call. * errno = 0; * int n = ::sendfile(sock_.native_handle(), fd_, &offset_, 65536); * ec = asio::error_code(n < 0 ? errno : 0, * asio::error::get_system_category()); * total_bytes_transferred_ += ec ? 0 : n; * * // Retry operation immediately if interrupted by signal. * if (ec == asio::error::interrupted) * continue; * * // Check if we need to run the operation again. * if (ec == asio::error::would_block * || ec == asio::error::try_again) * { * // We have to wait for the socket to become ready again. * sock_.async_write_some(asio::null_buffers(), *this); * return; * } * * if (ec || n == 0) * { * // An error occurred, or we have reached the end of the file. * // Either way we must exit the loop so we can call the handler. * break; * } * * // Loop around to try calling sendfile again. * } * } * * // Pass result back to user's handler. 
* handler_(ec, total_bytes_transferred_); * } * }; * * template * void async_sendfile(tcp::socket& sock, int fd, Handler h) * { * sendfile_op op = { sock, fd, h, 0, 0 }; * sock.async_write_some(asio::null_buffers(), op); * } @endcode */ asio::error_code native_non_blocking( bool mode, asio::error_code& ec) { return this->get_service().native_non_blocking( this->get_implementation(), mode, ec); } /// Get the local endpoint of the socket. /** * This function is used to obtain the locally bound endpoint of the socket. * * @returns An object that represents the local endpoint of the socket. * * @throws asio::system_error Thrown on failure. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::ip::tcp::endpoint endpoint = socket.local_endpoint(); * @endcode */ endpoint_type local_endpoint() const { asio::error_code ec; endpoint_type ep = this->get_service().local_endpoint( this->get_implementation(), ec); asio::detail::throw_error(ec, "local_endpoint"); return ep; } /// Get the local endpoint of the socket. /** * This function is used to obtain the locally bound endpoint of the socket. * * @param ec Set to indicate what error occurred, if any. * * @returns An object that represents the local endpoint of the socket. * Returns a default-constructed endpoint object if an error occurred. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::error_code ec; * asio::ip::tcp::endpoint endpoint = socket.local_endpoint(ec); * if (ec) * { * // An error occurred. * } * @endcode */ endpoint_type local_endpoint(asio::error_code& ec) const { return this->get_service().local_endpoint(this->get_implementation(), ec); } /// Get the remote endpoint of the socket. /** * This function is used to obtain the remote endpoint of the socket. * * @returns An object that represents the remote endpoint of the socket. * * @throws asio::system_error Thrown on failure. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::ip::tcp::endpoint endpoint = socket.remote_endpoint(); * @endcode */ endpoint_type remote_endpoint() const { asio::error_code ec; endpoint_type ep = this->get_service().remote_endpoint( this->get_implementation(), ec); asio::detail::throw_error(ec, "remote_endpoint"); return ep; } /// Get the remote endpoint of the socket. /** * This function is used to obtain the remote endpoint of the socket. * * @param ec Set to indicate what error occurred, if any. * * @returns An object that represents the remote endpoint of the socket. * Returns a default-constructed endpoint object if an error occurred. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::error_code ec; * asio::ip::tcp::endpoint endpoint = socket.remote_endpoint(ec); * if (ec) * { * // An error occurred. * } * @endcode */ endpoint_type remote_endpoint(asio::error_code& ec) const { return this->get_service().remote_endpoint(this->get_implementation(), ec); } /// Disable sends or receives on the socket. /** * This function is used to disable send operations, receive operations, or * both. * * @param what Determines what types of operation will no longer be allowed. * * @throws asio::system_error Thrown on failure. * * @par Example * Shutting down the send side of the socket: * @code * asio::ip::tcp::socket socket(io_service); * ... 
* socket.shutdown(asio::ip::tcp::socket::shutdown_send); * @endcode */ void shutdown(shutdown_type what) { asio::error_code ec; this->get_service().shutdown(this->get_implementation(), what, ec); asio::detail::throw_error(ec, "shutdown"); } /// Disable sends or receives on the socket. /** * This function is used to disable send operations, receive operations, or * both. * * @param what Determines what types of operation will no longer be allowed. * * @param ec Set to indicate what error occurred, if any. * * @par Example * Shutting down the send side of the socket: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::error_code ec; * socket.shutdown(asio::ip::tcp::socket::shutdown_send, ec); * if (ec) * { * // An error occurred. * } * @endcode */ asio::error_code shutdown(shutdown_type what, asio::error_code& ec) { return this->get_service().shutdown(this->get_implementation(), what, ec); } protected: /// Protected destructor to prevent deletion through this type. ~basic_socket() { } }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_BASIC_SOCKET_HPP galera-3-25.3.20/asio/asio/buffered_stream.hpp0000644000015300001660000001711713042054732020701 0ustar jenkinsjenkins// // buffered_stream.hpp // ~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BUFFERED_STREAM_HPP #define ASIO_BUFFERED_STREAM_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/async_result.hpp" #include "asio/buffered_read_stream.hpp" #include "asio/buffered_write_stream.hpp" #include "asio/buffered_stream_fwd.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Adds buffering to the read- and write-related operations of a stream. /** * The buffered_stream class template can be used to add buffering to the * synchronous and asynchronous read and write operations of a stream. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. * * @par Concepts: * AsyncReadStream, AsyncWriteStream, Stream, SyncReadStream, SyncWriteStream. */ template class buffered_stream : private noncopyable { public: /// The type of the next layer. typedef typename remove_reference::type next_layer_type; /// The type of the lowest layer. typedef typename next_layer_type::lowest_layer_type lowest_layer_type; /// Construct, passing the specified argument to initialise the next layer. template explicit buffered_stream(Arg& a) : inner_stream_impl_(a), stream_impl_(inner_stream_impl_) { } /// Construct, passing the specified argument to initialise the next layer. template explicit buffered_stream(Arg& a, std::size_t read_buffer_size, std::size_t write_buffer_size) : inner_stream_impl_(a, write_buffer_size), stream_impl_(inner_stream_impl_, read_buffer_size) { } /// Get a reference to the next layer. next_layer_type& next_layer() { return stream_impl_.next_layer().next_layer(); } /// Get a reference to the lowest layer. lowest_layer_type& lowest_layer() { return stream_impl_.lowest_layer(); } /// Get a const reference to the lowest layer. 
const lowest_layer_type& lowest_layer() const { return stream_impl_.lowest_layer(); } /// Get the io_service associated with the object. asio::io_service& get_io_service() { return stream_impl_.get_io_service(); } /// Close the stream. void close() { stream_impl_.close(); } /// Close the stream. asio::error_code close(asio::error_code& ec) { return stream_impl_.close(ec); } /// Flush all data from the buffer to the next layer. Returns the number of /// bytes written to the next layer on the last write operation. Throws an /// exception on failure. std::size_t flush() { return stream_impl_.next_layer().flush(); } /// Flush all data from the buffer to the next layer. Returns the number of /// bytes written to the next layer on the last write operation, or 0 if an /// error occurred. std::size_t flush(asio::error_code& ec) { return stream_impl_.next_layer().flush(ec); } /// Start an asynchronous flush. template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_flush(ASIO_MOVE_ARG(WriteHandler) handler) { return stream_impl_.next_layer().async_flush( ASIO_MOVE_CAST(WriteHandler)(handler)); } /// Write the given data to the stream. Returns the number of bytes written. /// Throws an exception on failure. template std::size_t write_some(const ConstBufferSequence& buffers) { return stream_impl_.write_some(buffers); } /// Write the given data to the stream. Returns the number of bytes written, /// or 0 if an error occurred. template std::size_t write_some(const ConstBufferSequence& buffers, asio::error_code& ec) { return stream_impl_.write_some(buffers, ec); } /// Start an asynchronous write. The data being written must be valid for the /// lifetime of the asynchronous operation. template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write_some(const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler) { return stream_impl_.async_write_some(buffers, ASIO_MOVE_CAST(WriteHandler)(handler)); } /// Fill the buffer with some data. Returns the number of bytes placed in the /// buffer as a result of the operation. Throws an exception on failure. std::size_t fill() { return stream_impl_.fill(); } /// Fill the buffer with some data. Returns the number of bytes placed in the /// buffer as a result of the operation, or 0 if an error occurred. std::size_t fill(asio::error_code& ec) { return stream_impl_.fill(ec); } /// Start an asynchronous fill. template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_fill(ASIO_MOVE_ARG(ReadHandler) handler) { return stream_impl_.async_fill(ASIO_MOVE_CAST(ReadHandler)(handler)); } /// Read some data from the stream. Returns the number of bytes read. Throws /// an exception on failure. template std::size_t read_some(const MutableBufferSequence& buffers) { return stream_impl_.read_some(buffers); } /// Read some data from the stream. Returns the number of bytes read or 0 if /// an error occurred. template std::size_t read_some(const MutableBufferSequence& buffers, asio::error_code& ec) { return stream_impl_.read_some(buffers, ec); } /// Start an asynchronous read. The buffer into which the data will be read /// must be valid for the lifetime of the asynchronous operation. 
template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_some(const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler) { return stream_impl_.async_read_some(buffers, ASIO_MOVE_CAST(ReadHandler)(handler)); } /// Peek at the incoming data on the stream. Returns the number of bytes read. /// Throws an exception on failure. template std::size_t peek(const MutableBufferSequence& buffers) { return stream_impl_.peek(buffers); } /// Peek at the incoming data on the stream. Returns the number of bytes read, /// or 0 if an error occurred. template std::size_t peek(const MutableBufferSequence& buffers, asio::error_code& ec) { return stream_impl_.peek(buffers, ec); } /// Determine the amount of data that may be read without blocking. std::size_t in_avail() { return stream_impl_.in_avail(); } /// Determine the amount of data that may be read without blocking. std::size_t in_avail(asio::error_code& ec) { return stream_impl_.in_avail(ec); } private: // The buffered write stream. typedef buffered_write_stream write_stream_type; write_stream_type inner_stream_impl_; // The buffered read stream. typedef buffered_read_stream read_stream_type; read_stream_type stream_impl_; }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_BUFFERED_STREAM_HPP galera-3-25.3.20/asio/asio/stream_socket_service.hpp0000644000015300001660000002617013042054732022126 0ustar jenkinsjenkins// // stream_socket_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_STREAM_SOCKET_SERVICE_HPP #define ASIO_STREAM_SOCKET_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/async_result.hpp" #include "asio/detail/type_traits.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #if defined(ASIO_WINDOWS_RUNTIME) # include "asio/detail/winrt_ssocket_service.hpp" #elif defined(ASIO_HAS_IOCP) # include "asio/detail/win_iocp_socket_service.hpp" #else # include "asio/detail/reactive_socket_service.hpp" #endif #include "asio/detail/push_options.hpp" namespace asio { /// Default service implementation for a stream socket. template class stream_socket_service #if defined(GENERATING_DOCUMENTATION) : public asio::io_service::service #else : public asio::detail::service_base > #endif { public: #if defined(GENERATING_DOCUMENTATION) /// The unique service identifier. static asio::io_service::id id; #endif /// The protocol type. typedef Protocol protocol_type; /// The endpoint type. typedef typename Protocol::endpoint endpoint_type; private: // The type of the platform-specific implementation. #if defined(ASIO_WINDOWS_RUNTIME) typedef detail::winrt_ssocket_service service_impl_type; #elif defined(ASIO_HAS_IOCP) typedef detail::win_iocp_socket_service service_impl_type; #else typedef detail::reactive_socket_service service_impl_type; #endif public: /// The type of a stream socket implementation. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined implementation_type; #else typedef typename service_impl_type::implementation_type implementation_type; #endif /// (Deprecated: Use native_handle_type.) The native socket type. 
#if defined(GENERATING_DOCUMENTATION) typedef implementation_defined native_type; #else typedef typename service_impl_type::native_handle_type native_type; #endif /// The native socket type. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined native_handle_type; #else typedef typename service_impl_type::native_handle_type native_handle_type; #endif /// Construct a new stream socket service for the specified io_service. explicit stream_socket_service(asio::io_service& io_service) : asio::detail::service_base< stream_socket_service >(io_service), service_impl_(io_service) { } /// Construct a new stream socket implementation. void construct(implementation_type& impl) { service_impl_.construct(impl); } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a new stream socket implementation. void move_construct(implementation_type& impl, implementation_type& other_impl) { service_impl_.move_construct(impl, other_impl); } /// Move-assign from another stream socket implementation. void move_assign(implementation_type& impl, stream_socket_service& other_service, implementation_type& other_impl) { service_impl_.move_assign(impl, other_service.service_impl_, other_impl); } /// Move-construct a new stream socket implementation from another protocol /// type. template void converting_move_construct(implementation_type& impl, typename stream_socket_service< Protocol1>::implementation_type& other_impl, typename enable_if::value>::type* = 0) { service_impl_.template converting_move_construct( impl, other_impl); } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Destroy a stream socket implementation. void destroy(implementation_type& impl) { service_impl_.destroy(impl); } /// Open a stream socket. asio::error_code open(implementation_type& impl, const protocol_type& protocol, asio::error_code& ec) { if (protocol.type() == ASIO_OS_DEF(SOCK_STREAM)) service_impl_.open(impl, protocol, ec); else ec = asio::error::invalid_argument; return ec; } /// Assign an existing native socket to a stream socket. asio::error_code assign(implementation_type& impl, const protocol_type& protocol, const native_handle_type& native_socket, asio::error_code& ec) { return service_impl_.assign(impl, protocol, native_socket, ec); } /// Determine whether the socket is open. bool is_open(const implementation_type& impl) const { return service_impl_.is_open(impl); } /// Close a stream socket implementation. asio::error_code close(implementation_type& impl, asio::error_code& ec) { return service_impl_.close(impl, ec); } /// (Deprecated: Use native_handle().) Get the native socket implementation. native_type native(implementation_type& impl) { return service_impl_.native_handle(impl); } /// Get the native socket implementation. native_handle_type native_handle(implementation_type& impl) { return service_impl_.native_handle(impl); } /// Cancel all asynchronous operations associated with the socket. asio::error_code cancel(implementation_type& impl, asio::error_code& ec) { return service_impl_.cancel(impl, ec); } /// Determine whether the socket is at the out-of-band data mark. bool at_mark(const implementation_type& impl, asio::error_code& ec) const { return service_impl_.at_mark(impl, ec); } /// Determine the number of bytes available for reading. std::size_t available(const implementation_type& impl, asio::error_code& ec) const { return service_impl_.available(impl, ec); } /// Bind the stream socket to the specified local endpoint. 
asio::error_code bind(implementation_type& impl, const endpoint_type& endpoint, asio::error_code& ec) { return service_impl_.bind(impl, endpoint, ec); } /// Connect the stream socket to the specified endpoint. asio::error_code connect(implementation_type& impl, const endpoint_type& peer_endpoint, asio::error_code& ec) { return service_impl_.connect(impl, peer_endpoint, ec); } /// Start an asynchronous connect. template ASIO_INITFN_RESULT_TYPE(ConnectHandler, void (asio::error_code)) async_connect(implementation_type& impl, const endpoint_type& peer_endpoint, ASIO_MOVE_ARG(ConnectHandler) handler) { detail::async_result_init< ConnectHandler, void (asio::error_code)> init( ASIO_MOVE_CAST(ConnectHandler)(handler)); service_impl_.async_connect(impl, peer_endpoint, init.handler); return init.result.get(); } /// Set a socket option. template asio::error_code set_option(implementation_type& impl, const SettableSocketOption& option, asio::error_code& ec) { return service_impl_.set_option(impl, option, ec); } /// Get a socket option. template asio::error_code get_option(const implementation_type& impl, GettableSocketOption& option, asio::error_code& ec) const { return service_impl_.get_option(impl, option, ec); } /// Perform an IO control command on the socket. template asio::error_code io_control(implementation_type& impl, IoControlCommand& command, asio::error_code& ec) { return service_impl_.io_control(impl, command, ec); } /// Gets the non-blocking mode of the socket. bool non_blocking(const implementation_type& impl) const { return service_impl_.non_blocking(impl); } /// Sets the non-blocking mode of the socket. asio::error_code non_blocking(implementation_type& impl, bool mode, asio::error_code& ec) { return service_impl_.non_blocking(impl, mode, ec); } /// Gets the non-blocking mode of the native socket implementation. bool native_non_blocking(const implementation_type& impl) const { return service_impl_.native_non_blocking(impl); } /// Sets the non-blocking mode of the native socket implementation. asio::error_code native_non_blocking(implementation_type& impl, bool mode, asio::error_code& ec) { return service_impl_.native_non_blocking(impl, mode, ec); } /// Get the local endpoint. endpoint_type local_endpoint(const implementation_type& impl, asio::error_code& ec) const { return service_impl_.local_endpoint(impl, ec); } /// Get the remote endpoint. endpoint_type remote_endpoint(const implementation_type& impl, asio::error_code& ec) const { return service_impl_.remote_endpoint(impl, ec); } /// Disable sends or receives on the socket. asio::error_code shutdown(implementation_type& impl, socket_base::shutdown_type what, asio::error_code& ec) { return service_impl_.shutdown(impl, what, ec); } /// Send the given data to the peer. template std::size_t send(implementation_type& impl, const ConstBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { return service_impl_.send(impl, buffers, flags, ec); } /// Start an asynchronous send. template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_send(implementation_type& impl, const ConstBufferSequence& buffers, socket_base::message_flags flags, ASIO_MOVE_ARG(WriteHandler) handler) { detail::async_result_init< WriteHandler, void (asio::error_code, std::size_t)> init( ASIO_MOVE_CAST(WriteHandler)(handler)); service_impl_.async_send(impl, buffers, flags, init.handler); return init.result.get(); } /// Receive some data from the peer. 
template std::size_t receive(implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { return service_impl_.receive(impl, buffers, flags, ec); } /// Start an asynchronous receive. template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive(implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags flags, ASIO_MOVE_ARG(ReadHandler) handler) { detail::async_result_init< ReadHandler, void (asio::error_code, std::size_t)> init( ASIO_MOVE_CAST(ReadHandler)(handler)); service_impl_.async_receive(impl, buffers, flags, init.handler); return init.result.get(); } private: // Destroy all user-defined handler objects owned by the service. void shutdown_service() { service_impl_.shutdown_service(); } // The platform-specific implementation. service_impl_type service_impl_; }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_STREAM_SOCKET_SERVICE_HPP galera-3-25.3.20/asio/asio/basic_deadline_timer.hpp0000644000015300001660000004261613042054732021654 0ustar jenkinsjenkins// // basic_deadline_timer.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BASIC_DEADLINE_TIMER_HPP #define ASIO_BASIC_DEADLINE_TIMER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_BOOST_DATE_TIME) \ || defined(GENERATING_DOCUMENTATION) #include #include "asio/basic_io_object.hpp" #include "asio/deadline_timer_service.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Provides waitable timer functionality. /** * The basic_deadline_timer class template provides the ability to perform a * blocking or asynchronous wait for a timer to expire. * * A deadline timer is always in one of two states: "expired" or "not expired". * If the wait() or async_wait() function is called on an expired timer, the * wait operation will complete immediately. * * Most applications will use the asio::deadline_timer typedef. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. * * @par Examples * Performing a blocking wait: * @code * // Construct a timer without setting an expiry time. * asio::deadline_timer timer(io_service); * * // Set an expiry time relative to now. * timer.expires_from_now(boost::posix_time::seconds(5)); * * // Wait for the timer to expire. * timer.wait(); * @endcode * * @par * Performing an asynchronous wait: * @code * void handler(const asio::error_code& error) * { * if (!error) * { * // Timer expired. * } * } * * ... * * // Construct a timer with an absolute expiry time. * asio::deadline_timer timer(io_service, * boost::posix_time::time_from_string("2005-12-07 23:59:59.000")); * * // Start an asynchronous wait. * timer.async_wait(handler); * @endcode * * @par Changing an active deadline_timer's expiry time * * Changing the expiry time of a timer while there are pending asynchronous * waits causes those wait operations to be cancelled. 
To ensure that the action * associated with the timer is performed only once, use something like this: * * @code * void on_some_event() * { * if (my_timer.expires_from_now(seconds(5)) > 0) * { * // We managed to cancel the timer. Start new asynchronous wait. * my_timer.async_wait(on_timeout); * } * else * { * // Too late, timer has already expired! * } * } * * void on_timeout(const asio::error_code& e) * { * if (e != asio::error::operation_aborted) * { * // Timer was not cancelled, take necessary action. * } * } * @endcode * * @li The asio::basic_deadline_timer::expires_from_now() function * cancels any pending asynchronous waits, and returns the number of * asynchronous waits that were cancelled. If it returns 0 then you were too * late and the wait handler has already been executed, or will soon be * executed. If it returns 1 then the wait handler was successfully cancelled. * * @li If a wait handler is cancelled, the asio::error_code passed to * it contains the value asio::error::operation_aborted. */
template <typename Time, typename TimeTraits = asio::time_traits<Time>, typename TimerService = deadline_timer_service<Time, TimeTraits> > class basic_deadline_timer : public basic_io_object<TimerService> { public: /// The time traits type. typedef TimeTraits traits_type; /// The time type. typedef typename traits_type::time_type time_type; /// The duration type. typedef typename traits_type::duration_type duration_type; /// Constructor. /** * This constructor creates a timer without setting an expiry time. The * expires_at() or expires_from_now() functions must be called to set an * expiry time before the timer can be waited on. * * @param io_service The io_service object that the timer will use to dispatch * handlers for any asynchronous operations performed on the timer. */ explicit basic_deadline_timer(asio::io_service& io_service) : basic_io_object<TimerService>(io_service) { }
/// Constructor to set a particular expiry time as an absolute time. /** * This constructor creates a timer and sets the expiry time. * * @param io_service The io_service object that the timer will use to dispatch * handlers for any asynchronous operations performed on the timer. * * @param expiry_time The expiry time to be used for the timer, expressed * as an absolute time. */ basic_deadline_timer(asio::io_service& io_service, const time_type& expiry_time) : basic_io_object<TimerService>(io_service) { asio::error_code ec; this->service.expires_at(this->implementation, expiry_time, ec); asio::detail::throw_error(ec, "expires_at"); }
/// Constructor to set a particular expiry time relative to now. /** * This constructor creates a timer and sets the expiry time. * * @param io_service The io_service object that the timer will use to dispatch * handlers for any asynchronous operations performed on the timer. * * @param expiry_time The expiry time to be used for the timer, relative to * now. */ basic_deadline_timer(asio::io_service& io_service, const duration_type& expiry_time) : basic_io_object<TimerService>(io_service) { asio::error_code ec; this->service.expires_from_now(this->implementation, expiry_time, ec); asio::detail::throw_error(ec, "expires_from_now"); }
/// Cancel any asynchronous operations that are waiting on the timer. /** * This function forces the completion of any pending asynchronous wait * operations against the timer. The handler for each cancelled operation will * be invoked with the asio::error::operation_aborted error code. * * Cancelling the timer does not change the expiry time. * * @return The number of asynchronous operations that were cancelled. * * @throws asio::system_error Thrown on failure.
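 *
 * @par Example
 * (Editorial illustration, not part of the upstream asio documentation:
 * cancelling a pending asynchronous wait; the io_service object and a
 * wait_handler callback are assumed to exist.)
 * @code
 * asio::deadline_timer timer(io_service);
 * timer.expires_from_now(boost::posix_time::seconds(30));
 * timer.async_wait(wait_handler);
 * ...
 * // wait_handler is now invoked with asio::error::operation_aborted.
 * std::size_t cancelled = timer.cancel();
 * @endcode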
* * @note If the timer has already expired when cancel() is called, then the * handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. */ std::size_t cancel() { asio::error_code ec; std::size_t s = this->service.cancel(this->implementation, ec); asio::detail::throw_error(ec, "cancel"); return s; } /// Cancel any asynchronous operations that are waiting on the timer. /** * This function forces the completion of any pending asynchronous wait * operations against the timer. The handler for each cancelled operation will * be invoked with the asio::error::operation_aborted error code. * * Cancelling the timer does not change the expiry time. * * @param ec Set to indicate what error occurred, if any. * * @return The number of asynchronous operations that were cancelled. * * @note If the timer has already expired when cancel() is called, then the * handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. */ std::size_t cancel(asio::error_code& ec) { return this->service.cancel(this->implementation, ec); } /// Cancels one asynchronous operation that is waiting on the timer. /** * This function forces the completion of one pending asynchronous wait * operation against the timer. Handlers are cancelled in FIFO order. The * handler for the cancelled operation will be invoked with the * asio::error::operation_aborted error code. * * Cancelling the timer does not change the expiry time. * * @return The number of asynchronous operations that were cancelled. That is, * either 0 or 1. * * @throws asio::system_error Thrown on failure. * * @note If the timer has already expired when cancel_one() is called, then * the handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. */ std::size_t cancel_one() { asio::error_code ec; std::size_t s = this->service.cancel_one(this->implementation, ec); asio::detail::throw_error(ec, "cancel_one"); return s; } /// Cancels one asynchronous operation that is waiting on the timer. /** * This function forces the completion of one pending asynchronous wait * operation against the timer. Handlers are cancelled in FIFO order. The * handler for the cancelled operation will be invoked with the * asio::error::operation_aborted error code. * * Cancelling the timer does not change the expiry time. * * @param ec Set to indicate what error occurred, if any. * * @return The number of asynchronous operations that were cancelled. That is, * either 0 or 1. * * @note If the timer has already expired when cancel_one() is called, then * the handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. 
*/ std::size_t cancel_one(asio::error_code& ec) { return this->service.cancel_one(this->implementation, ec); } /// Get the timer's expiry time as an absolute time. /** * This function may be used to obtain the timer's current expiry time. * Whether the timer has expired or not does not affect this value. */ time_type expires_at() const { return this->service.expires_at(this->implementation); } /// Set the timer's expiry time as an absolute time. /** * This function sets the expiry time. Any pending asynchronous wait * operations will be cancelled. The handler for each cancelled operation will * be invoked with the asio::error::operation_aborted error code. * * @param expiry_time The expiry time to be used for the timer. * * @return The number of asynchronous operations that were cancelled. * * @throws asio::system_error Thrown on failure. * * @note If the timer has already expired when expires_at() is called, then * the handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. */ std::size_t expires_at(const time_type& expiry_time) { asio::error_code ec; std::size_t s = this->service.expires_at( this->implementation, expiry_time, ec); asio::detail::throw_error(ec, "expires_at"); return s; } /// Set the timer's expiry time as an absolute time. /** * This function sets the expiry time. Any pending asynchronous wait * operations will be cancelled. The handler for each cancelled operation will * be invoked with the asio::error::operation_aborted error code. * * @param expiry_time The expiry time to be used for the timer. * * @param ec Set to indicate what error occurred, if any. * * @return The number of asynchronous operations that were cancelled. * * @note If the timer has already expired when expires_at() is called, then * the handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. */ std::size_t expires_at(const time_type& expiry_time, asio::error_code& ec) { return this->service.expires_at(this->implementation, expiry_time, ec); } /// Get the timer's expiry time relative to now. /** * This function may be used to obtain the timer's current expiry time. * Whether the timer has expired or not does not affect this value. */ duration_type expires_from_now() const { return this->service.expires_from_now(this->implementation); } /// Set the timer's expiry time relative to now. /** * This function sets the expiry time. Any pending asynchronous wait * operations will be cancelled. The handler for each cancelled operation will * be invoked with the asio::error::operation_aborted error code. * * @param expiry_time The expiry time to be used for the timer. * * @return The number of asynchronous operations that were cancelled. * * @throws asio::system_error Thrown on failure. * * @note If the timer has already expired when expires_from_now() is called, * then the handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. 
* * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. */ std::size_t expires_from_now(const duration_type& expiry_time) { asio::error_code ec; std::size_t s = this->service.expires_from_now( this->implementation, expiry_time, ec); asio::detail::throw_error(ec, "expires_from_now"); return s; } /// Set the timer's expiry time relative to now. /** * This function sets the expiry time. Any pending asynchronous wait * operations will be cancelled. The handler for each cancelled operation will * be invoked with the asio::error::operation_aborted error code. * * @param expiry_time The expiry time to be used for the timer. * * @param ec Set to indicate what error occurred, if any. * * @return The number of asynchronous operations that were cancelled. * * @note If the timer has already expired when expires_from_now() is called, * then the handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. */ std::size_t expires_from_now(const duration_type& expiry_time, asio::error_code& ec) { return this->service.expires_from_now( this->implementation, expiry_time, ec); } /// Perform a blocking wait on the timer. /** * This function is used to wait for the timer to expire. This function * blocks and does not return until the timer has expired. * * @throws asio::system_error Thrown on failure. */ void wait() { asio::error_code ec; this->service.wait(this->implementation, ec); asio::detail::throw_error(ec, "wait"); } /// Perform a blocking wait on the timer. /** * This function is used to wait for the timer to expire. This function * blocks and does not return until the timer has expired. * * @param ec Set to indicate what error occurred, if any. */ void wait(asio::error_code& ec) { this->service.wait(this->implementation, ec); } /// Start an asynchronous wait on the timer. /** * This function may be used to initiate an asynchronous wait against the * timer. It always returns immediately. * * For each call to async_wait(), the supplied handler will be called exactly * once. The handler will be called when: * * @li The timer has expired. * * @li The timer was cancelled, in which case the handler is passed the error * code asio::error::operation_aborted. * * @param handler The handler to be called when the timer expires. Copies * will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * const asio::error_code& error // Result of operation. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). */ template ASIO_INITFN_RESULT_TYPE(WaitHandler, void (asio::error_code)) async_wait(ASIO_MOVE_ARG(WaitHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a WaitHandler. 
ASIO_WAIT_HANDLER_CHECK(WaitHandler, handler) type_check; return this->service.async_wait(this->implementation, ASIO_MOVE_CAST(WaitHandler)(handler)); } }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_BOOST_DATE_TIME) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_BASIC_DEADLINE_TIMER_HPP galera-3-25.3.20/asio/asio/error_code.hpp0000644000015300001660000001033513042054732017662 0ustar jenkinsjenkins// // error_code.hpp // ~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_ERROR_CODE_HPP #define ASIO_ERROR_CODE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_SYSTEM_ERROR) # include #else // defined(ASIO_HAS_STD_SYSTEM_ERROR) # include # include "asio/detail/noncopyable.hpp" # if !defined(ASIO_NO_IOSTREAM) # include # endif // !defined(ASIO_NO_IOSTREAM) #endif // defined(ASIO_HAS_STD_SYSTEM_ERROR) #include "asio/detail/push_options.hpp" namespace asio { #if defined(ASIO_HAS_STD_SYSTEM_ERROR) typedef std::error_category error_category; #else // defined(ASIO_HAS_STD_SYSTEM_ERROR) /// Base class for all error categories. class error_category : private noncopyable { public: /// Destructor. virtual ~error_category() { } /// Returns a string naming the error category. virtual const char* name() const = 0; /// Returns a string describing the error denoted by @c value. virtual std::string message(int value) const = 0; /// Equality operator to compare two error categories. bool operator==(const error_category& rhs) const { return this == &rhs; } /// Inequality operator to compare two error categories. bool operator!=(const error_category& rhs) const { return !(*this == rhs); } }; #endif // defined(ASIO_HAS_STD_SYSTEM_ERROR) /// Returns the error category used for the system errors produced by asio. extern ASIO_DECL const error_category& system_category(); #if defined(ASIO_HAS_STD_SYSTEM_ERROR) typedef std::error_code error_code; #else // defined(ASIO_HAS_STD_SYSTEM_ERROR) /// Class to represent an error code value. class error_code { public: /// Default constructor. error_code() : value_(0), category_(&system_category()) { } /// Construct with specific error code and category. error_code(int v, const error_category& c) : value_(v), category_(&c) { } /// Construct from an error code enum. template error_code(ErrorEnum e) { *this = make_error_code(e); } /// Get the error value. int value() const { return value_; } /// Get the error category. const error_category& category() const { return *category_; } /// Get the message associated with the error. std::string message() const { return category_->message(value_); } struct unspecified_bool_type_t { }; typedef void (*unspecified_bool_type)(unspecified_bool_type_t); static void unspecified_bool_true(unspecified_bool_type_t) {} /// Operator returns non-null if there is a non-success error code. operator unspecified_bool_type() const { if (value_ == 0) return 0; else return &error_code::unspecified_bool_true; } /// Operator to test if the error represents success. bool operator!() const { return value_ == 0; } /// Equality operator to compare two error objects.
friend bool operator==(const error_code& e1, const error_code& e2) { return e1.value_ == e2.value_ && e1.category_ == e2.category_; } /// Inequality operator to compare two error objects. friend bool operator!=(const error_code& e1, const error_code& e2) { return e1.value_ != e2.value_ || e1.category_ != e2.category_; } private: // The value associated with the error code. int value_; // The category associated with the error code. const error_category* category_; }; # if !defined(ASIO_NO_IOSTREAM) /// Output an error code. template std::basic_ostream& operator<<( std::basic_ostream& os, const error_code& ec) { os << ec.category().name() << ':' << ec.value(); return os; } # endif // !defined(ASIO_NO_IOSTREAM) #endif // defined(ASIO_HAS_STD_SYSTEM_ERROR) } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/impl/error_code.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_ERROR_CODE_HPP galera-3-25.3.20/asio/asio/signal_set.hpp0000644000015300001660000000124713042054732017671 0ustar jenkinsjenkins// // signal_set.hpp // ~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SIGNAL_SET_HPP #define ASIO_SIGNAL_SET_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/basic_signal_set.hpp" namespace asio { /// Typedef for the typical usage of a signal set. typedef basic_signal_set<> signal_set; } // namespace asio #endif // ASIO_SIGNAL_SET_HPP galera-3-25.3.20/asio/asio/basic_seq_packet_socket.hpp0000644000015300001660000005324713042054732022400 0ustar jenkinsjenkins// // basic_seq_packet_socket.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BASIC_SEQ_PACKET_SOCKET_HPP #define ASIO_BASIC_SEQ_PACKET_SOCKET_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/basic_socket.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/seq_packet_socket_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Provides sequenced packet socket functionality. /** * The basic_seq_packet_socket class template provides asynchronous and blocking * sequenced packet socket functionality. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ template > class basic_seq_packet_socket : public basic_socket { public: /// (Deprecated: Use native_handle_type.) The native representation of a /// socket. typedef typename SeqPacketSocketService::native_handle_type native_type; /// The native representation of a socket. typedef typename SeqPacketSocketService::native_handle_type native_handle_type; /// The protocol type. typedef Protocol protocol_type; /// The endpoint type. typedef typename Protocol::endpoint endpoint_type; /// Construct a basic_seq_packet_socket without opening it. /** * This constructor creates a sequenced packet socket without opening it. 
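// Illustrative sketch (not part of the asio sources in this archive): basic use
// of asio::error_code as documented above. A default-constructed code means
// success; a code can be tested in a boolean context, compared against the
// well-known asio::error enumerators, and turned into a readable message.
#include <iostream>
#include "asio.hpp"

int main()
{
  asio::error_code ec;                        // default: success
  std::cout << std::boolalpha
            << "initially an error? " << static_cast<bool>(ec) << std::endl;

  ec = asio::error::operation_aborted;        // assign a well-known error
  if (ec)
    std::cout << ec.category().name() << ": " << ec.message() << std::endl;

  if (ec == asio::error::operation_aborted)
    std::cout << "operation was cancelled" << std::endl;
}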
The * socket needs to be opened and then connected or accepted before data can * be sent or received on it. * * @param io_service The io_service object that the sequenced packet socket * will use to dispatch handlers for any asynchronous operations performed on * the socket. */ explicit basic_seq_packet_socket(asio::io_service& io_service) : basic_socket(io_service) { } /// Construct and open a basic_seq_packet_socket. /** * This constructor creates and opens a sequenced_packet socket. The socket * needs to be connected or accepted before data can be sent or received on * it. * * @param io_service The io_service object that the sequenced packet socket * will use to dispatch handlers for any asynchronous operations performed on * the socket. * * @param protocol An object specifying protocol parameters to be used. * * @throws asio::system_error Thrown on failure. */ basic_seq_packet_socket(asio::io_service& io_service, const protocol_type& protocol) : basic_socket(io_service, protocol) { } /// Construct a basic_seq_packet_socket, opening it and binding it to the /// given local endpoint. /** * This constructor creates a sequenced packet socket and automatically opens * it bound to the specified endpoint on the local machine. The protocol used * is the protocol associated with the given endpoint. * * @param io_service The io_service object that the sequenced packet socket * will use to dispatch handlers for any asynchronous operations performed on * the socket. * * @param endpoint An endpoint on the local machine to which the sequenced * packet socket will be bound. * * @throws asio::system_error Thrown on failure. */ basic_seq_packet_socket(asio::io_service& io_service, const endpoint_type& endpoint) : basic_socket(io_service, endpoint) { } /// Construct a basic_seq_packet_socket on an existing native socket. /** * This constructor creates a sequenced packet socket object to hold an * existing native socket. * * @param io_service The io_service object that the sequenced packet socket * will use to dispatch handlers for any asynchronous operations performed on * the socket. * * @param protocol An object specifying protocol parameters to be used. * * @param native_socket The new underlying socket implementation. * * @throws asio::system_error Thrown on failure. */ basic_seq_packet_socket(asio::io_service& io_service, const protocol_type& protocol, const native_handle_type& native_socket) : basic_socket( io_service, protocol, native_socket) { } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a basic_seq_packet_socket from another. /** * This constructor moves a sequenced packet socket from one object to * another. * * @param other The other basic_seq_packet_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_seq_packet_socket(io_service&) constructor. */ basic_seq_packet_socket(basic_seq_packet_socket&& other) : basic_socket( ASIO_MOVE_CAST(basic_seq_packet_socket)(other)) { } /// Move-assign a basic_seq_packet_socket from another. /** * This assignment operator moves a sequenced packet socket from one object to * another. * * @param other The other basic_seq_packet_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_seq_packet_socket(io_service&) constructor. 
*/ basic_seq_packet_socket& operator=(basic_seq_packet_socket&& other) { basic_socket::operator=( ASIO_MOVE_CAST(basic_seq_packet_socket)(other)); return *this; } /// Move-construct a basic_seq_packet_socket from a socket of another protocol /// type. /** * This constructor moves a sequenced packet socket from one object to * another. * * @param other The other basic_seq_packet_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_seq_packet_socket(io_service&) constructor. */ template basic_seq_packet_socket( basic_seq_packet_socket&& other, typename enable_if::value>::type* = 0) : basic_socket( ASIO_MOVE_CAST2(basic_seq_packet_socket< Protocol1, SeqPacketSocketService1>)(other)) { } /// Move-assign a basic_seq_packet_socket from a socket of another protocol /// type. /** * This assignment operator moves a sequenced packet socket from one object to * another. * * @param other The other basic_seq_packet_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_seq_packet_socket(io_service&) constructor. */ template typename enable_if::value, basic_seq_packet_socket>::type& operator=( basic_seq_packet_socket&& other) { basic_socket::operator=( ASIO_MOVE_CAST2(basic_seq_packet_socket< Protocol1, SeqPacketSocketService1>)(other)); return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Send some data on the socket. /** * This function is used to send data on the sequenced packet socket. The * function call will block until the data has been sent successfully, or until an * error occurs. * * @param buffers One or more data buffers to be sent on the socket. * * @param flags Flags specifying how the send call is to be made. * * @returns The number of bytes sent. * * @throws asio::system_error Thrown on failure. * * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code * socket.send(asio::buffer(data, size), 0); * @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t send(const ConstBufferSequence& buffers, socket_base::message_flags flags) { asio::error_code ec; std::size_t s = this->get_service().send( this->get_implementation(), buffers, flags, ec); asio::detail::throw_error(ec, "send"); return s; } /// Send some data on the socket. /** * This function is used to send data on the sequenced packet socket. The * function call will block until the data has been sent successfully, or until an * error occurs. * * @param buffers One or more data buffers to be sent on the socket. * * @param flags Flags specifying how the send call is to be made. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes sent. Returns 0 if an error occurred. * * @note The send operation may not transmit all of the data to the peer. * Consider using the @ref write function if you need to ensure that all data * is written before the blocking operation completes. */ template std::size_t send(const ConstBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { return this->get_service().send( this->get_implementation(), buffers, flags, ec); } /// Start an asynchronous send. /** * This function is used to asynchronously send data on the sequenced packet * socket.
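// Illustrative sketch (not part of the asio sources in this archive): sending
// one record on an already-connected sequenced-packet socket with the
// non-throwing send() overload documented above. SeqPacketSocket is a
// placeholder for any asio::basic_seq_packet_socket instantiation.
#include <cstddef>
#include <iostream>
#include "asio.hpp"

template <typename SeqPacketSocket>
bool send_record(SeqPacketSocket& socket, const char* data, std::size_t size)
{
  asio::error_code ec;
  // 0 == no message flags; the whole buffer forms one record.
  std::size_t n = socket.send(asio::buffer(data, size), 0, ec);
  if (ec)
  {
    std::cerr << "send failed: " << ec.message() << std::endl;
    return false;
  }
  return n == size;
}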
The function call always returns immediately. * * @param buffers One or more data buffers to be sent on the socket. Although * the buffers object may be copied as necessary, ownership of the underlying * memory blocks is retained by the caller, which must guarantee that they * remain valid until the handler is called. * * @param flags Flags specifying how the send call is to be made. * * @param handler The handler to be called when the send operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes sent. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code * socket.async_send(asio::buffer(data, size), 0, handler); * @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_send(const ConstBufferSequence& buffers, socket_base::message_flags flags, ASIO_MOVE_ARG(WriteHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a WriteHandler. ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; return this->get_service().async_send(this->get_implementation(), buffers, flags, ASIO_MOVE_CAST(WriteHandler)(handler)); } /// Receive some data on the socket. /** * This function is used to receive data on the sequenced packet socket. The * function call will block until data has been received successfully, or * until an error occurs. * * @param buffers One or more buffers into which the data will be received. * * @param out_flags After the receive call completes, contains flags * associated with the received data. For example, if the * socket_base::message_end_of_record bit is set then the received data marks * the end of a record. * * @returns The number of bytes received. * * @throws asio::system_error Thrown on failure. An error code of * asio::error::eof indicates that the connection was closed by the * peer. * * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code * socket.receive(asio::buffer(data, size), out_flags); * @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t receive(const MutableBufferSequence& buffers, socket_base::message_flags& out_flags) { asio::error_code ec; std::size_t s = this->get_service().receive( this->get_implementation(), buffers, 0, out_flags, ec); asio::detail::throw_error(ec, "receive"); return s; } /// Receive some data on the socket. /** * This function is used to receive data on the sequenced packet socket. The * function call will block until data has been received successfully, or * until an error occurs. * * @param buffers One or more buffers into which the data will be received. * * @param in_flags Flags specifying how the receive call is to be made. 
* * @param out_flags After the receive call completes, contains flags * associated with the received data. For example, if the * socket_base::message_end_of_record bit is set then the received data marks * the end of a record. * * @returns The number of bytes received. * * @throws asio::system_error Thrown on failure. An error code of * asio::error::eof indicates that the connection was closed by the * peer. * * @note The receive operation may not receive all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that the * requested amount of data is read before the blocking operation completes. * * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code * socket.receive(asio::buffer(data, size), 0, out_flags); * @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t receive(const MutableBufferSequence& buffers, socket_base::message_flags in_flags, socket_base::message_flags& out_flags) { asio::error_code ec; std::size_t s = this->get_service().receive( this->get_implementation(), buffers, in_flags, out_flags, ec); asio::detail::throw_error(ec, "receive"); return s; } /// Receive some data on a connected socket. /** * This function is used to receive data on the sequenced packet socket. The * function call will block until data has been received successfully, or * until an error occurs. * * @param buffers One or more buffers into which the data will be received. * * @param in_flags Flags specifying how the receive call is to be made. * * @param out_flags After the receive call completes, contains flags * associated with the received data. For example, if the * socket_base::message_end_of_record bit is set then the received data marks * the end of a record. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes received. Returns 0 if an error occurred. * * @note The receive operation may not receive all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that the * requested amount of data is read before the blocking operation completes. */ template std::size_t receive(const MutableBufferSequence& buffers, socket_base::message_flags in_flags, socket_base::message_flags& out_flags, asio::error_code& ec) { return this->get_service().receive(this->get_implementation(), buffers, in_flags, out_flags, ec); } /// Start an asynchronous receive. /** * This function is used to asynchronously receive data from the sequenced * packet socket. The function call always returns immediately. * * @param buffers One or more buffers into which the data will be received. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param out_flags Once the asynchronous operation completes, contains flags * associated with the received data. For example, if the * socket_base::message_end_of_record bit is set then the received data marks * the end of a record. The caller must guarantee that the referenced * variable remains valid until the handler is called. * * @param handler The handler to be called when the receive operation * completes. Copies will be made of the handler as required. 
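// Illustrative sketch (not part of the asio sources in this archive): blocking
// receive using the in_flags/out_flags overload documented above, checking
// out_flags for the end-of-record marker. SeqPacketSocket again stands for any
// asio::basic_seq_packet_socket instantiation.
#include <cstddef>
#include <iostream>
#include "asio.hpp"

template <typename SeqPacketSocket>
std::size_t receive_record(SeqPacketSocket& socket, char* data, std::size_t size)
{
  asio::socket_base::message_flags out_flags = 0;
  asio::error_code ec;
  std::size_t n = socket.receive(asio::buffer(data, size),
                                 0 /* in_flags */, out_flags, ec);
  if (ec)
    std::cerr << "receive failed: " << ec.message() << std::endl;
  else if (out_flags & asio::socket_base::message_end_of_record)
    std::cout << "complete record of " << n << " bytes" << std::endl;
  return n;
}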
The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes received. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code * socket.async_receive(asio::buffer(data, size), out_flags, handler); * @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive(const MutableBufferSequence& buffers, socket_base::message_flags& out_flags, ASIO_MOVE_ARG(ReadHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ReadHandler. ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; return this->get_service().async_receive( this->get_implementation(), buffers, 0, out_flags, ASIO_MOVE_CAST(ReadHandler)(handler)); } /// Start an asynchronous receive. /** * This function is used to asynchronously receive data from the sequenced * data socket. The function call always returns immediately. * * @param buffers One or more buffers into which the data will be received. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param in_flags Flags specifying how the receive call is to be made. * * @param out_flags Once the asynchronous operation completes, contains flags * associated with the received data. For example, if the * socket_base::message_end_of_record bit is set then the received data marks * the end of a record. The caller must guarantee that the referenced * variable remains valid until the handler is called. * * @param handler The handler to be called when the receive operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes received. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code * socket.async_receive( * asio::buffer(data, size), * 0, out_flags, handler); * @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive(const MutableBufferSequence& buffers, socket_base::message_flags in_flags, socket_base::message_flags& out_flags, ASIO_MOVE_ARG(ReadHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ReadHandler. 
ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; return this->get_service().async_receive( this->get_implementation(), buffers, in_flags, out_flags, ASIO_MOVE_CAST(ReadHandler)(handler)); } }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_BASIC_SEQ_PACKET_SOCKET_HPP galera-3-25.3.20/asio/asio/read_at.hpp0000644000015300001660000006176613042054732017154 0ustar jenkinsjenkins// // read_at.hpp // ~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_READ_AT_HPP #define ASIO_READ_AT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/async_result.hpp" #include "asio/basic_streambuf_fwd.hpp" #include "asio/detail/cstdint.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { /** * @defgroup read_at asio::read_at * * @brief Attempt to read a certain amount of data at the specified offset * before returning. */ /*@{*/ /// Attempt to read a certain amount of data at the specified offset before /// returning. /** * This function is used to read a certain number of bytes of data from a * random access device at the specified offset. The call will block until one * of the following conditions is true: * * @li The supplied buffers are full. That is, the bytes transferred is equal to * the sum of the buffer sizes. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the device's * read_some_at function. * * @param d The device from which the data is to be read. The type must support * the SyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param buffers One or more buffers into which the data will be read. The sum * of the buffer sizes indicates the maximum number of bytes to read from the * device. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. * * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code asio::read_at(d, 42, asio::buffer(data, size)); @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. * * @note This overload is equivalent to calling: * @code asio::read_at( * d, 42, buffers, * asio::transfer_all()); @endcode */ template std::size_t read_at(SyncRandomAccessReadDevice& d, uint64_t offset, const MutableBufferSequence& buffers); /// Attempt to read a certain amount of data at the specified offset before /// returning. /** * This function is used to read a certain number of bytes of data from a * random access device at the specified offset. The call will block until one * of the following conditions is true: * * @li The supplied buffers are full. That is, the bytes transferred is equal to * the sum of the buffer sizes. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the device's * read_some_at function. * * @param d The device from which the data is to be read. The type must support * the SyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param buffers One or more buffers into which the data will be read. 
The sum * of the buffer sizes indicates the maximum number of bytes to read from the * device. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes transferred. * * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code asio::read_at(d, 42, * asio::buffer(data, size), ec); @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. * * @note This overload is equivalent to calling: * @code asio::read_at( * d, 42, buffers, * asio::transfer_all(), ec); @endcode */ template std::size_t read_at(SyncRandomAccessReadDevice& d, uint64_t offset, const MutableBufferSequence& buffers, asio::error_code& ec); /// Attempt to read a certain amount of data at the specified offset before /// returning. /** * This function is used to read a certain number of bytes of data from a * random access device at the specified offset. The call will block until one * of the following conditions is true: * * @li The supplied buffers are full. That is, the bytes transferred is equal to * the sum of the buffer sizes. * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the device's * read_some_at function. * * @param d The device from which the data is to be read. The type must support * the SyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param buffers One or more buffers into which the data will be read. The sum * of the buffer sizes indicates the maximum number of bytes to read from the * device. * * @param completion_condition The function object to be called to determine * whether the read operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest read_some_at operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the read operation is complete. A non-zero * return value indicates the maximum number of bytes to be read on the next * call to the device's read_some_at function. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. * * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code asio::read_at(d, 42, asio::buffer(data, size), * asio::transfer_at_least(32)); @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t read_at(SyncRandomAccessReadDevice& d, uint64_t offset, const MutableBufferSequence& buffers, CompletionCondition completion_condition); /// Attempt to read a certain amount of data at the specified offset before /// returning. /** * This function is used to read a certain number of bytes of data from a * random access device at the specified offset. The call will block until one * of the following conditions is true: * * @li The supplied buffers are full. That is, the bytes transferred is equal to * the sum of the buffer sizes. * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the device's * read_some_at function. * * @param d The device from which the data is to be read. 
The type must support * the SyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param buffers One or more buffers into which the data will be read. The sum * of the buffer sizes indicates the maximum number of bytes to read from the * device. * * @param completion_condition The function object to be called to determine * whether the read operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest read_some_at operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the read operation is complete. A non-zero * return value indicates the maximum number of bytes to be read on the next * call to the device's read_some_at function. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes read. If an error occurs, returns the total * number of bytes successfully transferred prior to the error. */ template std::size_t read_at(SyncRandomAccessReadDevice& d, uint64_t offset, const MutableBufferSequence& buffers, CompletionCondition completion_condition, asio::error_code& ec); #if !defined(ASIO_NO_IOSTREAM) /// Attempt to read a certain amount of data at the specified offset before /// returning. /** * This function is used to read a certain number of bytes of data from a * random access device at the specified offset. The call will block until one * of the following conditions is true: * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the device's * read_some_at function. * * @param d The device from which the data is to be read. The type must support * the SyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param b The basic_streambuf object into which the data will be read. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. * * @note This overload is equivalent to calling: * @code asio::read_at( * d, 42, b, * asio::transfer_all()); @endcode */ template std::size_t read_at(SyncRandomAccessReadDevice& d, uint64_t offset, basic_streambuf& b); /// Attempt to read a certain amount of data at the specified offset before /// returning. /** * This function is used to read a certain number of bytes of data from a * random access device at the specified offset. The call will block until one * of the following conditions is true: * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the device's * read_some_at function. * * @param d The device from which the data is to be read. The type must support * the SyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param b The basic_streambuf object into which the data will be read. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes transferred. * * @note This overload is equivalent to calling: * @code asio::read_at( * d, 42, b, * asio::transfer_all(), ec); @endcode */ template std::size_t read_at(SyncRandomAccessReadDevice& d, uint64_t offset, basic_streambuf& b, asio::error_code& ec); /// Attempt to read a certain amount of data at the specified offset before /// returning. /** * This function is used to read a certain number of bytes of data from a * random access device at the specified offset. 
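// Illustrative sketch (not part of the asio sources in this archive): reading a
// fixed-size header at a given offset with the completion-condition overload
// documented above. SyncRandomAccessReadDevice is a placeholder for any type
// modelling that concept (for example asio::windows::random_access_handle).
#include <array>
#include <cstddef>
#include <cstdint>
#include "asio.hpp"

template <typename SyncRandomAccessReadDevice>
bool read_header_at(SyncRandomAccessReadDevice& device, uint64_t offset,
                    std::array<char, 16>& header, asio::error_code& ec)
{
  std::size_t n = asio::read_at(device, offset,
                                asio::buffer(header.data(), header.size()),
                                asio::transfer_exactly(header.size()), ec);
  return !ec && n == header.size();
}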
The call will block until one * of the following conditions is true: * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the device's * read_some_at function. * * @param d The device from which the data is to be read. The type must support * the SyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param b The basic_streambuf object into which the data will be read. * * @param completion_condition The function object to be called to determine * whether the read operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest read_some_at operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the read operation is complete. A non-zero * return value indicates the maximum number of bytes to be read on the next * call to the device's read_some_at function. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. */ template std::size_t read_at(SyncRandomAccessReadDevice& d, uint64_t offset, basic_streambuf& b, CompletionCondition completion_condition); /// Attempt to read a certain amount of data at the specified offset before /// returning. /** * This function is used to read a certain number of bytes of data from a * random access device at the specified offset. The call will block until one * of the following conditions is true: * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the device's * read_some_at function. * * @param d The device from which the data is to be read. The type must support * the SyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param b The basic_streambuf object into which the data will be read. * * @param completion_condition The function object to be called to determine * whether the read operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest read_some_at operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the read operation is complete. A non-zero * return value indicates the maximum number of bytes to be read on the next * call to the device's read_some_at function. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes read. If an error occurs, returns the total * number of bytes successfully transferred prior to the error. */ template std::size_t read_at(SyncRandomAccessReadDevice& d, uint64_t offset, basic_streambuf& b, CompletionCondition completion_condition, asio::error_code& ec); #endif // !defined(ASIO_NO_IOSTREAM) /*@}*/ /** * @defgroup async_read_at asio::async_read_at * * @brief Start an asynchronous operation to read a certain amount of data at * the specified offset. */ /*@{*/ /// Start an asynchronous operation to read a certain amount of data at the /// specified offset. /** * This function is used to asynchronously read a certain number of bytes of * data from a random access device at the specified offset. The function call * always returns immediately. 
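// Illustrative sketch (not part of the asio sources in this archive): reading
// into an asio::streambuf at a given offset with the streambuf overload
// documented above, then draining it through a std::istream. The device
// argument is assumed to satisfy SyncRandomAccessReadDevice.
#include <cstdint>
#include <istream>
#include <string>
#include "asio.hpp"

template <typename SyncRandomAccessReadDevice>
std::string read_line_at(SyncRandomAccessReadDevice& device, uint64_t offset)
{
  asio::streambuf b;
  asio::read_at(device, offset, b, asio::transfer_at_least(1));
  std::istream is(&b);
  std::string line;
  std::getline(is, line);
  return line;
}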
The asynchronous operation will continue until * one of the following conditions is true: * * @li The supplied buffers are full. That is, the bytes transferred is equal to * the sum of the buffer sizes. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the device's * async_read_some_at function. * * @param d The device from which the data is to be read. The type must support * the AsyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param buffers One or more buffers into which the data will be read. The sum * of the buffer sizes indicates the maximum number of bytes to read from the * device. Although the buffers object may be copied as necessary, ownership of * the underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * // Result of operation. * const asio::error_code& error, * * // Number of bytes copied into the buffers. If an error * // occurred, this will be the number of bytes successfully * // transferred prior to the error. * std::size_t bytes_transferred * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation of * the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code * asio::async_read_at(d, 42, asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. * * @note This overload is equivalent to calling: * @code asio::async_read_at( * d, 42, buffers, * asio::transfer_all(), * handler); @endcode */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_at(AsyncRandomAccessReadDevice& d, uint64_t offset, const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler); /// Start an asynchronous operation to read a certain amount of data at the /// specified offset. /** * This function is used to asynchronously read a certain number of bytes of * data from a random access device at the specified offset. The function call * always returns immediately. The asynchronous operation will continue until * one of the following conditions is true: * * @li The supplied buffers are full. That is, the bytes transferred is equal to * the sum of the buffer sizes. * * @li The completion_condition function object returns 0. * * @param d The device from which the data is to be read. The type must support * the AsyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param buffers One or more buffers into which the data will be read. The sum * of the buffer sizes indicates the maximum number of bytes to read from the * device. Although the buffers object may be copied as necessary, ownership of * the underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param completion_condition The function object to be called to determine * whether the read operation is complete. 
The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest async_read_some_at operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the read operation is complete. A non-zero * return value indicates the maximum number of bytes to be read on the next * call to the device's async_read_some_at function. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * // Result of operation. * const asio::error_code& error, * * // Number of bytes copied into the buffers. If an error * // occurred, this will be the number of bytes successfully * // transferred prior to the error. * std::size_t bytes_transferred * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation of * the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code asio::async_read_at(d, 42, * asio::buffer(data, size), * asio::transfer_at_least(32), * handler); @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_at(AsyncRandomAccessReadDevice& d, uint64_t offset, const MutableBufferSequence& buffers, CompletionCondition completion_condition, ASIO_MOVE_ARG(ReadHandler) handler); #if !defined(ASIO_NO_IOSTREAM) /// Start an asynchronous operation to read a certain amount of data at the /// specified offset. /** * This function is used to asynchronously read a certain number of bytes of * data from a random access device at the specified offset. The function call * always returns immediately. The asynchronous operation will continue until * one of the following conditions is true: * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the device's * async_read_some_at function. * * @param d The device from which the data is to be read. The type must support * the AsyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param b A basic_streambuf object into which the data will be read. Ownership * of the streambuf is retained by the caller, which must guarantee that it * remains valid until the handler is called. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * // Result of operation. * const asio::error_code& error, * * // Number of bytes copied into the buffers. If an error * // occurred, this will be the number of bytes successfully * // transferred prior to the error. * std::size_t bytes_transferred * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation of * the handler will be performed in a manner equivalent to using * asio::io_service::post(). 
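// Illustrative sketch (not part of the asio sources in this archive): starting
// an asynchronous read at a fixed offset with a lambda handler matching the
// signature documented above. The device is assumed to satisfy
// AsyncRandomAccessReadDevice and, like the buffer, to outlive the operation.
#include <cstdint>
#include <iostream>
#include <vector>
#include "asio.hpp"

template <typename AsyncRandomAccessReadDevice>
void start_block_read(AsyncRandomAccessReadDevice& device, uint64_t offset,
                      std::vector<char>& block)
{
  asio::async_read_at(device, offset, asio::buffer(block),
      [](const asio::error_code& ec, std::size_t bytes_transferred)
      {
        if (ec)
          std::cerr << "async_read_at: " << ec.message() << std::endl;
        else
          std::cout << "read " << bytes_transferred << " bytes" << std::endl;
      });
}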
* * @note This overload is equivalent to calling: * @code asio::async_read_at( * d, 42, b, * asio::transfer_all(), * handler); @endcode */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_at(AsyncRandomAccessReadDevice& d, uint64_t offset, basic_streambuf& b, ASIO_MOVE_ARG(ReadHandler) handler); /// Start an asynchronous operation to read a certain amount of data at the /// specified offset. /** * This function is used to asynchronously read a certain number of bytes of * data from a random access device at the specified offset. The function call * always returns immediately. The asynchronous operation will continue until * one of the following conditions is true: * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the device's * async_read_some_at function. * * @param d The device from which the data is to be read. The type must support * the AsyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param b A basic_streambuf object into which the data will be read. Ownership * of the streambuf is retained by the caller, which must guarantee that it * remains valid until the handler is called. * * @param completion_condition The function object to be called to determine * whether the read operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest async_read_some_at operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the read operation is complete. A non-zero * return value indicates the maximum number of bytes to be read on the next * call to the device's async_read_some_at function. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * // Result of operation. * const asio::error_code& error, * * // Number of bytes copied into the buffers. If an error * // occurred, this will be the number of bytes successfully * // transferred prior to the error. * std::size_t bytes_transferred * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation of * the handler will be performed in a manner equivalent to using * asio::io_service::post(). */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_at(AsyncRandomAccessReadDevice& d, uint64_t offset, basic_streambuf& b, CompletionCondition completion_condition, ASIO_MOVE_ARG(ReadHandler) handler); #endif // !defined(ASIO_NO_IOSTREAM) /*@}*/ } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/read_at.hpp" #endif // ASIO_READ_AT_HPP galera-3-25.3.20/asio/asio/buffered_read_stream_fwd.hpp0000644000015300001660000000120213042054732022520 0ustar jenkinsjenkins// // buffered_read_stream_fwd.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BUFFERED_READ_STREAM_FWD_HPP #define ASIO_BUFFERED_READ_STREAM_FWD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) namespace asio { template class buffered_read_stream; } // namespace asio #endif // ASIO_BUFFERED_READ_STREAM_FWD_HPP galera-3-25.3.20/asio/asio/windows/0000755000015300001660000000000013042054732016516 5ustar jenkinsjenkinsgalera-3-25.3.20/asio/asio/windows/overlapped_ptr.hpp0000644000015300001660000000536713042054732022270 0ustar jenkinsjenkins// // windows/overlapped_ptr.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_WINDOWS_OVERLAPPED_PTR_HPP #define ASIO_WINDOWS_OVERLAPPED_PTR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_WINDOWS_OVERLAPPED_PTR) \ || defined(GENERATING_DOCUMENTATION) #include "asio/detail/noncopyable.hpp" #include "asio/detail/win_iocp_overlapped_ptr.hpp" #include "asio/io_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace windows { /// Wraps a handler to create an OVERLAPPED object for use with overlapped I/O. /** * A special-purpose smart pointer used to wrap an application handler so that * it can be passed as the LPOVERLAPPED argument to overlapped I/O functions. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ class overlapped_ptr : private noncopyable { public: /// Construct an empty overlapped_ptr. overlapped_ptr() : impl_() { } /// Construct an overlapped_ptr to contain the specified handler. template explicit overlapped_ptr(asio::io_service& io_service, ASIO_MOVE_ARG(Handler) handler) : impl_(io_service, ASIO_MOVE_CAST(Handler)(handler)) { } /// Destructor automatically frees the OVERLAPPED object unless released. ~overlapped_ptr() { } /// Reset to empty. void reset() { impl_.reset(); } /// Reset to contain the specified handler, freeing any current OVERLAPPED /// object. template void reset(asio::io_service& io_service, ASIO_MOVE_ARG(Handler) handler) { impl_.reset(io_service, ASIO_MOVE_CAST(Handler)(handler)); } /// Get the contained OVERLAPPED object. OVERLAPPED* get() { return impl_.get(); } /// Get the contained OVERLAPPED object. const OVERLAPPED* get() const { return impl_.get(); } /// Release ownership of the OVERLAPPED object. OVERLAPPED* release() { return impl_.release(); } /// Post completion notification for overlapped operation. Releases ownership. void complete(const asio::error_code& ec, std::size_t bytes_transferred) { impl_.complete(ec, bytes_transferred); } private: detail::win_iocp_overlapped_ptr impl_; }; } // namespace windows } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_WINDOWS_OVERLAPPED_PTR) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_WINDOWS_OVERLAPPED_PTR_HPP galera-3-25.3.20/asio/asio/windows/random_access_handle.hpp0000644000015300001660000000204513042054732023344 0ustar jenkinsjenkins// // windows/random_access_handle.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_WINDOWS_RANDOM_ACCESS_HANDLE_HPP #define ASIO_WINDOWS_RANDOM_ACCESS_HANDLE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE) \ || defined(GENERATING_DOCUMENTATION) #include "asio/windows/basic_random_access_handle.hpp" namespace asio { namespace windows { /// Typedef for the typical usage of a random-access handle. typedef basic_random_access_handle<> random_access_handle; } // namespace windows } // namespace asio #endif // defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_WINDOWS_RANDOM_ACCESS_HANDLE_HPP galera-3-25.3.20/asio/asio/windows/basic_stream_handle.hpp0000644000015300001660000003260313042054732023202 0ustar jenkinsjenkins// // windows/basic_stream_handle.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_WINDOWS_BASIC_STREAM_HANDLE_HPP #define ASIO_WINDOWS_BASIC_STREAM_HANDLE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_WINDOWS_STREAM_HANDLE) \ || defined(GENERATING_DOCUMENTATION) #include #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/windows/basic_handle.hpp" #include "asio/windows/stream_handle_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace windows { /// Provides stream-oriented handle functionality. /** * The windows::basic_stream_handle class template provides asynchronous and * blocking stream-oriented handle functionality. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. * * @par Concepts: * AsyncReadStream, AsyncWriteStream, Stream, SyncReadStream, SyncWriteStream. */ template class basic_stream_handle : public basic_handle { public: /// (Deprecated: Use native_handle_type.) The native representation of a /// handle. typedef typename StreamHandleService::native_handle_type native_type; /// The native representation of a handle. typedef typename StreamHandleService::native_handle_type native_handle_type; /// Construct a basic_stream_handle without opening it. /** * This constructor creates a stream handle without opening it. The handle * needs to be opened and then connected or accepted before data can be sent * or received on it. * * @param io_service The io_service object that the stream handle will use to * dispatch handlers for any asynchronous operations performed on the handle. */ explicit basic_stream_handle(asio::io_service& io_service) : basic_handle(io_service) { } /// Construct a basic_stream_handle on an existing native handle. /** * This constructor creates a stream handle object to hold an existing native * handle. * * @param io_service The io_service object that the stream handle will use to * dispatch handlers for any asynchronous operations performed on the handle. * * @param handle The new underlying handle implementation. 
* * @throws asio::system_error Thrown on failure. */ basic_stream_handle(asio::io_service& io_service, const native_handle_type& handle) : basic_handle(io_service, handle) { } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a basic_stream_handle from another. /** * This constructor moves a stream handle from one object to another. * * @param other The other basic_stream_handle object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_stream_handle(io_service&) constructor. */ basic_stream_handle(basic_stream_handle&& other) : basic_handle( ASIO_MOVE_CAST(basic_stream_handle)(other)) { } /// Move-assign a basic_stream_handle from another. /** * This assignment operator moves a stream handle from one object to * another. * * @param other The other basic_stream_handle object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_stream_handle(io_service&) constructor. */ basic_stream_handle& operator=(basic_stream_handle&& other) { basic_handle::operator=( ASIO_MOVE_CAST(basic_stream_handle)(other)); return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Write some data to the handle. /** * This function is used to write data to the stream handle. The function call * will block until one or more bytes of the data has been written * successfully, or until an error occurs. * * @param buffers One or more data buffers to be written to the handle. * * @returns The number of bytes written. * * @throws asio::system_error Thrown on failure. An error code of * asio::error::eof indicates that the connection was closed by the * peer. * * @note The write_some operation may not transmit all of the data to the * peer. Consider using the @ref write function if you need to ensure that * all data is written before the blocking operation completes. * * @par Example * To write a single data buffer use the @ref buffer function as follows: * @code * handle.write_some(asio::buffer(data, size)); * @endcode * See the @ref buffer documentation for information on writing multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t write_some(const ConstBufferSequence& buffers) { asio::error_code ec; std::size_t s = this->get_service().write_some( this->get_implementation(), buffers, ec); asio::detail::throw_error(ec, "write_some"); return s; } /// Write some data to the handle. /** * This function is used to write data to the stream handle. The function call * will block until one or more bytes of the data has been written * successfully, or until an error occurs. * * @param buffers One or more data buffers to be written to the handle. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes written. Returns 0 if an error occurred. * * @note The write_some operation may not transmit all of the data to the * peer. Consider using the @ref write function if you need to ensure that * all data is written before the blocking operation completes. */ template std::size_t write_some(const ConstBufferSequence& buffers, asio::error_code& ec) { return this->get_service().write_some( this->get_implementation(), buffers, ec); } /// Start an asynchronous write. /** * This function is used to asynchronously write data to the stream handle. * The function call always returns immediately. 
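 *
 * As an illustrative sketch only (the function name @c write_handler is
 * hypothetical), a completion handler matching the signature required by
 * this operation might look like:
 * @code
 * void write_handler(const asio::error_code& error,
 *     std::size_t bytes_transferred)
 * {
 *   if (!error)
 *   {
 *     // bytes_transferred bytes were written; any remaining data still
 *     // needs to be written by a further call.
 *   }
 * }
 * @endcode
 * Such a handler is then passed as the second argument to async_write_some.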
* * @param buffers One or more data buffers to be written to the handle. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param handler The handler to be called when the write operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes written. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @note The write operation may not transmit all of the data to the peer. * Consider using the @ref async_write function if you need to ensure that all * data is written before the asynchronous operation completes. * * @par Example * To write a single data buffer use the @ref buffer function as follows: * @code * handle.async_write_some(asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on writing multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write_some(const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a WriteHandler. ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; return this->get_service().async_write_some(this->get_implementation(), buffers, ASIO_MOVE_CAST(WriteHandler)(handler)); } /// Read some data from the handle. /** * This function is used to read data from the stream handle. The function * call will block until one or more bytes of data has been read successfully, * or until an error occurs. * * @param buffers One or more buffers into which the data will be read. * * @returns The number of bytes read. * * @throws asio::system_error Thrown on failure. An error code of * asio::error::eof indicates that the connection was closed by the * peer. * * @note The read_some operation may not read all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that * the requested amount of data is read before the blocking operation * completes. * * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code * handle.read_some(asio::buffer(data, size)); * @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t read_some(const MutableBufferSequence& buffers) { asio::error_code ec; std::size_t s = this->get_service().read_some( this->get_implementation(), buffers, ec); asio::detail::throw_error(ec, "read_some"); return s; } /// Read some data from the handle. /** * This function is used to read data from the stream handle. The function * call will block until one or more bytes of data has been read successfully, * or until an error occurs. * * @param buffers One or more buffers into which the data will be read. * * @param ec Set to indicate what error occurred, if any. 
* * @returns The number of bytes read. Returns 0 if an error occurred. * * @note The read_some operation may not read all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that * the requested amount of data is read before the blocking operation * completes. */ template std::size_t read_some(const MutableBufferSequence& buffers, asio::error_code& ec) { return this->get_service().read_some( this->get_implementation(), buffers, ec); } /// Start an asynchronous read. /** * This function is used to asynchronously read data from the stream handle. * The function call always returns immediately. * * @param buffers One or more buffers into which the data will be read. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes read. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @note The read operation may not read all of the requested number of bytes. * Consider using the @ref async_read function if you need to ensure that the * requested amount of data is read before the asynchronous operation * completes. * * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code * handle.async_read_some(asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_some(const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ReadHandler. ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; return this->get_service().async_read_some(this->get_implementation(), buffers, ASIO_MOVE_CAST(ReadHandler)(handler)); } }; } // namespace windows } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_WINDOWS_STREAM_HANDLE) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_WINDOWS_BASIC_STREAM_HANDLE_HPP galera-3-25.3.20/asio/asio/windows/object_handle_service.hpp0000644000015300001660000001153313042054732023533 0ustar jenkinsjenkins// // windows/object_handle_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2011 Boris Schaeling (boris@highscore.de) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_WINDOWS_OBJECT_HANDLE_SERVICE_HPP #define ASIO_WINDOWS_OBJECT_HANDLE_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) \ || defined(GENERATING_DOCUMENTATION) #include "asio/async_result.hpp" #include "asio/detail/win_object_handle_service.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace windows { /// Default service implementation for an object handle. class object_handle_service #if defined(GENERATING_DOCUMENTATION) : public asio::io_service::service #else : public asio::detail::service_base #endif { public: #if defined(GENERATING_DOCUMENTATION) /// The unique service identifier. static asio::io_service::id id; #endif private: // The type of the platform-specific implementation. typedef detail::win_object_handle_service service_impl_type; public: /// The type of an object handle implementation. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined implementation_type; #else typedef service_impl_type::implementation_type implementation_type; #endif /// The native handle type. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined native_handle_type; #else typedef service_impl_type::native_handle_type native_handle_type; #endif /// Construct a new object handle service for the specified io_service. explicit object_handle_service(asio::io_service& io_service) : asio::detail::service_base(io_service), service_impl_(io_service) { } /// Construct a new object handle implementation. void construct(implementation_type& impl) { service_impl_.construct(impl); } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a new object handle implementation. void move_construct(implementation_type& impl, implementation_type& other_impl) { service_impl_.move_construct(impl, other_impl); } /// Move-assign from another object handle implementation. void move_assign(implementation_type& impl, object_handle_service& other_service, implementation_type& other_impl) { service_impl_.move_assign(impl, other_service.service_impl_, other_impl); } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Destroy an object handle implementation. void destroy(implementation_type& impl) { service_impl_.destroy(impl); } /// Assign an existing native handle to an object handle. asio::error_code assign(implementation_type& impl, const native_handle_type& handle, asio::error_code& ec) { return service_impl_.assign(impl, handle, ec); } /// Determine whether the handle is open. bool is_open(const implementation_type& impl) const { return service_impl_.is_open(impl); } /// Close an object handle implementation. asio::error_code close(implementation_type& impl, asio::error_code& ec) { return service_impl_.close(impl, ec); } /// Get the native handle implementation. native_handle_type native_handle(implementation_type& impl) { return service_impl_.native_handle(impl); } /// Cancel all asynchronous operations associated with the handle. asio::error_code cancel(implementation_type& impl, asio::error_code& ec) { return service_impl_.cancel(impl, ec); } // Wait for a signaled state. void wait(implementation_type& impl, asio::error_code& ec) { service_impl_.wait(impl, ec); } /// Start an asynchronous wait. 
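  /// Application code normally reaches this operation through the
  /// asio::windows::object_handle I/O object rather than by calling the
  /// service directly. A minimal sketch of that typical usage (the names
  /// @c evt and @c wait_handler are hypothetical, and @c wait_handler must
  /// have the signature @c void(const asio::error_code&)) might be:
  /// @code
  /// HANDLE evt = ::CreateEvent(0, TRUE, FALSE, 0);
  /// asio::windows::object_handle obj(io_service, evt);
  /// obj.async_wait(wait_handler);
  /// @endcode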
template ASIO_INITFN_RESULT_TYPE(WaitHandler, void (asio::error_code)) async_wait(implementation_type& impl, ASIO_MOVE_ARG(WaitHandler) handler) { asio::detail::async_result_init< WaitHandler, void (asio::error_code)> init( ASIO_MOVE_CAST(WaitHandler)(handler)); service_impl_.async_wait(impl, init.handler); return init.result.get(); } private: // Destroy all user-defined handler objects owned by the service. void shutdown_service() { service_impl_.shutdown_service(); } // The platform-specific implementation. service_impl_type service_impl_; }; } // namespace windows } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_WINDOWS_OBJECT_HANDLE_SERVICE_HPP galera-3-25.3.20/asio/asio/windows/stream_handle_service.hpp0000644000015300001660000001437613042054732023570 0ustar jenkinsjenkins// // windows/stream_handle_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_WINDOWS_STREAM_HANDLE_SERVICE_HPP #define ASIO_WINDOWS_STREAM_HANDLE_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_WINDOWS_STREAM_HANDLE) \ || defined(GENERATING_DOCUMENTATION) #include #include "asio/async_result.hpp" #include "asio/detail/win_iocp_handle_service.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace windows { /// Default service implementation for a stream handle. class stream_handle_service #if defined(GENERATING_DOCUMENTATION) : public asio::io_service::service #else : public asio::detail::service_base #endif { public: #if defined(GENERATING_DOCUMENTATION) /// The unique service identifier. static asio::io_service::id id; #endif private: // The type of the platform-specific implementation. typedef detail::win_iocp_handle_service service_impl_type; public: /// The type of a stream handle implementation. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined implementation_type; #else typedef service_impl_type::implementation_type implementation_type; #endif /// (Deprecated: Use native_handle_type.) The native handle type. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined native_type; #else typedef service_impl_type::native_handle_type native_type; #endif /// The native handle type. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined native_handle_type; #else typedef service_impl_type::native_handle_type native_handle_type; #endif /// Construct a new stream handle service for the specified io_service. explicit stream_handle_service(asio::io_service& io_service) : asio::detail::service_base(io_service), service_impl_(io_service) { } /// Construct a new stream handle implementation. void construct(implementation_type& impl) { service_impl_.construct(impl); } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a new stream handle implementation. void move_construct(implementation_type& impl, implementation_type& other_impl) { service_impl_.move_construct(impl, other_impl); } /// Move-assign from another stream handle implementation. 
void move_assign(implementation_type& impl, stream_handle_service& other_service, implementation_type& other_impl) { service_impl_.move_assign(impl, other_service.service_impl_, other_impl); } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Destroy a stream handle implementation. void destroy(implementation_type& impl) { service_impl_.destroy(impl); } /// Assign an existing native handle to a stream handle. asio::error_code assign(implementation_type& impl, const native_handle_type& handle, asio::error_code& ec) { return service_impl_.assign(impl, handle, ec); } /// Determine whether the handle is open. bool is_open(const implementation_type& impl) const { return service_impl_.is_open(impl); } /// Close a stream handle implementation. asio::error_code close(implementation_type& impl, asio::error_code& ec) { return service_impl_.close(impl, ec); } /// (Deprecated: Use native_handle().) Get the native handle implementation. native_type native(implementation_type& impl) { return service_impl_.native_handle(impl); } /// Get the native handle implementation. native_handle_type native_handle(implementation_type& impl) { return service_impl_.native_handle(impl); } /// Cancel all asynchronous operations associated with the handle. asio::error_code cancel(implementation_type& impl, asio::error_code& ec) { return service_impl_.cancel(impl, ec); } /// Write the given data to the stream. template std::size_t write_some(implementation_type& impl, const ConstBufferSequence& buffers, asio::error_code& ec) { return service_impl_.write_some(impl, buffers, ec); } /// Start an asynchronous write. template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write_some(implementation_type& impl, const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler) { asio::detail::async_result_init< WriteHandler, void (asio::error_code, std::size_t)> init( ASIO_MOVE_CAST(WriteHandler)(handler)); service_impl_.async_write_some(impl, buffers, init.handler); return init.result.get(); } /// Read some data from the stream. template std::size_t read_some(implementation_type& impl, const MutableBufferSequence& buffers, asio::error_code& ec) { return service_impl_.read_some(impl, buffers, ec); } /// Start an asynchronous read. template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_some(implementation_type& impl, const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler) { asio::detail::async_result_init< ReadHandler, void (asio::error_code, std::size_t)> init( ASIO_MOVE_CAST(ReadHandler)(handler)); service_impl_.async_read_some(impl, buffers, init.handler); return init.result.get(); } private: // Destroy all user-defined handler objects owned by the service. void shutdown_service() { service_impl_.shutdown_service(); } // The platform-specific implementation. service_impl_type service_impl_; }; } // namespace windows } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_WINDOWS_STREAM_HANDLE) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_WINDOWS_STREAM_HANDLE_SERVICE_HPP galera-3-25.3.20/asio/asio/windows/object_handle.hpp0000644000015300001660000000202413042054732022006 0ustar jenkinsjenkins// // windows/object_handle.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2011 Boris Schaeling (boris@highscore.de) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_WINDOWS_OBJECT_HANDLE_HPP #define ASIO_WINDOWS_OBJECT_HANDLE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) \ || defined(GENERATING_DOCUMENTATION) #include "asio/windows/basic_object_handle.hpp" namespace asio { namespace windows { /// Typedef for the typical usage of an object handle. typedef basic_object_handle<> object_handle; } // namespace windows } // namespace asio #endif // defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_WINDOWS_OBJECT_HANDLE_HPP galera-3-25.3.20/asio/asio/windows/basic_handle.hpp0000644000015300001660000002116113042054732021624 0ustar jenkinsjenkins// // windows/basic_handle.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_WINDOWS_BASIC_HANDLE_HPP #define ASIO_WINDOWS_BASIC_HANDLE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE) \ || defined(ASIO_HAS_WINDOWS_STREAM_HANDLE) \ || defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) \ || defined(GENERATING_DOCUMENTATION) #include "asio/basic_io_object.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace windows { /// Provides Windows handle functionality. /** * The windows::basic_handle class template provides the ability to wrap a * Windows handle. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ template class basic_handle : public basic_io_object { public: /// (Deprecated: Use native_handle_type.) The native representation of a /// handle. typedef typename HandleService::native_handle_type native_type; /// The native representation of a handle. typedef typename HandleService::native_handle_type native_handle_type; /// A basic_handle is always the lowest layer. typedef basic_handle lowest_layer_type; /// Construct a basic_handle without opening it. /** * This constructor creates a handle without opening it. * * @param io_service The io_service object that the handle will use to * dispatch handlers for any asynchronous operations performed on the handle. */ explicit basic_handle(asio::io_service& io_service) : basic_io_object(io_service) { } /// Construct a basic_handle on an existing native handle. /** * This constructor creates a handle object to hold an existing native handle. * * @param io_service The io_service object that the handle will use to * dispatch handlers for any asynchronous operations performed on the handle. * * @param handle A native handle. * * @throws asio::system_error Thrown on failure. */ basic_handle(asio::io_service& io_service, const native_handle_type& handle) : basic_io_object(io_service) { asio::error_code ec; this->get_service().assign(this->get_implementation(), handle, ec); asio::detail::throw_error(ec, "assign"); } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a basic_handle from another. /** * This constructor moves a handle from one object to another. 
* * @param other The other basic_handle object from which the move will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_handle(io_service&) constructor. */ basic_handle(basic_handle&& other) : basic_io_object( ASIO_MOVE_CAST(basic_handle)(other)) { } /// Move-assign a basic_handle from another. /** * This assignment operator moves a handle from one object to another. * * @param other The other basic_handle object from which the move will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_handle(io_service&) constructor. */ basic_handle& operator=(basic_handle&& other) { basic_io_object::operator=( ASIO_MOVE_CAST(basic_handle)(other)); return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Get a reference to the lowest layer. /** * This function returns a reference to the lowest layer in a stack of * layers. Since a basic_handle cannot contain any further layers, it simply * returns a reference to itself. * * @return A reference to the lowest layer in the stack of layers. Ownership * is not transferred to the caller. */ lowest_layer_type& lowest_layer() { return *this; } /// Get a const reference to the lowest layer. /** * This function returns a const reference to the lowest layer in a stack of * layers. Since a basic_handle cannot contain any further layers, it simply * returns a reference to itself. * * @return A const reference to the lowest layer in the stack of layers. * Ownership is not transferred to the caller. */ const lowest_layer_type& lowest_layer() const { return *this; } /// Assign an existing native handle to the handle. /* * This function opens the handle to hold an existing native handle. * * @param handle A native handle. * * @throws asio::system_error Thrown on failure. */ void assign(const native_handle_type& handle) { asio::error_code ec; this->get_service().assign(this->get_implementation(), handle, ec); asio::detail::throw_error(ec, "assign"); } /// Assign an existing native handle to the handle. /* * This function opens the handle to hold an existing native handle. * * @param handle A native handle. * * @param ec Set to indicate what error occurred, if any. */ asio::error_code assign(const native_handle_type& handle, asio::error_code& ec) { return this->get_service().assign(this->get_implementation(), handle, ec); } /// Determine whether the handle is open. bool is_open() const { return this->get_service().is_open(this->get_implementation()); } /// Close the handle. /** * This function is used to close the handle. Any asynchronous read or write * operations will be cancelled immediately, and will complete with the * asio::error::operation_aborted error. * * @throws asio::system_error Thrown on failure. */ void close() { asio::error_code ec; this->get_service().close(this->get_implementation(), ec); asio::detail::throw_error(ec, "close"); } /// Close the handle. /** * This function is used to close the handle. Any asynchronous read or write * operations will be cancelled immediately, and will complete with the * asio::error::operation_aborted error. * * @param ec Set to indicate what error occurred, if any. */ asio::error_code close(asio::error_code& ec) { return this->get_service().close(this->get_implementation(), ec); } /// (Deprecated: Use native_handle().) Get the native handle representation. /** * This function may be used to obtain the underlying representation of the * handle. 
This is intended to allow access to native handle functionality * that is not otherwise provided. */ native_type native() { return this->get_service().native_handle(this->get_implementation()); } /// Get the native handle representation. /** * This function may be used to obtain the underlying representation of the * handle. This is intended to allow access to native handle functionality * that is not otherwise provided. */ native_handle_type native_handle() { return this->get_service().native_handle(this->get_implementation()); } /// Cancel all asynchronous operations associated with the handle. /** * This function causes all outstanding asynchronous read or write operations * to finish immediately, and the handlers for cancelled operations will be * passed the asio::error::operation_aborted error. * * @throws asio::system_error Thrown on failure. */ void cancel() { asio::error_code ec; this->get_service().cancel(this->get_implementation(), ec); asio::detail::throw_error(ec, "cancel"); } /// Cancel all asynchronous operations associated with the handle. /** * This function causes all outstanding asynchronous read or write operations * to finish immediately, and the handlers for cancelled operations will be * passed the asio::error::operation_aborted error. * * @param ec Set to indicate what error occurred, if any. */ asio::error_code cancel(asio::error_code& ec) { return this->get_service().cancel(this->get_implementation(), ec); } protected: /// Protected destructor to prevent deletion through this type. ~basic_handle() { } }; } // namespace windows } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE) // || defined(ASIO_HAS_WINDOWS_STREAM_HANDLE) // || defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_WINDOWS_BASIC_HANDLE_HPP galera-3-25.3.20/asio/asio/windows/basic_random_access_handle.hpp0000644000015300001660000003457113042054732024516 0ustar jenkinsjenkins// // windows/basic_random_access_handle.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_WINDOWS_BASIC_RANDOM_ACCESS_HANDLE_HPP #define ASIO_WINDOWS_BASIC_RANDOM_ACCESS_HANDLE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE) \ || defined(GENERATING_DOCUMENTATION) #include #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/windows/basic_handle.hpp" #include "asio/windows/random_access_handle_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace windows { /// Provides random-access handle functionality. /** * The windows::basic_random_access_handle class template provides asynchronous * and blocking random-access handle functionality. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ template class basic_random_access_handle : public basic_handle { public: /// (Deprecated: Use native_handle_type.) The native representation of a /// handle. typedef typename RandomAccessHandleService::native_handle_type native_type; /// The native representation of a handle. 
typedef typename RandomAccessHandleService::native_handle_type native_handle_type; /// Construct a basic_random_access_handle without opening it. /** * This constructor creates a random-access handle without opening it. The * handle needs to be opened before data can be written to or read from it. * * @param io_service The io_service object that the random-access handle will * use to dispatch handlers for any asynchronous operations performed on the * handle. */ explicit basic_random_access_handle(asio::io_service& io_service) : basic_handle(io_service) { } /// Construct a basic_random_access_handle on an existing native handle. /** * This constructor creates a random-access handle object to hold an existing * native handle. * * @param io_service The io_service object that the random-access handle will * use to dispatch handlers for any asynchronous operations performed on the * handle. * * @param handle The new underlying handle implementation. * * @throws asio::system_error Thrown on failure. */ basic_random_access_handle(asio::io_service& io_service, const native_handle_type& handle) : basic_handle(io_service, handle) { } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a basic_random_access_handle from another. /** * This constructor moves a random-access handle from one object to another. * * @param other The other basic_random_access_handle object from which the * move will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_random_access_handle(io_service&) * constructor. */ basic_random_access_handle(basic_random_access_handle&& other) : basic_handle( ASIO_MOVE_CAST(basic_random_access_handle)(other)) { } /// Move-assign a basic_random_access_handle from another. /** * This assignment operator moves a random-access handle from one object to * another. * * @param other The other basic_random_access_handle object from which the * move will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_random_access_handle(io_service&) * constructor. */ basic_random_access_handle& operator=(basic_random_access_handle&& other) { basic_handle::operator=( ASIO_MOVE_CAST(basic_random_access_handle)(other)); return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Write some data to the handle at the specified offset. /** * This function is used to write data to the random-access handle. The * function call will block until one or more bytes of the data has been * written successfully, or until an error occurs. * * @param offset The offset at which the data will be written. * * @param buffers One or more data buffers to be written to the handle. * * @returns The number of bytes written. * * @throws asio::system_error Thrown on failure. An error code of * asio::error::eof indicates that the connection was closed by the * peer. * * @note The write_some_at operation may not write all of the data. Consider * using the @ref write_at function if you need to ensure that all data is * written before the blocking operation completes. * * @par Example * To write a single data buffer use the @ref buffer function as follows: * @code * handle.write_some_at(42, asio::buffer(data, size)); * @endcode * See the @ref buffer documentation for information on writing multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. 
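 *
 * When the intent is to write the complete buffer at the given offset, the
 * @ref write_at free function mentioned above can be used instead; as a
 * sketch, reusing the @c data and @c size placeholders from the example
 * above:
 * @code
 * asio::write_at(handle, 42, asio::buffer(data, size));
 * @endcode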
*/ template std::size_t write_some_at(uint64_t offset, const ConstBufferSequence& buffers) { asio::error_code ec; std::size_t s = this->get_service().write_some_at( this->get_implementation(), offset, buffers, ec); asio::detail::throw_error(ec, "write_some_at"); return s; } /// Write some data to the handle at the specified offset. /** * This function is used to write data to the random-access handle. The * function call will block until one or more bytes of the data has been * written successfully, or until an error occurs. * * @param offset The offset at which the data will be written. * * @param buffers One or more data buffers to be written to the handle. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes written. Returns 0 if an error occurred. * * @note The write_some operation may not transmit all of the data to the * peer. Consider using the @ref write_at function if you need to ensure that * all data is written before the blocking operation completes. */ template std::size_t write_some_at(uint64_t offset, const ConstBufferSequence& buffers, asio::error_code& ec) { return this->get_service().write_some_at( this->get_implementation(), offset, buffers, ec); } /// Start an asynchronous write at the specified offset. /** * This function is used to asynchronously write data to the random-access * handle. The function call always returns immediately. * * @param offset The offset at which the data will be written. * * @param buffers One or more data buffers to be written to the handle. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param handler The handler to be called when the write operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes written. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @note The write operation may not transmit all of the data to the peer. * Consider using the @ref async_write_at function if you need to ensure that * all data is written before the asynchronous operation completes. * * @par Example * To write a single data buffer use the @ref buffer function as follows: * @code * handle.async_write_some_at(42, asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on writing multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write_some_at(uint64_t offset, const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a WriteHandler. ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; return this->get_service().async_write_some_at(this->get_implementation(), offset, buffers, ASIO_MOVE_CAST(WriteHandler)(handler)); } /// Read some data from the handle at the specified offset. 
/** * This function is used to read data from the random-access handle. The * function call will block until one or more bytes of data has been read * successfully, or until an error occurs. * * @param offset The offset at which the data will be read. * * @param buffers One or more buffers into which the data will be read. * * @returns The number of bytes read. * * @throws asio::system_error Thrown on failure. An error code of * asio::error::eof indicates that the connection was closed by the * peer. * * @note The read_some operation may not read all of the requested number of * bytes. Consider using the @ref read_at function if you need to ensure that * the requested amount of data is read before the blocking operation * completes. * * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code * handle.read_some_at(42, asio::buffer(data, size)); * @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t read_some_at(uint64_t offset, const MutableBufferSequence& buffers) { asio::error_code ec; std::size_t s = this->get_service().read_some_at( this->get_implementation(), offset, buffers, ec); asio::detail::throw_error(ec, "read_some_at"); return s; } /// Read some data from the handle at the specified offset. /** * This function is used to read data from the random-access handle. The * function call will block until one or more bytes of data has been read * successfully, or until an error occurs. * * @param offset The offset at which the data will be read. * * @param buffers One or more buffers into which the data will be read. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes read. Returns 0 if an error occurred. * * @note The read_some operation may not read all of the requested number of * bytes. Consider using the @ref read_at function if you need to ensure that * the requested amount of data is read before the blocking operation * completes. */ template std::size_t read_some_at(uint64_t offset, const MutableBufferSequence& buffers, asio::error_code& ec) { return this->get_service().read_some_at( this->get_implementation(), offset, buffers, ec); } /// Start an asynchronous read at the specified offset. /** * This function is used to asynchronously read data from the random-access * handle. The function call always returns immediately. * * @param offset The offset at which the data will be read. * * @param buffers One or more buffers into which the data will be read. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes read. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @note The read operation may not read all of the requested number of bytes. 
* Consider using the @ref async_read_at function if you need to ensure that * the requested amount of data is read before the asynchronous operation * completes. * * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code * handle.async_read_some_at(42, asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_some_at(uint64_t offset, const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ReadHandler. ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; return this->get_service().async_read_some_at(this->get_implementation(), offset, buffers, ASIO_MOVE_CAST(ReadHandler)(handler)); } }; } // namespace windows } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_WINDOWS_BASIC_RANDOM_ACCESS_HANDLE_HPP galera-3-25.3.20/asio/asio/windows/random_access_handle_service.hpp0000644000015300001660000001517513042054732025074 0ustar jenkinsjenkins// // windows/random_access_handle_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_WINDOWS_RANDOM_ACCESS_HANDLE_SERVICE_HPP #define ASIO_WINDOWS_RANDOM_ACCESS_HANDLE_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE) \ || defined(GENERATING_DOCUMENTATION) #include #include "asio/async_result.hpp" #include "asio/detail/cstdint.hpp" #include "asio/detail/win_iocp_handle_service.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace windows { /// Default service implementation for a random-access handle. class random_access_handle_service #if defined(GENERATING_DOCUMENTATION) : public asio::io_service::service #else : public asio::detail::service_base #endif { public: #if defined(GENERATING_DOCUMENTATION) /// The unique service identifier. static asio::io_service::id id; #endif private: // The type of the platform-specific implementation. typedef detail::win_iocp_handle_service service_impl_type; public: /// The type of a random-access handle implementation. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined implementation_type; #else typedef service_impl_type::implementation_type implementation_type; #endif /// (Deprecated: Use native_handle_type.) The native handle type. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined native_type; #else typedef service_impl_type::native_handle_type native_type; #endif /// The native handle type. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined native_handle_type; #else typedef service_impl_type::native_handle_type native_handle_type; #endif /// Construct a new random-access handle service for the specified io_service. 
explicit random_access_handle_service(asio::io_service& io_service) : asio::detail::service_base< random_access_handle_service>(io_service), service_impl_(io_service) { } /// Construct a new random-access handle implementation. void construct(implementation_type& impl) { service_impl_.construct(impl); } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a new random-access handle implementation. void move_construct(implementation_type& impl, implementation_type& other_impl) { service_impl_.move_construct(impl, other_impl); } /// Move-assign from another random-access handle implementation. void move_assign(implementation_type& impl, random_access_handle_service& other_service, implementation_type& other_impl) { service_impl_.move_assign(impl, other_service.service_impl_, other_impl); } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Destroy a random-access handle implementation. void destroy(implementation_type& impl) { service_impl_.destroy(impl); } /// Assign an existing native handle to a random-access handle. asio::error_code assign(implementation_type& impl, const native_handle_type& handle, asio::error_code& ec) { return service_impl_.assign(impl, handle, ec); } /// Determine whether the handle is open. bool is_open(const implementation_type& impl) const { return service_impl_.is_open(impl); } /// Close a random-access handle implementation. asio::error_code close(implementation_type& impl, asio::error_code& ec) { return service_impl_.close(impl, ec); } /// (Deprecated: Use native_handle().) Get the native handle implementation. native_type native(implementation_type& impl) { return service_impl_.native_handle(impl); } /// Get the native handle implementation. native_handle_type native_handle(implementation_type& impl) { return service_impl_.native_handle(impl); } /// Cancel all asynchronous operations associated with the handle. asio::error_code cancel(implementation_type& impl, asio::error_code& ec) { return service_impl_.cancel(impl, ec); } /// Write the given data at the specified offset. template std::size_t write_some_at(implementation_type& impl, uint64_t offset, const ConstBufferSequence& buffers, asio::error_code& ec) { return service_impl_.write_some_at(impl, offset, buffers, ec); } /// Start an asynchronous write at the specified offset. template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write_some_at(implementation_type& impl, uint64_t offset, const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler) { asio::detail::async_result_init< WriteHandler, void (asio::error_code, std::size_t)> init( ASIO_MOVE_CAST(WriteHandler)(handler)); service_impl_.async_write_some_at(impl, offset, buffers, init.handler); return init.result.get(); } /// Read some data from the specified offset. template std::size_t read_some_at(implementation_type& impl, uint64_t offset, const MutableBufferSequence& buffers, asio::error_code& ec) { return service_impl_.read_some_at(impl, offset, buffers, ec); } /// Start an asynchronous read at the specified offset. 
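  /// As with the other operations of this service, applications normally
  /// invoke this through asio::windows::random_access_handle. A minimal
  /// sketch (the names @c file_handle, @c data, @c size and @c read_handler
  /// are hypothetical, and @c read_handler must have the signature
  /// @c void(const asio::error_code&, std::size_t)) might be:
  /// @code
  /// asio::windows::random_access_handle file(io_service, file_handle);
  /// file.async_read_some_at(0, asio::buffer(data, size), read_handler);
  /// @endcode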
template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_some_at(implementation_type& impl, uint64_t offset, const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler) { asio::detail::async_result_init< ReadHandler, void (asio::error_code, std::size_t)> init( ASIO_MOVE_CAST(ReadHandler)(handler)); service_impl_.async_read_some_at(impl, offset, buffers, init.handler); return init.result.get(); } private: // Destroy all user-defined handler objects owned by the service. void shutdown_service() { service_impl_.shutdown_service(); } // The platform-specific implementation. service_impl_type service_impl_; }; } // namespace windows } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_WINDOWS_RANDOM_ACCESS_HANDLE_SERVICE_HPP galera-3-25.3.20/asio/asio/windows/basic_object_handle.hpp0000644000015300001660000001360613042054732023157 0ustar jenkinsjenkins// // windows/basic_object_handle.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2011 Boris Schaeling (boris@highscore.de) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_WINDOWS_BASIC_OBJECT_HANDLE_HPP #define ASIO_WINDOWS_BASIC_OBJECT_HANDLE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) \ || defined(GENERATING_DOCUMENTATION) #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/windows/basic_handle.hpp" #include "asio/windows/object_handle_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace windows { /// Provides object-oriented handle functionality. /** * The windows::basic_object_handle class template provides asynchronous and * blocking object-oriented handle functionality. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ template class basic_object_handle : public basic_handle { public: /// The native representation of a handle. typedef typename ObjectHandleService::native_handle_type native_handle_type; /// Construct a basic_object_handle without opening it. /** * This constructor creates an object handle without opening it. * * @param io_service The io_service object that the object handle will use to * dispatch handlers for any asynchronous operations performed on the handle. */ explicit basic_object_handle(asio::io_service& io_service) : basic_handle(io_service) { } /// Construct a basic_object_handle on an existing native handle. /** * This constructor creates an object handle object to hold an existing native * handle. * * @param io_service The io_service object that the object handle will use to * dispatch handlers for any asynchronous operations performed on the handle. * * @param native_handle The new underlying handle implementation. * * @throws asio::system_error Thrown on failure. */ basic_object_handle(asio::io_service& io_service, const native_handle_type& native_handle) : basic_handle(io_service, native_handle) { } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a basic_object_handle from another. /** * This constructor moves an object handle from one object to another. 
* * @param other The other basic_object_handle object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_object_handle(io_service&) constructor. */ basic_object_handle(basic_object_handle&& other) : basic_handle( ASIO_MOVE_CAST(basic_object_handle)(other)) { } /// Move-assign a basic_object_handle from another. /** * This assignment operator moves an object handle from one object to another. * * @param other The other basic_object_handle object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_object_handle(io_service&) constructor. */ basic_object_handle& operator=(basic_object_handle&& other) { basic_handle::operator=( ASIO_MOVE_CAST(basic_object_handle)(other)); return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Perform a blocking wait on the object handle. /** * This function is used to wait for the object handle to be set to the * signalled state. This function blocks and does not return until the object * handle has been set to the signalled state. * * @throws asio::system_error Thrown on failure. */ void wait() { asio::error_code ec; this->get_service().wait(this->get_implementation(), ec); asio::detail::throw_error(ec, "wait"); } /// Perform a blocking wait on the object handle. /** * This function is used to wait for the object handle to be set to the * signalled state. This function blocks and does not return until the object * handle has been set to the signalled state. * * @param ec Set to indicate what error occurred, if any. */ void wait(asio::error_code& ec) { this->get_service().wait(this->get_implementation(), ec); } /// Start an asynchronous wait on the object handle. /** * This function is be used to initiate an asynchronous wait against the * object handle. It always returns immediately. * * @param handler The handler to be called when the object handle is set to * the signalled state. Copies will be made of the handler as required. The * function signature of the handler must be: * @code void handler( * const asio::error_code& error // Result of operation. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). */ template ASIO_INITFN_RESULT_TYPE(WaitHandler, void (asio::error_code)) async_wait(ASIO_MOVE_ARG(WaitHandler) handler) { return this->get_service().async_wait(this->get_implementation(), ASIO_MOVE_CAST(WaitHandler)(handler)); } }; } // namespace windows } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_WINDOWS_BASIC_OBJECT_HANDLE_HPP galera-3-25.3.20/asio/asio/windows/stream_handle.hpp0000644000015300001660000000174113042054732022040 0ustar jenkinsjenkins// // windows/stream_handle.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_WINDOWS_STREAM_HANDLE_HPP #define ASIO_WINDOWS_STREAM_HANDLE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_WINDOWS_STREAM_HANDLE) \ || defined(GENERATING_DOCUMENTATION) #include "asio/windows/basic_stream_handle.hpp" namespace asio { namespace windows { /// Typedef for the typical usage of a stream-oriented handle. typedef basic_stream_handle<> stream_handle; } // namespace windows } // namespace asio #endif // defined(ASIO_HAS_WINDOWS_STREAM_HANDLE) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_WINDOWS_STREAM_HANDLE_HPP galera-3-25.3.20/asio/asio/basic_raw_socket.hpp0000644000015300001660000011100113042054732021031 0ustar jenkinsjenkins// // basic_raw_socket.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BASIC_RAW_SOCKET_HPP #define ASIO_BASIC_RAW_SOCKET_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/basic_socket.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/type_traits.hpp" #include "asio/error.hpp" #include "asio/raw_socket_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Provides raw-oriented socket functionality. /** * The basic_raw_socket class template provides asynchronous and blocking * raw-oriented socket functionality. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ template > class basic_raw_socket : public basic_socket { public: /// (Deprecated: Use native_handle_type.) The native representation of a /// socket. typedef typename RawSocketService::native_handle_type native_type; /// The native representation of a socket. typedef typename RawSocketService::native_handle_type native_handle_type; /// The protocol type. typedef Protocol protocol_type; /// The endpoint type. typedef typename Protocol::endpoint endpoint_type; /// Construct a basic_raw_socket without opening it. /** * This constructor creates a raw socket without opening it. The open() * function must be called before data can be sent or received on the socket. * * @param io_service The io_service object that the raw socket will use * to dispatch handlers for any asynchronous operations performed on the * socket. */ explicit basic_raw_socket(asio::io_service& io_service) : basic_socket(io_service) { } /// Construct and open a basic_raw_socket. /** * This constructor creates and opens a raw socket. * * @param io_service The io_service object that the raw socket will use * to dispatch handlers for any asynchronous operations performed on the * socket. * * @param protocol An object specifying protocol parameters to be used. * * @throws asio::system_error Thrown on failure. */ basic_raw_socket(asio::io_service& io_service, const protocol_type& protocol) : basic_socket(io_service, protocol) { } /// Construct a basic_raw_socket, opening it and binding it to the given /// local endpoint. 
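  /// As an illustrative sketch only, constructing a raw ICMPv4 socket bound
  /// to the wildcard local address might look like:
  /// @code
  /// asio::ip::icmp::endpoint endpoint(asio::ip::icmp::v4(), 0);
  /// asio::ip::icmp::socket socket(io_service, endpoint);
  /// @endcode
  /// where asio::ip::icmp::socket is the raw-socket typedef that asio
  /// provides for the ICMP protocol.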
/** * This constructor creates a raw socket and automatically opens it bound * to the specified endpoint on the local machine. The protocol used is the * protocol associated with the given endpoint. * * @param io_service The io_service object that the raw socket will use * to dispatch handlers for any asynchronous operations performed on the * socket. * * @param endpoint An endpoint on the local machine to which the raw * socket will be bound. * * @throws asio::system_error Thrown on failure. */ basic_raw_socket(asio::io_service& io_service, const endpoint_type& endpoint) : basic_socket(io_service, endpoint) { } /// Construct a basic_raw_socket on an existing native socket. /** * This constructor creates a raw socket object to hold an existing * native socket. * * @param io_service The io_service object that the raw socket will use * to dispatch handlers for any asynchronous operations performed on the * socket. * * @param protocol An object specifying protocol parameters to be used. * * @param native_socket The new underlying socket implementation. * * @throws asio::system_error Thrown on failure. */ basic_raw_socket(asio::io_service& io_service, const protocol_type& protocol, const native_handle_type& native_socket) : basic_socket( io_service, protocol, native_socket) { } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a basic_raw_socket from another. /** * This constructor moves a raw socket from one object to another. * * @param other The other basic_raw_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_raw_socket(io_service&) constructor. */ basic_raw_socket(basic_raw_socket&& other) : basic_socket( ASIO_MOVE_CAST(basic_raw_socket)(other)) { } /// Move-assign a basic_raw_socket from another. /** * This assignment operator moves a raw socket from one object to another. * * @param other The other basic_raw_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_raw_socket(io_service&) constructor. */ basic_raw_socket& operator=(basic_raw_socket&& other) { basic_socket::operator=( ASIO_MOVE_CAST(basic_raw_socket)(other)); return *this; } /// Move-construct a basic_raw_socket from a socket of another protocol type. /** * This constructor moves a raw socket from one object to another. * * @param other The other basic_raw_socket object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_raw_socket(io_service&) constructor. */ template basic_raw_socket(basic_raw_socket&& other, typename enable_if::value>::type* = 0) : basic_socket( ASIO_MOVE_CAST2(basic_raw_socket< Protocol1, RawSocketService1>)(other)) { } /// Move-assign a basic_raw_socket from a socket of another protocol type. /** * This assignment operator moves a raw socket from one object to another. * * @param other The other basic_raw_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_raw_socket(io_service&) constructor. 
*/ template typename enable_if::value, basic_raw_socket>::type& operator=( basic_raw_socket&& other) { basic_socket::operator=( ASIO_MOVE_CAST2(basic_raw_socket< Protocol1, RawSocketService1>)(other)); return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Send some data on a connected socket. /** * This function is used to send data on the raw socket. The function call * will block until the data has been sent successfully or an error occurs. * * @param buffers One ore more data buffers to be sent on the socket. * * @returns The number of bytes sent. * * @throws asio::system_error Thrown on failure. * * @note The send operation can only be used with a connected socket. Use * the send_to function to send data on an unconnected raw socket. * * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code socket.send(asio::buffer(data, size)); @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t send(const ConstBufferSequence& buffers) { asio::error_code ec; std::size_t s = this->get_service().send( this->get_implementation(), buffers, 0, ec); asio::detail::throw_error(ec, "send"); return s; } /// Send some data on a connected socket. /** * This function is used to send data on the raw socket. The function call * will block until the data has been sent successfully or an error occurs. * * @param buffers One ore more data buffers to be sent on the socket. * * @param flags Flags specifying how the send call is to be made. * * @returns The number of bytes sent. * * @throws asio::system_error Thrown on failure. * * @note The send operation can only be used with a connected socket. Use * the send_to function to send data on an unconnected raw socket. */ template std::size_t send(const ConstBufferSequence& buffers, socket_base::message_flags flags) { asio::error_code ec; std::size_t s = this->get_service().send( this->get_implementation(), buffers, flags, ec); asio::detail::throw_error(ec, "send"); return s; } /// Send some data on a connected socket. /** * This function is used to send data on the raw socket. The function call * will block until the data has been sent successfully or an error occurs. * * @param buffers One or more data buffers to be sent on the socket. * * @param flags Flags specifying how the send call is to be made. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes sent. * * @note The send operation can only be used with a connected socket. Use * the send_to function to send data on an unconnected raw socket. */ template std::size_t send(const ConstBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { return this->get_service().send( this->get_implementation(), buffers, flags, ec); } /// Start an asynchronous send on a connected socket. /** * This function is used to send data on the raw socket. The function call * will block until the data has been sent successfully or an error occurs. * * @param buffers One or more data buffers to be sent on the socket. Although * the buffers object may be copied as necessary, ownership of the underlying * memory blocks is retained by the caller, which must guarantee that they * remain valid until the handler is called. * * @param handler The handler to be called when the send operation completes. * Copies will be made of the handler as required. 
The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes sent. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @note The async_send operation can only be used with a connected socket. * Use the async_send_to function to send data on an unconnected raw * socket. * * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code * socket.async_send(asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_send(const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a WriteHandler. ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; return this->get_service().async_send(this->get_implementation(), buffers, 0, ASIO_MOVE_CAST(WriteHandler)(handler)); } /// Start an asynchronous send on a connected socket. /** * This function is used to send data on the raw socket. The function call * will block until the data has been sent successfully or an error occurs. * * @param buffers One or more data buffers to be sent on the socket. Although * the buffers object may be copied as necessary, ownership of the underlying * memory blocks is retained by the caller, which must guarantee that they * remain valid until the handler is called. * * @param flags Flags specifying how the send call is to be made. * * @param handler The handler to be called when the send operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes sent. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @note The async_send operation can only be used with a connected socket. * Use the async_send_to function to send data on an unconnected raw * socket. */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_send(const ConstBufferSequence& buffers, socket_base::message_flags flags, ASIO_MOVE_ARG(WriteHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a WriteHandler. ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; return this->get_service().async_send(this->get_implementation(), buffers, flags, ASIO_MOVE_CAST(WriteHandler)(handler)); } /// Send raw data to the specified endpoint. /** * This function is used to send raw data to the specified remote endpoint. * The function call will block until the data has been sent successfully or * an error occurs. * * @param buffers One or more data buffers to be sent to the remote endpoint. 
* * @param destination The remote endpoint to which the data will be sent. * * @returns The number of bytes sent. * * @throws asio::system_error Thrown on failure. * * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code * asio::ip::udp::endpoint destination( * asio::ip::address::from_string("1.2.3.4"), 12345); * socket.send_to(asio::buffer(data, size), destination); * @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t send_to(const ConstBufferSequence& buffers, const endpoint_type& destination) { asio::error_code ec; std::size_t s = this->get_service().send_to( this->get_implementation(), buffers, destination, 0, ec); asio::detail::throw_error(ec, "send_to"); return s; } /// Send raw data to the specified endpoint. /** * This function is used to send raw data to the specified remote endpoint. * The function call will block until the data has been sent successfully or * an error occurs. * * @param buffers One or more data buffers to be sent to the remote endpoint. * * @param destination The remote endpoint to which the data will be sent. * * @param flags Flags specifying how the send call is to be made. * * @returns The number of bytes sent. * * @throws asio::system_error Thrown on failure. */ template std::size_t send_to(const ConstBufferSequence& buffers, const endpoint_type& destination, socket_base::message_flags flags) { asio::error_code ec; std::size_t s = this->get_service().send_to( this->get_implementation(), buffers, destination, flags, ec); asio::detail::throw_error(ec, "send_to"); return s; } /// Send raw data to the specified endpoint. /** * This function is used to send raw data to the specified remote endpoint. * The function call will block until the data has been sent successfully or * an error occurs. * * @param buffers One or more data buffers to be sent to the remote endpoint. * * @param destination The remote endpoint to which the data will be sent. * * @param flags Flags specifying how the send call is to be made. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes sent. */ template std::size_t send_to(const ConstBufferSequence& buffers, const endpoint_type& destination, socket_base::message_flags flags, asio::error_code& ec) { return this->get_service().send_to(this->get_implementation(), buffers, destination, flags, ec); } /// Start an asynchronous send. /** * This function is used to asynchronously send raw data to the specified * remote endpoint. The function call always returns immediately. * * @param buffers One or more data buffers to be sent to the remote endpoint. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param destination The remote endpoint to which the data will be sent. * Copies will be made of the endpoint as required. * * @param handler The handler to be called when the send operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes sent. 
* ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code * asio::ip::udp::endpoint destination( * asio::ip::address::from_string("1.2.3.4"), 12345); * socket.async_send_to( * asio::buffer(data, size), destination, handler); * @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_send_to(const ConstBufferSequence& buffers, const endpoint_type& destination, ASIO_MOVE_ARG(WriteHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a WriteHandler. ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; return this->get_service().async_send_to(this->get_implementation(), buffers, destination, 0, ASIO_MOVE_CAST(WriteHandler)(handler)); } /// Start an asynchronous send. /** * This function is used to asynchronously send raw data to the specified * remote endpoint. The function call always returns immediately. * * @param buffers One or more data buffers to be sent to the remote endpoint. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param flags Flags specifying how the send call is to be made. * * @param destination The remote endpoint to which the data will be sent. * Copies will be made of the endpoint as required. * * @param handler The handler to be called when the send operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes sent. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_send_to(const ConstBufferSequence& buffers, const endpoint_type& destination, socket_base::message_flags flags, ASIO_MOVE_ARG(WriteHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a WriteHandler. ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; return this->get_service().async_send_to( this->get_implementation(), buffers, destination, flags, ASIO_MOVE_CAST(WriteHandler)(handler)); } /// Receive some data on a connected socket. /** * This function is used to receive data on the raw socket. The function * call will block until data has been received successfully or an error * occurs. * * @param buffers One or more buffers into which the data will be received. * * @returns The number of bytes received. * * @throws asio::system_error Thrown on failure. * * @note The receive operation can only be used with a connected socket. 
Use * the receive_from function to receive data on an unconnected raw * socket. * * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code socket.receive(asio::buffer(data, size)); @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t receive(const MutableBufferSequence& buffers) { asio::error_code ec; std::size_t s = this->get_service().receive( this->get_implementation(), buffers, 0, ec); asio::detail::throw_error(ec, "receive"); return s; } /// Receive some data on a connected socket. /** * This function is used to receive data on the raw socket. The function * call will block until data has been received successfully or an error * occurs. * * @param buffers One or more buffers into which the data will be received. * * @param flags Flags specifying how the receive call is to be made. * * @returns The number of bytes received. * * @throws asio::system_error Thrown on failure. * * @note The receive operation can only be used with a connected socket. Use * the receive_from function to receive data on an unconnected raw * socket. */ template std::size_t receive(const MutableBufferSequence& buffers, socket_base::message_flags flags) { asio::error_code ec; std::size_t s = this->get_service().receive( this->get_implementation(), buffers, flags, ec); asio::detail::throw_error(ec, "receive"); return s; } /// Receive some data on a connected socket. /** * This function is used to receive data on the raw socket. The function * call will block until data has been received successfully or an error * occurs. * * @param buffers One or more buffers into which the data will be received. * * @param flags Flags specifying how the receive call is to be made. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes received. * * @note The receive operation can only be used with a connected socket. Use * the receive_from function to receive data on an unconnected raw * socket. */ template std::size_t receive(const MutableBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { return this->get_service().receive( this->get_implementation(), buffers, flags, ec); } /// Start an asynchronous receive on a connected socket. /** * This function is used to asynchronously receive data from the raw * socket. The function call always returns immediately. * * @param buffers One or more buffers into which the data will be received. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param handler The handler to be called when the receive operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes received. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @note The async_receive operation can only be used with a connected socket. * Use the async_receive_from function to receive data on an unconnected * raw socket. 
* * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code * socket.async_receive(asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive(const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ReadHandler. ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; return this->get_service().async_receive(this->get_implementation(), buffers, 0, ASIO_MOVE_CAST(ReadHandler)(handler)); } /// Start an asynchronous receive on a connected socket. /** * This function is used to asynchronously receive data from the raw * socket. The function call always returns immediately. * * @param buffers One or more buffers into which the data will be received. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param flags Flags specifying how the receive call is to be made. * * @param handler The handler to be called when the receive operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes received. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @note The async_receive operation can only be used with a connected socket. * Use the async_receive_from function to receive data on an unconnected * raw socket. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive(const MutableBufferSequence& buffers, socket_base::message_flags flags, ASIO_MOVE_ARG(ReadHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ReadHandler. ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; return this->get_service().async_receive(this->get_implementation(), buffers, flags, ASIO_MOVE_CAST(ReadHandler)(handler)); } /// Receive raw data with the endpoint of the sender. /** * This function is used to receive raw data. The function call will block * until data has been received successfully or an error occurs. * * @param buffers One or more buffers into which the data will be received. * * @param sender_endpoint An endpoint object that receives the endpoint of * the remote sender of the data. * * @returns The number of bytes received. * * @throws asio::system_error Thrown on failure. * * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code * asio::ip::udp::endpoint sender_endpoint; * socket.receive_from( * asio::buffer(data, size), sender_endpoint); * @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. 
*/ template std::size_t receive_from(const MutableBufferSequence& buffers, endpoint_type& sender_endpoint) { asio::error_code ec; std::size_t s = this->get_service().receive_from( this->get_implementation(), buffers, sender_endpoint, 0, ec); asio::detail::throw_error(ec, "receive_from"); return s; } /// Receive raw data with the endpoint of the sender. /** * This function is used to receive raw data. The function call will block * until data has been received successfully or an error occurs. * * @param buffers One or more buffers into which the data will be received. * * @param sender_endpoint An endpoint object that receives the endpoint of * the remote sender of the data. * * @param flags Flags specifying how the receive call is to be made. * * @returns The number of bytes received. * * @throws asio::system_error Thrown on failure. */ template std::size_t receive_from(const MutableBufferSequence& buffers, endpoint_type& sender_endpoint, socket_base::message_flags flags) { asio::error_code ec; std::size_t s = this->get_service().receive_from( this->get_implementation(), buffers, sender_endpoint, flags, ec); asio::detail::throw_error(ec, "receive_from"); return s; } /// Receive raw data with the endpoint of the sender. /** * This function is used to receive raw data. The function call will block * until data has been received successfully or an error occurs. * * @param buffers One or more buffers into which the data will be received. * * @param sender_endpoint An endpoint object that receives the endpoint of * the remote sender of the data. * * @param flags Flags specifying how the receive call is to be made. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes received. */ template std::size_t receive_from(const MutableBufferSequence& buffers, endpoint_type& sender_endpoint, socket_base::message_flags flags, asio::error_code& ec) { return this->get_service().receive_from(this->get_implementation(), buffers, sender_endpoint, flags, ec); } /// Start an asynchronous receive. /** * This function is used to asynchronously receive raw data. The function * call always returns immediately. * * @param buffers One or more buffers into which the data will be received. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param sender_endpoint An endpoint object that receives the endpoint of * the remote sender of the data. Ownership of the sender_endpoint object * is retained by the caller, which must guarantee that it is valid until the * handler is called. * * @param handler The handler to be called when the receive operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes received. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). 
* * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code socket.async_receive_from( * asio::buffer(data, size), 0, sender_endpoint, handler); @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive_from(const MutableBufferSequence& buffers, endpoint_type& sender_endpoint, ASIO_MOVE_ARG(ReadHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ReadHandler. ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; return this->get_service().async_receive_from( this->get_implementation(), buffers, sender_endpoint, 0, ASIO_MOVE_CAST(ReadHandler)(handler)); } /// Start an asynchronous receive. /** * This function is used to asynchronously receive raw data. The function * call always returns immediately. * * @param buffers One or more buffers into which the data will be received. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param sender_endpoint An endpoint object that receives the endpoint of * the remote sender of the data. Ownership of the sender_endpoint object * is retained by the caller, which must guarantee that it is valid until the * handler is called. * * @param flags Flags specifying how the receive call is to be made. * * @param handler The handler to be called when the receive operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes received. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive_from(const MutableBufferSequence& buffers, endpoint_type& sender_endpoint, socket_base::message_flags flags, ASIO_MOVE_ARG(ReadHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ReadHandler. ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; return this->get_service().async_receive_from( this->get_implementation(), buffers, sender_endpoint, flags, ASIO_MOVE_CAST(ReadHandler)(handler)); } }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_BASIC_RAW_SOCKET_HPP galera-3-25.3.20/asio/asio/generic/0000755000015300001660000000000013042054732016440 5ustar jenkinsjenkinsgalera-3-25.3.20/asio/asio/generic/detail/0000755000015300001660000000000013042054732017702 5ustar jenkinsjenkinsgalera-3-25.3.20/asio/asio/generic/detail/endpoint.hpp0000644000015300001660000000616513042054732022243 0ustar jenkinsjenkins// // generic/detail/endpoint.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_GENERIC_DETAIL_ENDPOINT_HPP #define ASIO_GENERIC_DETAIL_ENDPOINT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace generic { namespace detail { // Helper class for implementing a generic socket endpoint. class endpoint { public: // Default constructor. ASIO_DECL endpoint(); // Construct an endpoint from the specified raw bytes. ASIO_DECL endpoint(const void* sock_addr, std::size_t sock_addr_size, int sock_protocol); // Copy constructor. endpoint(const endpoint& other) : data_(other.data_), size_(other.size_), protocol_(other.protocol_) { } // Assign from another endpoint. endpoint& operator=(const endpoint& other) { data_ = other.data_; size_ = other.size_; protocol_ = other.protocol_; return *this; } // Get the address family associated with the endpoint. int family() const { return data_.base.sa_family; } // Get the socket protocol associated with the endpoint. int protocol() const { return protocol_; } // Get the underlying endpoint in the native type. asio::detail::socket_addr_type* data() { return &data_.base; } // Get the underlying endpoint in the native type. const asio::detail::socket_addr_type* data() const { return &data_.base; } // Get the underlying size of the endpoint in the native type. std::size_t size() const { return size_; } // Set the underlying size of the endpoint in the native type. ASIO_DECL void resize(std::size_t size); // Get the capacity of the endpoint in the native type. std::size_t capacity() const { return sizeof(asio::detail::sockaddr_storage_type); } // Compare two endpoints for equality. ASIO_DECL friend bool operator==( const endpoint& e1, const endpoint& e2); // Compare endpoints for ordering. ASIO_DECL friend bool operator<( const endpoint& e1, const endpoint& e2); private: // The underlying socket address. union data_union { asio::detail::socket_addr_type base; asio::detail::sockaddr_storage_type generic; } data_; // The length of the socket address stored in the endpoint. std::size_t size_; // The socket protocol associated with the endpoint. int protocol_; // Initialise with a specified memory. ASIO_DECL void init(const void* sock_addr, std::size_t sock_addr_size, int sock_protocol); }; } // namespace detail } // namespace generic } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/generic/detail/impl/endpoint.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_GENERIC_DETAIL_ENDPOINT_HPP galera-3-25.3.20/asio/asio/generic/detail/impl/0000755000015300001660000000000013042054732020643 5ustar jenkinsjenkinsgalera-3-25.3.20/asio/asio/generic/detail/impl/endpoint.ipp0000644000015300001660000000512613042054732023201 0ustar jenkinsjenkins// // generic/detail/impl/endpoint.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_GENERIC_DETAIL_IMPL_ENDPOINT_IPP #define ASIO_GENERIC_DETAIL_IMPL_ENDPOINT_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/detail/socket_ops.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/error.hpp" #include "asio/generic/detail/endpoint.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace generic { namespace detail { endpoint::endpoint() { init(0, 0, 0); } endpoint::endpoint(const void* sock_addr, std::size_t sock_addr_size, int sock_protocol) { init(sock_addr, sock_addr_size, sock_protocol); } void endpoint::resize(std::size_t new_size) { if (new_size > sizeof(asio::detail::sockaddr_storage_type)) { asio::error_code ec(asio::error::invalid_argument); asio::detail::throw_error(ec); } else { size_ = new_size; protocol_ = 0; } } bool operator==(const endpoint& e1, const endpoint& e2) { using namespace std; // For memcmp. return e1.size() == e2.size() && memcmp(e1.data(), e2.data(), e1.size()) == 0; } bool operator<(const endpoint& e1, const endpoint& e2) { if (e1.protocol() < e2.protocol()) return true; if (e1.protocol() > e2.protocol()) return false; using namespace std; // For memcmp. std::size_t compare_size = e1.size() < e2.size() ? e1.size() : e2.size(); int compare_result = memcmp(e1.data(), e2.data(), compare_size); if (compare_result < 0) return true; if (compare_result > 0) return false; return e1.size() < e2.size(); } void endpoint::init(const void* sock_addr, std::size_t sock_addr_size, int sock_protocol) { if (sock_addr_size > sizeof(asio::detail::sockaddr_storage_type)) { asio::error_code ec(asio::error::invalid_argument); asio::detail::throw_error(ec); } using namespace std; // For memset and memcpy. memset(&data_.generic, 0, sizeof(asio::detail::sockaddr_storage_type)); memcpy(&data_.generic, sock_addr, sock_addr_size); size_ = sock_addr_size; protocol_ = sock_protocol; } } // namespace detail } // namespace generic } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_GENERIC_DETAIL_IMPL_ENDPOINT_IPP galera-3-25.3.20/asio/asio/generic/seq_packet_protocol.hpp0000644000015300001660000000605513042054732023217 0ustar jenkinsjenkins// // generic/seq_packet_protocol.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_GENERIC_SEQ_PACKET_PROTOCOL_HPP #define ASIO_GENERIC_SEQ_PACKET_PROTOCOL_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/basic_seq_packet_socket.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/generic/basic_endpoint.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace generic { /// Encapsulates the flags needed for a generic sequenced packet socket. /** * The asio::generic::seq_packet_protocol class contains flags necessary * for seq_packet-oriented sockets of any address family and protocol. 
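 *
 * As a rough usage sketch, a protocol object can also be passed when opening
 * a matching socket (the object names and the reuse of AF_INET/IPPROTO_SCTP
 * below are illustrative assumptions only):
 * @code
 * asio::io_service ios;
 * asio::generic::seq_packet_protocol proto(AF_INET, IPPROTO_SCTP);
 * asio::generic::seq_packet_protocol::socket sock(ios, proto);
 * @endcode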
* * @par Examples * Constructing using a native address family and socket protocol: * @code seq_packet_protocol p(AF_INET, IPPROTO_SCTP); @endcode * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Safe. * * @par Concepts: * Protocol. */ class seq_packet_protocol { public: /// Construct a protocol object for a specific address family and protocol. seq_packet_protocol(int address_family, int socket_protocol) : family_(address_family), protocol_(socket_protocol) { } /// Construct a generic protocol object from a specific protocol. /** * @throws @c bad_cast Thrown if the source protocol is not based around * sequenced packets. */ template seq_packet_protocol(const Protocol& source_protocol) : family_(source_protocol.family()), protocol_(source_protocol.protocol()) { if (source_protocol.type() != type()) { std::bad_cast ex; asio::detail::throw_exception(ex); } } /// Obtain an identifier for the type of the protocol. int type() const { return ASIO_OS_DEF(SOCK_SEQPACKET); } /// Obtain an identifier for the protocol. int protocol() const { return protocol_; } /// Obtain an identifier for the protocol family. int family() const { return family_; } /// Compare two protocols for equality. friend bool operator==(const seq_packet_protocol& p1, const seq_packet_protocol& p2) { return p1.family_ == p2.family_ && p1.protocol_ == p2.protocol_; } /// Compare two protocols for inequality. friend bool operator!=(const seq_packet_protocol& p1, const seq_packet_protocol& p2) { return !(p1 == p2); } /// The type of an endpoint. typedef basic_endpoint endpoint; /// The generic socket type. typedef basic_seq_packet_socket socket; private: int family_; int protocol_; }; } // namespace generic } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_GENERIC_SEQ_PACKET_PROTOCOL_HPP galera-3-25.3.20/asio/asio/generic/basic_endpoint.hpp0000644000015300001660000001113213042054732022130 0ustar jenkinsjenkins// // generic/basic_endpoint.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_GENERIC_BASIC_ENDPOINT_HPP #define ASIO_GENERIC_BASIC_ENDPOINT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/generic/detail/endpoint.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace generic { /// Describes an endpoint for any socket type. /** * The asio::generic::basic_endpoint class template describes an endpoint * that may be associated with any socket type. * * @note The socket types sockaddr type must be able to fit into a * @c sockaddr_storage structure. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. * * @par Concepts: * Endpoint. */ template class basic_endpoint { public: /// The protocol type associated with the endpoint. typedef Protocol protocol_type; /// The type of the endpoint structure. This type is dependent on the /// underlying implementation of the socket layer. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined data_type; #else typedef asio::detail::socket_addr_type data_type; #endif /// Default constructor. basic_endpoint() { } /// Construct an endpoint from the specified socket address. 
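  ///
  /// A minimal sketch of wrapping a native address with this constructor (the
  /// variable names and the use of AF_INET/IPPROTO_TCP are illustrative
  /// assumptions only):
  /// @code
  /// sockaddr_in native_addr = {}; // assumed to be filled in elsewhere
  /// native_addr.sin_family = AF_INET;
  /// asio::generic::stream_protocol::endpoint ep(
  ///     &native_addr, sizeof(native_addr), IPPROTO_TCP);
  /// @endcode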
basic_endpoint(const void* socket_address, std::size_t socket_address_size, int socket_protocol = 0) : impl_(socket_address, socket_address_size, socket_protocol) { } /// Construct an endpoint from the specific endpoint type. template basic_endpoint(const Endpoint& endpoint) : impl_(endpoint.data(), endpoint.size(), endpoint.protocol().protocol()) { } /// Copy constructor. basic_endpoint(const basic_endpoint& other) : impl_(other.impl_) { } #if defined(ASIO_HAS_MOVE) /// Move constructor. basic_endpoint(basic_endpoint&& other) : impl_(other.impl_) { } #endif // defined(ASIO_HAS_MOVE) /// Assign from another endpoint. basic_endpoint& operator=(const basic_endpoint& other) { impl_ = other.impl_; return *this; } #if defined(ASIO_HAS_MOVE) /// Move-assign from another endpoint. basic_endpoint& operator=(basic_endpoint&& other) { impl_ = other.impl_; return *this; } #endif // defined(ASIO_HAS_MOVE) /// The protocol associated with the endpoint. protocol_type protocol() const { return protocol_type(impl_.family(), impl_.protocol()); } /// Get the underlying endpoint in the native type. data_type* data() { return impl_.data(); } /// Get the underlying endpoint in the native type. const data_type* data() const { return impl_.data(); } /// Get the underlying size of the endpoint in the native type. std::size_t size() const { return impl_.size(); } /// Set the underlying size of the endpoint in the native type. void resize(std::size_t new_size) { impl_.resize(new_size); } /// Get the capacity of the endpoint in the native type. std::size_t capacity() const { return impl_.capacity(); } /// Compare two endpoints for equality. friend bool operator==(const basic_endpoint& e1, const basic_endpoint& e2) { return e1.impl_ == e2.impl_; } /// Compare two endpoints for inequality. friend bool operator!=(const basic_endpoint& e1, const basic_endpoint& e2) { return !(e1.impl_ == e2.impl_); } /// Compare endpoints for ordering. friend bool operator<(const basic_endpoint& e1, const basic_endpoint& e2) { return e1.impl_ < e2.impl_; } /// Compare endpoints for ordering. friend bool operator>(const basic_endpoint& e1, const basic_endpoint& e2) { return e2.impl_ < e1.impl_; } /// Compare endpoints for ordering. friend bool operator<=(const basic_endpoint& e1, const basic_endpoint& e2) { return !(e2 < e1); } /// Compare endpoints for ordering. friend bool operator>=(const basic_endpoint& e1, const basic_endpoint& e2) { return !(e1 < e2); } private: // The underlying generic endpoint. asio::generic::detail::endpoint impl_; }; } // namespace generic } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_GENERIC_BASIC_ENDPOINT_HPP galera-3-25.3.20/asio/asio/generic/datagram_protocol.hpp0000644000015300001660000000613413042054732022656 0ustar jenkinsjenkins// // generic/datagram_protocol.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_GENERIC_DATAGRAM_PROTOCOL_HPP #define ASIO_GENERIC_DATAGRAM_PROTOCOL_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/basic_datagram_socket.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/generic/basic_endpoint.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace generic { /// Encapsulates the flags needed for a generic datagram-oriented socket. /** * The asio::generic::datagram_protocol class contains flags necessary * for datagram-oriented sockets of any address family and protocol. * * @par Examples * Constructing using a native address family and socket protocol: * @code datagram_protocol p(AF_INET, IPPROTO_UDP); @endcode * Constructing from a specific protocol type: * @code datagram_protocol p(asio::ip::udp::v4()); @endcode * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Safe. * * @par Concepts: * Protocol. */ class datagram_protocol { public: /// Construct a protocol object for a specific address family and protocol. datagram_protocol(int address_family, int socket_protocol) : family_(address_family), protocol_(socket_protocol) { } /// Construct a generic protocol object from a specific protocol. /** * @throws @c bad_cast Thrown if the source protocol is not datagram-oriented. */ template datagram_protocol(const Protocol& source_protocol) : family_(source_protocol.family()), protocol_(source_protocol.protocol()) { if (source_protocol.type() != type()) { std::bad_cast ex; asio::detail::throw_exception(ex); } } /// Obtain an identifier for the type of the protocol. int type() const { return ASIO_OS_DEF(SOCK_DGRAM); } /// Obtain an identifier for the protocol. int protocol() const { return protocol_; } /// Obtain an identifier for the protocol family. int family() const { return family_; } /// Compare two protocols for equality. friend bool operator==(const datagram_protocol& p1, const datagram_protocol& p2) { return p1.family_ == p2.family_ && p1.protocol_ == p2.protocol_; } /// Compare two protocols for inequality. friend bool operator!=(const datagram_protocol& p1, const datagram_protocol& p2) { return !(p1 == p2); } /// The type of an endpoint. typedef basic_endpoint endpoint; /// The generic socket type. typedef basic_datagram_socket socket; private: int family_; int protocol_; }; } // namespace generic } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_GENERIC_DATAGRAM_PROTOCOL_HPP galera-3-25.3.20/asio/asio/generic/stream_protocol.hpp0000644000015300001660000000636713042054732022401 0ustar jenkinsjenkins// // generic/stream_protocol.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_GENERIC_STREAM_PROTOCOL_HPP #define ASIO_GENERIC_STREAM_PROTOCOL_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/basic_socket_iostream.hpp" #include "asio/basic_stream_socket.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/generic/basic_endpoint.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace generic { /// Encapsulates the flags needed for a generic stream-oriented socket. /** * The asio::generic::stream_protocol class contains flags necessary for * stream-oriented sockets of any address family and protocol. * * @par Examples * Constructing using a native address family and socket protocol: * @code stream_protocol p(AF_INET, IPPROTO_TCP); @endcode * Constructing from a specific protocol type: * @code stream_protocol p(asio::ip::tcp::v4()); @endcode * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Safe. * * @par Concepts: * Protocol. */ class stream_protocol { public: /// Construct a protocol object for a specific address family and protocol. stream_protocol(int address_family, int socket_protocol) : family_(address_family), protocol_(socket_protocol) { } /// Construct a generic protocol object from a specific protocol. /** * @throws @c bad_cast Thrown if the source protocol is not stream-oriented. */ template stream_protocol(const Protocol& source_protocol) : family_(source_protocol.family()), protocol_(source_protocol.protocol()) { if (source_protocol.type() != type()) { std::bad_cast ex; asio::detail::throw_exception(ex); } } /// Obtain an identifier for the type of the protocol. int type() const { return ASIO_OS_DEF(SOCK_STREAM); } /// Obtain an identifier for the protocol. int protocol() const { return protocol_; } /// Obtain an identifier for the protocol family. int family() const { return family_; } /// Compare two protocols for equality. friend bool operator==(const stream_protocol& p1, const stream_protocol& p2) { return p1.family_ == p2.family_ && p1.protocol_ == p2.protocol_; } /// Compare two protocols for inequality. friend bool operator!=(const stream_protocol& p1, const stream_protocol& p2) { return !(p1 == p2); } /// The type of an endpoint. typedef basic_endpoint endpoint; /// The generic socket type. typedef basic_stream_socket socket; #if !defined(ASIO_NO_IOSTREAM) /// The generic socket iostream type. typedef basic_socket_iostream iostream; #endif // !defined(ASIO_NO_IOSTREAM) private: int family_; int protocol_; }; } // namespace generic } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_GENERIC_STREAM_PROTOCOL_HPP galera-3-25.3.20/asio/asio/generic/raw_protocol.hpp0000644000015300001660000000572013042054732021667 0ustar jenkinsjenkins// // generic/raw_protocol.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_GENERIC_RAW_PROTOCOL_HPP #define ASIO_GENERIC_RAW_PROTOCOL_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/basic_raw_socket.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/generic/basic_endpoint.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace generic { /// Encapsulates the flags needed for a generic raw socket. /** * The asio::generic::raw_protocol class contains flags necessary for * raw sockets of any address family and protocol. * * @par Examples * Constructing using a native address family and socket protocol: * @code raw_protocol p(AF_INET, IPPROTO_ICMP); @endcode * Constructing from a specific protocol type: * @code raw_protocol p(asio::ip::icmp::v4()); @endcode * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Safe. * * @par Concepts: * Protocol. */ class raw_protocol { public: /// Construct a protocol object for a specific address family and protocol. raw_protocol(int address_family, int socket_protocol) : family_(address_family), protocol_(socket_protocol) { } /// Construct a generic protocol object from a specific protocol. /** * @throws @c bad_cast Thrown if the source protocol is not raw-oriented. */ template raw_protocol(const Protocol& source_protocol) : family_(source_protocol.family()), protocol_(source_protocol.protocol()) { if (source_protocol.type() != type()) { std::bad_cast ex; asio::detail::throw_exception(ex); } } /// Obtain an identifier for the type of the protocol. int type() const { return ASIO_OS_DEF(SOCK_RAW); } /// Obtain an identifier for the protocol. int protocol() const { return protocol_; } /// Obtain an identifier for the protocol family. int family() const { return family_; } /// Compare two protocols for equality. friend bool operator==(const raw_protocol& p1, const raw_protocol& p2) { return p1.family_ == p2.family_ && p1.protocol_ == p2.protocol_; } /// Compare two protocols for inequality. friend bool operator!=(const raw_protocol& p1, const raw_protocol& p2) { return !(p1 == p2); } /// The type of an endpoint. typedef basic_endpoint endpoint; /// The generic socket type. typedef basic_raw_socket socket; private: int family_; int protocol_; }; } // namespace generic } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_GENERIC_RAW_PROTOCOL_HPP galera-3-25.3.20/asio/asio/socket_base.hpp0000644000015300001660000003346713042054732020034 0ustar jenkinsjenkins// // socket_base.hpp // ~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SOCKET_BASE_HPP #define ASIO_SOCKET_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/io_control.hpp" #include "asio/detail/socket_option.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// The socket_base class is used as a base for the basic_stream_socket and /// basic_datagram_socket class templates so that we have a common place to /// define the shutdown_type and enum. 
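///
/// As a brief, illustrative sketch (the io_service object and acceptor below
/// are assumptions made for the example only), the nested option types
/// defined here are applied to a concrete socket or acceptor like so:
/// @code
/// asio::io_service io_service;
/// asio::ip::tcp::acceptor acceptor(io_service);
/// acceptor.open(asio::ip::tcp::v4());
/// acceptor.set_option(asio::socket_base::reuse_address(true));
/// @endcode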
class socket_base { public: /// Different ways a socket may be shutdown. enum shutdown_type { #if defined(GENERATING_DOCUMENTATION) /// Shutdown the receive side of the socket. shutdown_receive = implementation_defined, /// Shutdown the send side of the socket. shutdown_send = implementation_defined, /// Shutdown both send and receive on the socket. shutdown_both = implementation_defined #else shutdown_receive = ASIO_OS_DEF(SHUT_RD), shutdown_send = ASIO_OS_DEF(SHUT_WR), shutdown_both = ASIO_OS_DEF(SHUT_RDWR) #endif }; /// Bitmask type for flags that can be passed to send and receive operations. typedef int message_flags; #if defined(GENERATING_DOCUMENTATION) /// Peek at incoming data without removing it from the input queue. static const int message_peek = implementation_defined; /// Process out-of-band data. static const int message_out_of_band = implementation_defined; /// Specify that the data should not be subject to routing. static const int message_do_not_route = implementation_defined; /// Specifies that the data marks the end of a record. static const int message_end_of_record = implementation_defined; #else ASIO_STATIC_CONSTANT(int, message_peek = ASIO_OS_DEF(MSG_PEEK)); ASIO_STATIC_CONSTANT(int, message_out_of_band = ASIO_OS_DEF(MSG_OOB)); ASIO_STATIC_CONSTANT(int, message_do_not_route = ASIO_OS_DEF(MSG_DONTROUTE)); ASIO_STATIC_CONSTANT(int, message_end_of_record = ASIO_OS_DEF(MSG_EOR)); #endif /// Socket option to permit sending of broadcast messages. /** * Implements the SOL_SOCKET/SO_BROADCAST socket option. * * @par Examples * Setting the option: * @code * asio::ip::udp::socket socket(io_service); * ... * asio::socket_base::broadcast option(true); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::udp::socket socket(io_service); * ... * asio::socket_base::broadcast option; * socket.get_option(option); * bool is_set = option.value(); * @endcode * * @par Concepts: * Socket_Option, Boolean_Socket_Option. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined broadcast; #else typedef asio::detail::socket_option::boolean< ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_BROADCAST)> broadcast; #endif /// Socket option to enable socket-level debugging. /** * Implements the SOL_SOCKET/SO_DEBUG socket option. * * @par Examples * Setting the option: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::socket_base::debug option(true); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::socket_base::debug option; * socket.get_option(option); * bool is_set = option.value(); * @endcode * * @par Concepts: * Socket_Option, Boolean_Socket_Option. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined debug; #else typedef asio::detail::socket_option::boolean< ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_DEBUG)> debug; #endif /// Socket option to prevent routing, use local interfaces only. /** * Implements the SOL_SOCKET/SO_DONTROUTE socket option. * * @par Examples * Setting the option: * @code * asio::ip::udp::socket socket(io_service); * ... * asio::socket_base::do_not_route option(true); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::udp::socket socket(io_service); * ... * asio::socket_base::do_not_route option; * socket.get_option(option); * bool is_set = option.value(); * @endcode * * @par Concepts: * Socket_Option, Boolean_Socket_Option. 
*/ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined do_not_route; #else typedef asio::detail::socket_option::boolean< ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_DONTROUTE)> do_not_route; #endif /// Socket option to send keep-alives. /** * Implements the SOL_SOCKET/SO_KEEPALIVE socket option. * * @par Examples * Setting the option: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::socket_base::keep_alive option(true); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::socket_base::keep_alive option; * socket.get_option(option); * bool is_set = option.value(); * @endcode * * @par Concepts: * Socket_Option, Boolean_Socket_Option. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined keep_alive; #else typedef asio::detail::socket_option::boolean< ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_KEEPALIVE)> keep_alive; #endif /// Socket option for the send buffer size of a socket. /** * Implements the SOL_SOCKET/SO_SNDBUF socket option. * * @par Examples * Setting the option: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::socket_base::send_buffer_size option(8192); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::socket_base::send_buffer_size option; * socket.get_option(option); * int size = option.value(); * @endcode * * @par Concepts: * Socket_Option, Integer_Socket_Option. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined send_buffer_size; #else typedef asio::detail::socket_option::integer< ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_SNDBUF)> send_buffer_size; #endif /// Socket option for the send low watermark. /** * Implements the SOL_SOCKET/SO_SNDLOWAT socket option. * * @par Examples * Setting the option: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::socket_base::send_low_watermark option(1024); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::socket_base::send_low_watermark option; * socket.get_option(option); * int size = option.value(); * @endcode * * @par Concepts: * Socket_Option, Integer_Socket_Option. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined send_low_watermark; #else typedef asio::detail::socket_option::integer< ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_SNDLOWAT)> send_low_watermark; #endif /// Socket option for the receive buffer size of a socket. /** * Implements the SOL_SOCKET/SO_RCVBUF socket option. * * @par Examples * Setting the option: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::socket_base::receive_buffer_size option(8192); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::socket_base::receive_buffer_size option; * socket.get_option(option); * int size = option.value(); * @endcode * * @par Concepts: * Socket_Option, Integer_Socket_Option. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined receive_buffer_size; #else typedef asio::detail::socket_option::integer< ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_RCVBUF)> receive_buffer_size; #endif /// Socket option for the receive low watermark. /** * Implements the SOL_SOCKET/SO_RCVLOWAT socket option. 
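 *
 * That is, on most platforms, the minimum number of bytes that must be
 * available in the socket's receive buffer before a receive operation is
 * allowed to complete.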
* * @par Examples * Setting the option: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::socket_base::receive_low_watermark option(1024); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::socket_base::receive_low_watermark option; * socket.get_option(option); * int size = option.value(); * @endcode * * @par Concepts: * Socket_Option, Integer_Socket_Option. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined receive_low_watermark; #else typedef asio::detail::socket_option::integer< ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_RCVLOWAT)> receive_low_watermark; #endif /// Socket option to allow the socket to be bound to an address that is /// already in use. /** * Implements the SOL_SOCKET/SO_REUSEADDR socket option. * * @par Examples * Setting the option: * @code * asio::ip::tcp::acceptor acceptor(io_service); * ... * asio::socket_base::reuse_address option(true); * acceptor.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::tcp::acceptor acceptor(io_service); * ... * asio::socket_base::reuse_address option; * acceptor.get_option(option); * bool is_set = option.value(); * @endcode * * @par Concepts: * Socket_Option, Boolean_Socket_Option. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined reuse_address; #else typedef asio::detail::socket_option::boolean< ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_REUSEADDR)> reuse_address; #endif /// Socket option to specify whether the socket lingers on close if unsent /// data is present. /** * Implements the SOL_SOCKET/SO_LINGER socket option. * * @par Examples * Setting the option: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::socket_base::linger option(true, 30); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::socket_base::linger option; * socket.get_option(option); * bool is_set = option.enabled(); * unsigned short timeout = option.timeout(); * @endcode * * @par Concepts: * Socket_Option, Linger_Socket_Option. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined linger; #else typedef asio::detail::socket_option::linger< ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_LINGER)> linger; #endif /// Socket option to report aborted connections on accept. /** * Implements a custom socket option that determines whether or not an accept * operation is permitted to fail with asio::error::connection_aborted. * By default the option is false. * * @par Examples * Setting the option: * @code * asio::ip::tcp::acceptor acceptor(io_service); * ... * asio::socket_base::enable_connection_aborted option(true); * acceptor.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::tcp::acceptor acceptor(io_service); * ... * asio::socket_base::enable_connection_aborted option; * acceptor.get_option(option); * bool is_set = option.value(); * @endcode * * @par Concepts: * Socket_Option, Boolean_Socket_Option. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined enable_connection_aborted; #else typedef asio::detail::socket_option::boolean< asio::detail::custom_socket_option_level, asio::detail::enable_connection_aborted_option> enable_connection_aborted; #endif /// (Deprecated: Use non_blocking().) IO control command to /// set the blocking mode of the socket. 
/** * Implements the FIONBIO IO control command. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::socket_base::non_blocking_io command(true); * socket.io_control(command); * @endcode * * @par Concepts: * IO_Control_Command, Boolean_IO_Control_Command. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined non_blocking_io; #else typedef asio::detail::io_control::non_blocking_io non_blocking_io; #endif /// IO control command to get the amount of data that can be read without /// blocking. /** * Implements the FIONREAD IO control command. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::socket_base::bytes_readable command(true); * socket.io_control(command); * std::size_t bytes_readable = command.get(); * @endcode * * @par Concepts: * IO_Control_Command, Size_IO_Control_Command. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined bytes_readable; #else typedef asio::detail::io_control::bytes_readable bytes_readable; #endif /// The maximum length of the queue of pending incoming connections. #if defined(GENERATING_DOCUMENTATION) static const int max_connections = implementation_defined; #else ASIO_STATIC_CONSTANT(int, max_connections = ASIO_OS_DEF(SOMAXCONN)); #endif protected: /// Protected destructor to prevent deletion through this type. ~socket_base() { } }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SOCKET_BASE_HPP galera-3-25.3.20/asio/asio/handler_type.hpp0000644000015300001660000000730213042054732020215 0ustar jenkinsjenkins// // handler_type.hpp // ~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_HANDLER_TYPE_HPP #define ASIO_HANDLER_TYPE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Default handler type traits provided for all handlers. /** * The handler_type traits class is used for determining the concrete handler * type to be used for an asynchronous operation. It allows the handler type to * be determined at the point where the specific completion handler signature * is known. * * This template may be specialised for user-defined handler types. */ template struct handler_type { /// The handler type for the specific signature. 
typedef Handler type; }; #if !defined(GENERATING_DOCUMENTATION) template struct handler_type : handler_type {}; template struct handler_type : handler_type {}; template struct handler_type : handler_type {}; template struct handler_type : handler_type {}; template struct handler_type : handler_type {}; template struct handler_type : handler_type {}; template struct handler_type : handler_type {}; #if defined(ASIO_HAS_MOVE) template struct handler_type : handler_type {}; #endif // defined(ASIO_HAS_MOVE) template struct handler_type : handler_type {}; template struct handler_type : handler_type {}; template struct handler_type : handler_type {}; template struct handler_type : handler_type {}; template struct handler_type : handler_type {}; template struct handler_type : handler_type {}; #endif // !defined(GENERATING_DOCUMENTATION) } // namespace asio #include "asio/detail/pop_options.hpp" #define ASIO_HANDLER_TYPE(h, sig) \ typename handler_type::type #endif // ASIO_HANDLER_TYPE_HPP galera-3-25.3.20/asio/asio/serial_port_service.hpp0000644000015300001660000001634113042054732021605 0ustar jenkinsjenkins// // serial_port_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SERIAL_PORT_SERVICE_HPP #define ASIO_SERIAL_PORT_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_SERIAL_PORT) \ || defined(GENERATING_DOCUMENTATION) #include #include #include "asio/async_result.hpp" #include "asio/detail/reactive_serial_port_service.hpp" #include "asio/detail/win_iocp_serial_port_service.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/serial_port_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Default service implementation for a serial port. class serial_port_service #if defined(GENERATING_DOCUMENTATION) : public asio::io_service::service #else : public asio::detail::service_base #endif { public: #if defined(GENERATING_DOCUMENTATION) /// The unique service identifier. static asio::io_service::id id; #endif private: // The type of the platform-specific implementation. #if defined(ASIO_HAS_IOCP) typedef detail::win_iocp_serial_port_service service_impl_type; #else typedef detail::reactive_serial_port_service service_impl_type; #endif public: /// The type of a serial port implementation. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined implementation_type; #else typedef service_impl_type::implementation_type implementation_type; #endif /// (Deprecated: Use native_handle_type.) The native handle type. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined native_type; #else typedef service_impl_type::native_handle_type native_type; #endif /// The native handle type. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined native_handle_type; #else typedef service_impl_type::native_handle_type native_handle_type; #endif /// Construct a new serial port service for the specified io_service. explicit serial_port_service(asio::io_service& io_service) : asio::detail::service_base(io_service), service_impl_(io_service) { } /// Construct a new serial port implementation. 
void construct(implementation_type& impl) { service_impl_.construct(impl); } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a new serial port implementation. void move_construct(implementation_type& impl, implementation_type& other_impl) { service_impl_.move_construct(impl, other_impl); } /// Move-assign from another serial port implementation. void move_assign(implementation_type& impl, serial_port_service& other_service, implementation_type& other_impl) { service_impl_.move_assign(impl, other_service.service_impl_, other_impl); } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Destroy a serial port implementation. void destroy(implementation_type& impl) { service_impl_.destroy(impl); } /// Open a serial port. asio::error_code open(implementation_type& impl, const std::string& device, asio::error_code& ec) { return service_impl_.open(impl, device, ec); } /// Assign an existing native handle to a serial port. asio::error_code assign(implementation_type& impl, const native_handle_type& handle, asio::error_code& ec) { return service_impl_.assign(impl, handle, ec); } /// Determine whether the handle is open. bool is_open(const implementation_type& impl) const { return service_impl_.is_open(impl); } /// Close a serial port implementation. asio::error_code close(implementation_type& impl, asio::error_code& ec) { return service_impl_.close(impl, ec); } /// (Deprecated: Use native_handle().) Get the native handle implementation. native_type native(implementation_type& impl) { return service_impl_.native_handle(impl); } /// Get the native handle implementation. native_handle_type native_handle(implementation_type& impl) { return service_impl_.native_handle(impl); } /// Cancel all asynchronous operations associated with the handle. asio::error_code cancel(implementation_type& impl, asio::error_code& ec) { return service_impl_.cancel(impl, ec); } /// Set a serial port option. template asio::error_code set_option(implementation_type& impl, const SettableSerialPortOption& option, asio::error_code& ec) { return service_impl_.set_option(impl, option, ec); } /// Get a serial port option. template asio::error_code get_option(const implementation_type& impl, GettableSerialPortOption& option, asio::error_code& ec) const { return service_impl_.get_option(impl, option, ec); } /// Send a break sequence to the serial port. asio::error_code send_break(implementation_type& impl, asio::error_code& ec) { return service_impl_.send_break(impl, ec); } /// Write the given data to the stream. template std::size_t write_some(implementation_type& impl, const ConstBufferSequence& buffers, asio::error_code& ec) { return service_impl_.write_some(impl, buffers, ec); } /// Start an asynchronous write. template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write_some(implementation_type& impl, const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler) { detail::async_result_init< WriteHandler, void (asio::error_code, std::size_t)> init( ASIO_MOVE_CAST(WriteHandler)(handler)); service_impl_.async_write_some(impl, buffers, init.handler); return init.result.get(); } /// Read some data from the stream. template std::size_t read_some(implementation_type& impl, const MutableBufferSequence& buffers, asio::error_code& ec) { return service_impl_.read_some(impl, buffers, ec); } /// Start an asynchronous read. 
template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_some(implementation_type& impl, const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler) { detail::async_result_init< ReadHandler, void (asio::error_code, std::size_t)> init( ASIO_MOVE_CAST(ReadHandler)(handler)); service_impl_.async_read_some(impl, buffers, init.handler); return init.result.get(); } private: // Destroy all user-defined handler objects owned by the service. void shutdown_service() { service_impl_.shutdown_service(); } // The platform-specific implementation. service_impl_type service_impl_; }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_SERIAL_PORT) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_SERIAL_PORT_SERVICE_HPP galera-3-25.3.20/asio/asio/buffered_write_stream.hpp0000644000015300001660000001604713042054732022114 0ustar jenkinsjenkins// // buffered_write_stream.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BUFFERED_WRITE_STREAM_HPP #define ASIO_BUFFERED_WRITE_STREAM_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/buffered_write_stream_fwd.hpp" #include "asio/buffer.hpp" #include "asio/completion_condition.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffered_stream_storage.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/type_traits.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/write.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Adds buffering to the write-related operations of a stream. /** * The buffered_write_stream class template can be used to add buffering to the * synchronous and asynchronous write operations of a stream. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. * * @par Concepts: * AsyncReadStream, AsyncWriteStream, Stream, SyncReadStream, SyncWriteStream. */ template class buffered_write_stream : private noncopyable { public: /// The type of the next layer. typedef typename remove_reference::type next_layer_type; /// The type of the lowest layer. typedef typename next_layer_type::lowest_layer_type lowest_layer_type; #if defined(GENERATING_DOCUMENTATION) /// The default buffer size. static const std::size_t default_buffer_size = implementation_defined; #else ASIO_STATIC_CONSTANT(std::size_t, default_buffer_size = 1024); #endif /// Construct, passing the specified argument to initialise the next layer. template explicit buffered_write_stream(Arg& a) : next_layer_(a), storage_(default_buffer_size) { } /// Construct, passing the specified argument to initialise the next layer. template buffered_write_stream(Arg& a, std::size_t buffer_size) : next_layer_(a), storage_(buffer_size) { } /// Get a reference to the next layer. next_layer_type& next_layer() { return next_layer_; } /// Get a reference to the lowest layer. lowest_layer_type& lowest_layer() { return next_layer_.lowest_layer(); } /// Get a const reference to the lowest layer. const lowest_layer_type& lowest_layer() const { return next_layer_.lowest_layer(); } /// Get the io_service associated with the object. 
asio::io_service& get_io_service() { return next_layer_.get_io_service(); } /// Close the stream. void close() { next_layer_.close(); } /// Close the stream. asio::error_code close(asio::error_code& ec) { return next_layer_.close(ec); } /// Flush all data from the buffer to the next layer. Returns the number of /// bytes written to the next layer on the last write operation. Throws an /// exception on failure. std::size_t flush(); /// Flush all data from the buffer to the next layer. Returns the number of /// bytes written to the next layer on the last write operation, or 0 if an /// error occurred. std::size_t flush(asio::error_code& ec); /// Start an asynchronous flush. template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_flush(ASIO_MOVE_ARG(WriteHandler) handler); /// Write the given data to the stream. Returns the number of bytes written. /// Throws an exception on failure. template std::size_t write_some(const ConstBufferSequence& buffers); /// Write the given data to the stream. Returns the number of bytes written, /// or 0 if an error occurred and the error handler did not throw. template std::size_t write_some(const ConstBufferSequence& buffers, asio::error_code& ec); /// Start an asynchronous write. The data being written must be valid for the /// lifetime of the asynchronous operation. template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write_some(const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler); /// Read some data from the stream. Returns the number of bytes read. Throws /// an exception on failure. template std::size_t read_some(const MutableBufferSequence& buffers) { return next_layer_.read_some(buffers); } /// Read some data from the stream. Returns the number of bytes read or 0 if /// an error occurred. template std::size_t read_some(const MutableBufferSequence& buffers, asio::error_code& ec) { return next_layer_.read_some(buffers, ec); } /// Start an asynchronous read. The buffer into which the data will be read /// must be valid for the lifetime of the asynchronous operation. template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_some(const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler) { detail::async_result_init< ReadHandler, void (asio::error_code, std::size_t)> init( ASIO_MOVE_CAST(ReadHandler)(handler)); next_layer_.async_read_some(buffers, ASIO_MOVE_CAST(ASIO_HANDLER_TYPE(ReadHandler, void (asio::error_code, std::size_t)))(init.handler)); return init.result.get(); } /// Peek at the incoming data on the stream. Returns the number of bytes read. /// Throws an exception on failure. template std::size_t peek(const MutableBufferSequence& buffers) { return next_layer_.peek(buffers); } /// Peek at the incoming data on the stream. Returns the number of bytes read, /// or 0 if an error occurred. template std::size_t peek(const MutableBufferSequence& buffers, asio::error_code& ec) { return next_layer_.peek(buffers, ec); } /// Determine the amount of data that may be read without blocking. std::size_t in_avail() { return next_layer_.in_avail(); } /// Determine the amount of data that may be read without blocking. std::size_t in_avail(asio::error_code& ec) { return next_layer_.in_avail(ec); } private: /// Copy data into the internal buffer from the specified source buffer. /// Returns the number of bytes copied. template std::size_t copy(const ConstBufferSequence& buffers); /// The next layer. 
Stream next_layer_; // The data in the buffer. detail::buffered_stream_storage storage_; }; } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/buffered_write_stream.hpp" #endif // ASIO_BUFFERED_WRITE_STREAM_HPP galera-3-25.3.20/asio/asio/deadline_timer_service.hpp0000644000015300001660000001124013042054732022220 0ustar jenkinsjenkins// // deadline_timer_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DEADLINE_TIMER_SERVICE_HPP #define ASIO_DEADLINE_TIMER_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_BOOST_DATE_TIME) \ || defined(GENERATING_DOCUMENTATION) #include #include "asio/async_result.hpp" #include "asio/detail/deadline_timer_service.hpp" #include "asio/io_service.hpp" #include "asio/time_traits.hpp" #include "asio/detail/timer_queue_ptime.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Default service implementation for a timer. template > class deadline_timer_service #if defined(GENERATING_DOCUMENTATION) : public asio::io_service::service #else : public asio::detail::service_base< deadline_timer_service > #endif { public: #if defined(GENERATING_DOCUMENTATION) /// The unique service identifier. static asio::io_service::id id; #endif /// The time traits type. typedef TimeTraits traits_type; /// The time type. typedef typename traits_type::time_type time_type; /// The duration type. typedef typename traits_type::duration_type duration_type; private: // The type of the platform-specific implementation. typedef detail::deadline_timer_service service_impl_type; public: /// The implementation type of the deadline timer. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined implementation_type; #else typedef typename service_impl_type::implementation_type implementation_type; #endif /// Construct a new timer service for the specified io_service. explicit deadline_timer_service(asio::io_service& io_service) : asio::detail::service_base< deadline_timer_service >(io_service), service_impl_(io_service) { } /// Construct a new timer implementation. void construct(implementation_type& impl) { service_impl_.construct(impl); } /// Destroy a timer implementation. void destroy(implementation_type& impl) { service_impl_.destroy(impl); } /// Cancel any asynchronous wait operations associated with the timer. std::size_t cancel(implementation_type& impl, asio::error_code& ec) { return service_impl_.cancel(impl, ec); } /// Cancels one asynchronous wait operation associated with the timer. std::size_t cancel_one(implementation_type& impl, asio::error_code& ec) { return service_impl_.cancel_one(impl, ec); } /// Get the expiry time for the timer as an absolute time. time_type expires_at(const implementation_type& impl) const { return service_impl_.expires_at(impl); } /// Set the expiry time for the timer as an absolute time. std::size_t expires_at(implementation_type& impl, const time_type& expiry_time, asio::error_code& ec) { return service_impl_.expires_at(impl, expiry_time, ec); } /// Get the expiry time for the timer relative to now. 
duration_type expires_from_now(const implementation_type& impl) const { return service_impl_.expires_from_now(impl); } /// Set the expiry time for the timer relative to now. std::size_t expires_from_now(implementation_type& impl, const duration_type& expiry_time, asio::error_code& ec) { return service_impl_.expires_from_now(impl, expiry_time, ec); } // Perform a blocking wait on the timer. void wait(implementation_type& impl, asio::error_code& ec) { service_impl_.wait(impl, ec); } // Start an asynchronous wait on the timer. template ASIO_INITFN_RESULT_TYPE(WaitHandler, void (asio::error_code)) async_wait(implementation_type& impl, ASIO_MOVE_ARG(WaitHandler) handler) { detail::async_result_init< WaitHandler, void (asio::error_code)> init( ASIO_MOVE_CAST(WaitHandler)(handler)); service_impl_.async_wait(impl, init.handler); return init.result.get(); } private: // Destroy all user-defined handler objects owned by the service. void shutdown_service() { service_impl_.shutdown_service(); } // The platform-specific implementation. service_impl_type service_impl_; }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_BOOST_DATE_TIME) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_DEADLINE_TIMER_SERVICE_HPP galera-3-25.3.20/asio/asio/spawn.hpp0000644000015300001660000002067613042054732016700 0ustar jenkinsjenkins// // spawn.hpp // ~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SPAWN_HPP #define ASIO_SPAWN_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/weak_ptr.hpp" #include "asio/detail/wrapped_handler.hpp" #include "asio/io_service.hpp" #include "asio/strand.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Context object the represents the currently executing coroutine. /** * The basic_yield_context class is used to represent the currently executing * stackful coroutine. A basic_yield_context may be passed as a handler to an * asynchronous operation. For example: * * @code template * void my_coroutine(basic_yield_context yield) * { * ... * std::size_t n = my_socket.async_read_some(buffer, yield); * ... * } @endcode * * The initiating function (async_read_some in the above example) suspends the * current coroutine. The coroutine is resumed when the asynchronous operation * completes, and the result of the operation is returned. */ template class basic_yield_context { public: /// The coroutine callee type, used by the implementation. /** * When using Boost.Coroutine v1, this type is: * @code typename coroutine @endcode * When using Boost.Coroutine v2 (unidirectional coroutines), this type is: * @code push_coroutine @endcode */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined callee_type; #elif defined(BOOST_COROUTINES_UNIDIRECT) || defined(BOOST_COROUTINES_V2) typedef boost::coroutines::push_coroutine callee_type; #else typedef boost::coroutines::coroutine callee_type; #endif /// The coroutine caller type, used by the implementation. 
/** * When using Boost.Coroutine v1, this type is: * @code typename coroutine::caller_type @endcode * When using Boost.Coroutine v2 (unidirectional coroutines), this type is: * @code pull_coroutine @endcode */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined caller_type; #elif defined(BOOST_COROUTINES_UNIDIRECT) || defined(BOOST_COROUTINES_V2) typedef boost::coroutines::pull_coroutine caller_type; #else typedef boost::coroutines::coroutine::caller_type caller_type; #endif /// Construct a yield context to represent the specified coroutine. /** * Most applications do not need to use this constructor. Instead, the * spawn() function passes a yield context as an argument to the coroutine * function. */ basic_yield_context( const detail::weak_ptr& coro, caller_type& ca, Handler& handler) : coro_(coro), ca_(ca), handler_(handler), ec_(0) { } /// Return a yield context that sets the specified error_code. /** * By default, when a yield context is used with an asynchronous operation, a * non-success error_code is converted to system_error and thrown. This * operator may be used to specify an error_code object that should instead be * set with the asynchronous operation's result. For example: * * @code template * void my_coroutine(basic_yield_context yield) * { * ... * std::size_t n = my_socket.async_read_some(buffer, yield[ec]); * if (ec) * { * // An error occurred. * } * ... * } @endcode */ basic_yield_context operator[](asio::error_code& ec) const { basic_yield_context tmp(*this); tmp.ec_ = &ec; return tmp; } #if defined(GENERATING_DOCUMENTATION) private: #endif // defined(GENERATING_DOCUMENTATION) detail::weak_ptr coro_; caller_type& ca_; Handler& handler_; asio::error_code* ec_; }; #if defined(GENERATING_DOCUMENTATION) /// Context object that represents the currently executing coroutine. typedef basic_yield_context yield_context; #else // defined(GENERATING_DOCUMENTATION) typedef basic_yield_context< detail::wrapped_handler< io_service::strand, void(*)(), detail::is_continuation_if_running> > yield_context; #endif // defined(GENERATING_DOCUMENTATION) /** * @defgroup spawn asio::spawn * * @brief Start a new stackful coroutine. * * The spawn() function is a high-level wrapper over the Boost.Coroutine * library. This function enables programs to implement asynchronous logic in a * synchronous manner, as illustrated by the following example: * * @code asio::spawn(my_strand, do_echo); * * // ... * * void do_echo(asio::yield_context yield) * { * try * { * char data[128]; * for (;;) * { * std::size_t length = * my_socket.async_read_some( * asio::buffer(data), yield); * * asio::async_write(my_socket, * asio::buffer(data, length), yield); * } * } * catch (std::exception& e) * { * // ... * } * } @endcode */ /*@{*/ /// Start a new stackful coroutine, calling the specified handler when it /// completes. /** * This function is used to launch a new coroutine. * * @param handler A handler to be called when the coroutine exits. More * importantly, the handler provides an execution context (via the the handler * invocation hook) for the coroutine. The handler must have the signature: * @code void handler(); @endcode * * @param function The coroutine function. The function must have the signature: * @code void function(basic_yield_context yield); @endcode * * @param attributes Boost.Coroutine attributes used to customise the coroutine. 
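 *
 * @par Example
 * A minimal sketch of how this overload might be used; the names
 * @c my_exit_handler and @c my_task are illustrative only and not part of
 * this header:
 * @code // illustrative names only
 * void my_exit_handler() {}
 *
 * void my_task(asio::basic_yield_context<void (*)()> yield)
 * {
 *   // ... perform asynchronous operations, passing yield as the handler ...
 * }
 *
 * // my_exit_handler is invoked after the coroutine exits.
 * asio::spawn(my_exit_handler, my_task); @endcode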
*/ template void spawn(ASIO_MOVE_ARG(Handler) handler, ASIO_MOVE_ARG(Function) function, const boost::coroutines::attributes& attributes = boost::coroutines::attributes()); /// Start a new stackful coroutine, inheriting the execution context of another. /** * This function is used to launch a new coroutine. * * @param ctx Identifies the current coroutine as a parent of the new * coroutine. This specifies that the new coroutine should inherit the * execution context of the parent. For example, if the parent coroutine is * executing in a particular strand, then the new coroutine will execute in the * same strand. * * @param function The coroutine function. The function must have the signature: * @code void function(basic_yield_context yield); @endcode * * @param attributes Boost.Coroutine attributes used to customise the coroutine. */ template void spawn(basic_yield_context ctx, ASIO_MOVE_ARG(Function) function, const boost::coroutines::attributes& attributes = boost::coroutines::attributes()); /// Start a new stackful coroutine that executes in the context of a strand. /** * This function is used to launch a new coroutine. * * @param strand Identifies a strand. By starting multiple coroutines on the * same strand, the implementation ensures that none of those coroutines can * execute simultaneously. * * @param function The coroutine function. The function must have the signature: * @code void function(yield_context yield); @endcode * * @param attributes Boost.Coroutine attributes used to customise the coroutine. */ template void spawn(asio::io_service::strand strand, ASIO_MOVE_ARG(Function) function, const boost::coroutines::attributes& attributes = boost::coroutines::attributes()); /// Start a new stackful coroutine that executes on a given io_service. /** * This function is used to launch a new coroutine. * * @param io_service Identifies the io_service that will run the coroutine. The * new coroutine is implicitly given its own strand within this io_service. * * @param function The coroutine function. The function must have the signature: * @code void function(yield_context yield); @endcode * * @param attributes Boost.Coroutine attributes used to customise the coroutine. */ template void spawn(asio::io_service& io_service, ASIO_MOVE_ARG(Function) function, const boost::coroutines::attributes& attributes = boost::coroutines::attributes()); /*@}*/ } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/spawn.hpp" #endif // ASIO_SPAWN_HPP galera-3-25.3.20/asio/asio/deadline_timer.hpp0000644000015300001660000000203313042054732020500 0ustar jenkinsjenkins// // deadline_timer.hpp // ~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DEADLINE_TIMER_HPP #define ASIO_DEADLINE_TIMER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_BOOST_DATE_TIME) \ || defined(GENERATING_DOCUMENTATION) #include "asio/detail/socket_types.hpp" // Must come before posix_time. #include "asio/basic_deadline_timer.hpp" #include namespace asio { /// Typedef for the typical usage of timer. Uses a UTC clock. 
typedef basic_deadline_timer deadline_timer; } // namespace asio #endif // defined(ASIO_HAS_BOOST_DATE_TIME) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_DEADLINE_TIMER_HPP galera-3-25.3.20/asio/asio/async_result.hpp0000644000015300001660000000467313042054732020262 0ustar jenkinsjenkins// // async_result.hpp // ~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_ASYNC_RESULT_HPP #define ASIO_ASYNC_RESULT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/handler_type.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// An interface for customising the behaviour of an initiating function. /** * This template may be specialised for user-defined handler types. */ template class async_result { public: /// The return type of the initiating function. typedef void type; /// Construct an async result from a given handler. /** * When using a specalised async_result, the constructor has an opportunity * to initialise some state associated with the handler, which is then * returned from the initiating function. */ explicit async_result(Handler&) { } /// Obtain the value to be returned from the initiating function. type get() { } }; namespace detail { // Helper template to deduce the true type of a handler, capture a local copy // of the handler, and then create an async_result for the handler. template struct async_result_init { explicit async_result_init(ASIO_MOVE_ARG(Handler) orig_handler) : handler(ASIO_MOVE_CAST(Handler)(orig_handler)), result(handler) { } typename handler_type::type handler; async_result::type> result; }; template struct async_result_type_helper { typedef typename async_result< typename handler_type::type >::type type; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(GENERATING_DOCUMENTATION) # define ASIO_INITFN_RESULT_TYPE(h, sig) \ void_or_deduced #elif defined(_MSC_VER) && (_MSC_VER < 1500) # define ASIO_INITFN_RESULT_TYPE(h, sig) \ typename ::asio::detail::async_result_type_helper::type #else # define ASIO_INITFN_RESULT_TYPE(h, sig) \ typename ::asio::async_result< \ typename ::asio::handler_type::type>::type #endif #endif // ASIO_ASYNC_RESULT_HPP galera-3-25.3.20/asio/asio/buffer.hpp0000644000015300001660000021036713042054732017017 0ustar jenkinsjenkins// // buffer.hpp // ~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BUFFER_HPP #define ASIO_BUFFER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include #include #include "asio/detail/array_fwd.hpp" #if defined(ASIO_MSVC) # if defined(_HAS_ITERATOR_DEBUGGING) && (_HAS_ITERATOR_DEBUGGING != 0) # if !defined(ASIO_DISABLE_BUFFER_DEBUGGING) # define ASIO_ENABLE_BUFFER_DEBUGGING # endif // !defined(ASIO_DISABLE_BUFFER_DEBUGGING) # endif // defined(_HAS_ITERATOR_DEBUGGING) #endif // defined(ASIO_MSVC) #if defined(__GNUC__) # if defined(_GLIBCXX_DEBUG) # if !defined(ASIO_DISABLE_BUFFER_DEBUGGING) # define ASIO_ENABLE_BUFFER_DEBUGGING # endif // !defined(ASIO_DISABLE_BUFFER_DEBUGGING) # endif // defined(_GLIBCXX_DEBUG) #endif // defined(__GNUC__) #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) # include "asio/detail/function.hpp" #endif // ASIO_ENABLE_BUFFER_DEBUGGING #if defined(ASIO_HAS_BOOST_WORKAROUND) # include # if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x582)) \ || BOOST_WORKAROUND(__SUNPRO_CC, BOOST_TESTED_AT(0x590)) # define ASIO_ENABLE_ARRAY_BUFFER_WORKAROUND # endif // BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x582)) // || BOOST_WORKAROUND(__SUNPRO_CC, BOOST_TESTED_AT(0x590)) #endif // defined(ASIO_HAS_BOOST_WORKAROUND) #if defined(ASIO_ENABLE_ARRAY_BUFFER_WORKAROUND) # include "asio/detail/type_traits.hpp" #endif // defined(ASIO_ENABLE_ARRAY_BUFFER_WORKAROUND) #include "asio/detail/push_options.hpp" namespace asio { class mutable_buffer; class const_buffer; namespace detail { void* buffer_cast_helper(const mutable_buffer&); const void* buffer_cast_helper(const const_buffer&); std::size_t buffer_size_helper(const mutable_buffer&); std::size_t buffer_size_helper(const const_buffer&); } // namespace detail /// Holds a buffer that can be modified. /** * The mutable_buffer class provides a safe representation of a buffer that can * be modified. It does not own the underlying data, and so is cheap to copy or * assign. * * @par Accessing Buffer Contents * * The contents of a buffer may be accessed using the @ref buffer_size * and @ref buffer_cast functions: * * @code asio::mutable_buffer b1 = ...; * std::size_t s1 = asio::buffer_size(b1); * unsigned char* p1 = asio::buffer_cast(b1); * @endcode * * The asio::buffer_cast function permits violations of type safety, so * uses of it in application code should be carefully considered. */ class mutable_buffer { public: /// Construct an empty buffer. mutable_buffer() : data_(0), size_(0) { } /// Construct a buffer to represent a given memory range. 
mutable_buffer(void* data, std::size_t size) : data_(data), size_(size) { } #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) mutable_buffer(void* data, std::size_t size, asio::detail::function debug_check) : data_(data), size_(size), debug_check_(debug_check) { } const asio::detail::function& get_debug_check() const { return debug_check_; } #endif // ASIO_ENABLE_BUFFER_DEBUGGING private: friend void* asio::detail::buffer_cast_helper( const mutable_buffer& b); friend std::size_t asio::detail::buffer_size_helper( const mutable_buffer& b); void* data_; std::size_t size_; #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) asio::detail::function debug_check_; #endif // ASIO_ENABLE_BUFFER_DEBUGGING }; namespace detail { inline void* buffer_cast_helper(const mutable_buffer& b) { #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) if (b.size_ && b.debug_check_) b.debug_check_(); #endif // ASIO_ENABLE_BUFFER_DEBUGGING return b.data_; } inline std::size_t buffer_size_helper(const mutable_buffer& b) { return b.size_; } } // namespace detail /// Adapts a single modifiable buffer so that it meets the requirements of the /// MutableBufferSequence concept. class mutable_buffers_1 : public mutable_buffer { public: /// The type for each element in the list of buffers. typedef mutable_buffer value_type; /// A random-access iterator type that may be used to read elements. typedef const mutable_buffer* const_iterator; /// Construct to represent a given memory range. mutable_buffers_1(void* data, std::size_t size) : mutable_buffer(data, size) { } /// Construct to represent a single modifiable buffer. explicit mutable_buffers_1(const mutable_buffer& b) : mutable_buffer(b) { } /// Get a random-access iterator to the first element. const_iterator begin() const { return this; } /// Get a random-access iterator for one past the last element. const_iterator end() const { return begin() + 1; } }; /// Holds a buffer that cannot be modified. /** * The const_buffer class provides a safe representation of a buffer that cannot * be modified. It does not own the underlying data, and so is cheap to copy or * assign. * * @par Accessing Buffer Contents * * The contents of a buffer may be accessed using the @ref buffer_size * and @ref buffer_cast functions: * * @code asio::const_buffer b1 = ...; * std::size_t s1 = asio::buffer_size(b1); * const unsigned char* p1 = asio::buffer_cast(b1); * @endcode * * The asio::buffer_cast function permits violations of type safety, so * uses of it in application code should be carefully considered. */ class const_buffer { public: /// Construct an empty buffer. const_buffer() : data_(0), size_(0) { } /// Construct a buffer to represent a given memory range. const_buffer(const void* data, std::size_t size) : data_(data), size_(size) { } /// Construct a non-modifiable buffer from a modifiable one. 
const_buffer(const mutable_buffer& b) : data_(asio::detail::buffer_cast_helper(b)), size_(asio::detail::buffer_size_helper(b)) #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) , debug_check_(b.get_debug_check()) #endif // ASIO_ENABLE_BUFFER_DEBUGGING { } #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) const_buffer(const void* data, std::size_t size, asio::detail::function debug_check) : data_(data), size_(size), debug_check_(debug_check) { } const asio::detail::function& get_debug_check() const { return debug_check_; } #endif // ASIO_ENABLE_BUFFER_DEBUGGING private: friend const void* asio::detail::buffer_cast_helper( const const_buffer& b); friend std::size_t asio::detail::buffer_size_helper( const const_buffer& b); const void* data_; std::size_t size_; #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) asio::detail::function debug_check_; #endif // ASIO_ENABLE_BUFFER_DEBUGGING }; namespace detail { inline const void* buffer_cast_helper(const const_buffer& b) { #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) if (b.size_ && b.debug_check_) b.debug_check_(); #endif // ASIO_ENABLE_BUFFER_DEBUGGING return b.data_; } inline std::size_t buffer_size_helper(const const_buffer& b) { return b.size_; } } // namespace detail /// Adapts a single non-modifiable buffer so that it meets the requirements of /// the ConstBufferSequence concept. class const_buffers_1 : public const_buffer { public: /// The type for each element in the list of buffers. typedef const_buffer value_type; /// A random-access iterator type that may be used to read elements. typedef const const_buffer* const_iterator; /// Construct to represent a given memory range. const_buffers_1(const void* data, std::size_t size) : const_buffer(data, size) { } /// Construct to represent a single non-modifiable buffer. explicit const_buffers_1(const const_buffer& b) : const_buffer(b) { } /// Get a random-access iterator to the first element. const_iterator begin() const { return this; } /// Get a random-access iterator for one past the last element. const_iterator end() const { return begin() + 1; } }; /// An implementation of both the ConstBufferSequence and MutableBufferSequence /// concepts to represent a null buffer sequence. class null_buffers { public: /// The type for each element in the list of buffers. typedef mutable_buffer value_type; /// A random-access iterator type that may be used to read elements. typedef const mutable_buffer* const_iterator; /// Get a random-access iterator to the first element. const_iterator begin() const { return &buf_; } /// Get a random-access iterator for one past the last element. const_iterator end() const { return &buf_; } private: mutable_buffer buf_; }; /** @defgroup buffer_size asio::buffer_size * * @brief The asio::buffer_size function determines the total number of * bytes in a buffer or buffer sequence. */ /*@{*/ /// Get the number of bytes in a modifiable buffer. inline std::size_t buffer_size(const mutable_buffer& b) { return detail::buffer_size_helper(b); } /// Get the number of bytes in a modifiable buffer. inline std::size_t buffer_size(const mutable_buffers_1& b) { return detail::buffer_size_helper(b); } /// Get the number of bytes in a non-modifiable buffer. inline std::size_t buffer_size(const const_buffer& b) { return detail::buffer_size_helper(b); } /// Get the number of bytes in a non-modifiable buffer. inline std::size_t buffer_size(const const_buffers_1& b) { return detail::buffer_size_helper(b); } /// Get the total number of bytes in a buffer sequence. 
/** * The @c BufferSequence template parameter may meet either of the @c * ConstBufferSequence or @c MutableBufferSequence type requirements. */ template inline std::size_t buffer_size(const BufferSequence& b) { std::size_t total_buffer_size = 0; typename BufferSequence::const_iterator iter = b.begin(); typename BufferSequence::const_iterator end = b.end(); for (; iter != end; ++iter) total_buffer_size += detail::buffer_size_helper(*iter); return total_buffer_size; } /*@}*/ /** @defgroup buffer_cast asio::buffer_cast * * @brief The asio::buffer_cast function is used to obtain a pointer to * the underlying memory region associated with a buffer. * * @par Examples: * * To access the memory of a non-modifiable buffer, use: * @code asio::const_buffer b1 = ...; * const unsigned char* p1 = asio::buffer_cast(b1); * @endcode * * To access the memory of a modifiable buffer, use: * @code asio::mutable_buffer b2 = ...; * unsigned char* p2 = asio::buffer_cast(b2); * @endcode * * The asio::buffer_cast function permits violations of type safety, so * uses of it in application code should be carefully considered. */ /*@{*/ /// Cast a non-modifiable buffer to a specified pointer to POD type. template inline PointerToPodType buffer_cast(const mutable_buffer& b) { return static_cast(detail::buffer_cast_helper(b)); } /// Cast a non-modifiable buffer to a specified pointer to POD type. template inline PointerToPodType buffer_cast(const const_buffer& b) { return static_cast(detail::buffer_cast_helper(b)); } /*@}*/ /// Create a new modifiable buffer that is offset from the start of another. /** * @relates mutable_buffer */ inline mutable_buffer operator+(const mutable_buffer& b, std::size_t start) { if (start > buffer_size(b)) return mutable_buffer(); char* new_data = buffer_cast(b) + start; std::size_t new_size = buffer_size(b) - start; return mutable_buffer(new_data, new_size #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) , b.get_debug_check() #endif // ASIO_ENABLE_BUFFER_DEBUGGING ); } /// Create a new modifiable buffer that is offset from the start of another. /** * @relates mutable_buffer */ inline mutable_buffer operator+(std::size_t start, const mutable_buffer& b) { if (start > buffer_size(b)) return mutable_buffer(); char* new_data = buffer_cast(b) + start; std::size_t new_size = buffer_size(b) - start; return mutable_buffer(new_data, new_size #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) , b.get_debug_check() #endif // ASIO_ENABLE_BUFFER_DEBUGGING ); } /// Create a new non-modifiable buffer that is offset from the start of another. /** * @relates const_buffer */ inline const_buffer operator+(const const_buffer& b, std::size_t start) { if (start > buffer_size(b)) return const_buffer(); const char* new_data = buffer_cast(b) + start; std::size_t new_size = buffer_size(b) - start; return const_buffer(new_data, new_size #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) , b.get_debug_check() #endif // ASIO_ENABLE_BUFFER_DEBUGGING ); } /// Create a new non-modifiable buffer that is offset from the start of another. 
/** * @relates const_buffer */ inline const_buffer operator+(std::size_t start, const const_buffer& b) { if (start > buffer_size(b)) return const_buffer(); const char* new_data = buffer_cast(b) + start; std::size_t new_size = buffer_size(b) - start; return const_buffer(new_data, new_size #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) , b.get_debug_check() #endif // ASIO_ENABLE_BUFFER_DEBUGGING ); } #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) namespace detail { template class buffer_debug_check { public: buffer_debug_check(Iterator iter) : iter_(iter) { } ~buffer_debug_check() { #if defined(ASIO_MSVC) && (ASIO_MSVC == 1400) // MSVC 8's string iterator checking may crash in a std::string::iterator // object's destructor when the iterator points to an already-destroyed // std::string object, unless the iterator is cleared first. iter_ = Iterator(); #endif // defined(ASIO_MSVC) && (ASIO_MSVC == 1400) } void operator()() { *iter_; } private: Iterator iter_; }; } // namespace detail #endif // ASIO_ENABLE_BUFFER_DEBUGGING /** @defgroup buffer asio::buffer * * @brief The asio::buffer function is used to create a buffer object to * represent raw memory, an array of POD elements, a vector of POD elements, * or a std::string. * * A buffer object represents a contiguous region of memory as a 2-tuple * consisting of a pointer and size in bytes. A tuple of the form {void*, * size_t} specifies a mutable (modifiable) region of memory. Similarly, a * tuple of the form {const void*, size_t} specifies a const * (non-modifiable) region of memory. These two forms correspond to the classes * mutable_buffer and const_buffer, respectively. To mirror C++'s conversion * rules, a mutable_buffer is implicitly convertible to a const_buffer, and the * opposite conversion is not permitted. * * The simplest use case involves reading or writing a single buffer of a * specified size: * * @code sock.send(asio::buffer(data, size)); @endcode * * In the above example, the return value of asio::buffer meets the * requirements of the ConstBufferSequence concept so that it may be directly * passed to the socket's write function. A buffer created for modifiable * memory also meets the requirements of the MutableBufferSequence concept. * * An individual buffer may be created from a builtin array, std::vector, * std::array or boost::array of POD elements. This helps prevent buffer * overruns by automatically determining the size of the buffer: * * @code char d1[128]; * size_t bytes_transferred = sock.receive(asio::buffer(d1)); * * std::vector d2(128); * bytes_transferred = sock.receive(asio::buffer(d2)); * * std::array d3; * bytes_transferred = sock.receive(asio::buffer(d3)); * * boost::array d4; * bytes_transferred = sock.receive(asio::buffer(d4)); @endcode * * In all three cases above, the buffers created are exactly 128 bytes long. * Note that a vector is @e never automatically resized when creating or using * a buffer. The buffer size is determined using the vector's size() * member function, and not its capacity. 
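 *
 * An optional second argument may be supplied to cap the buffer at fewer
 * bytes than the container holds; a sketch, where @c d5 is a further
 * illustrative vector and @c sock is the socket used above:
 *
 * @code std::vector<char> d5(128);
 * // Only the first 64 bytes of d5 are exposed to the receive operation.
 * bytes_transferred = sock.receive(asio::buffer(d5, 64)); @endcode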
* * @par Accessing Buffer Contents * * The contents of a buffer may be accessed using the @ref buffer_size and * @ref buffer_cast functions: * * @code asio::mutable_buffer b1 = ...; * std::size_t s1 = asio::buffer_size(b1); * unsigned char* p1 = asio::buffer_cast(b1); * * asio::const_buffer b2 = ...; * std::size_t s2 = asio::buffer_size(b2); * const void* p2 = asio::buffer_cast(b2); @endcode * * The asio::buffer_cast function permits violations of type safety, so * uses of it in application code should be carefully considered. * * For convenience, the @ref buffer_size function also works on buffer * sequences (that is, types meeting the ConstBufferSequence or * MutableBufferSequence type requirements). In this case, the function returns * the total size of all buffers in the sequence. * * @par Buffer Copying * * The @ref buffer_copy function may be used to copy raw bytes between * individual buffers and buffer sequences. * * In particular, when used with the @ref buffer_size, the @ref buffer_copy * function can be used to linearise a sequence of buffers. For example: * * @code vector buffers = ...; * * vector data(asio::buffer_size(buffers)); * asio::buffer_copy(asio::buffer(data), buffers); @endcode * * Note that @ref buffer_copy is implemented in terms of @c memcpy, and * consequently it cannot be used to copy between overlapping memory regions. * * @par Buffer Invalidation * * A buffer object does not have any ownership of the memory it refers to. It * is the responsibility of the application to ensure the memory region remains * valid until it is no longer required for an I/O operation. When the memory * is no longer available, the buffer is said to have been invalidated. * * For the asio::buffer overloads that accept an argument of type * std::vector, the buffer objects returned are invalidated by any vector * operation that also invalidates all references, pointers and iterators * referring to the elements in the sequence (C++ Std, 23.2.4) * * For the asio::buffer overloads that accept an argument of type * std::basic_string, the buffer objects returned are invalidated according to * the rules defined for invalidation of references, pointers and iterators * referring to elements of the sequence (C++ Std, 21.3). * * @par Buffer Arithmetic * * Buffer objects may be manipulated using simple arithmetic in a safe way * which helps prevent buffer overruns. Consider an array initialised as * follows: * * @code boost::array a = { 'a', 'b', 'c', 'd', 'e' }; @endcode * * A buffer object @c b1 created using: * * @code b1 = asio::buffer(a); @endcode * * represents the entire array, { 'a', 'b', 'c', 'd', 'e' }. An * optional second argument to the asio::buffer function may be used to * limit the size, in bytes, of the buffer: * * @code b2 = asio::buffer(a, 3); @endcode * * such that @c b2 represents the data { 'a', 'b', 'c' }. Even if the * size argument exceeds the actual size of the array, the size of the buffer * object created will be limited to the array size. * * An offset may be applied to an existing buffer to create a new one: * * @code b3 = b1 + 2; @endcode * * where @c b3 will set to represent { 'c', 'd', 'e' }. If the offset * exceeds the size of the existing buffer, the newly created buffer will be * empty. * * Both an offset and size may be specified to create a buffer that corresponds * to a specific range of bytes within an existing buffer: * * @code b4 = asio::buffer(b1 + 1, 3); @endcode * * so that @c b4 will refer to the bytes { 'b', 'c', 'd' }. 
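 *
 * If the offset exceeds the size of the source buffer, the result is simply
 * an empty buffer. For example, a further illustrative buffer @c b5 created
 * using:
 *
 * @code b5 = b1 + 8; @endcode
 *
 * is empty, and asio::buffer_size(b5) returns 0.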
* * @par Buffers and Scatter-Gather I/O * * To read or write using multiple buffers (i.e. scatter-gather I/O), multiple * buffer objects may be assigned into a container that supports the * MutableBufferSequence (for read) or ConstBufferSequence (for write) concepts: * * @code * char d1[128]; * std::vector d2(128); * boost::array d3; * * boost::array bufs1 = { * asio::buffer(d1), * asio::buffer(d2), * asio::buffer(d3) }; * bytes_transferred = sock.receive(bufs1); * * std::vector bufs2; * bufs2.push_back(asio::buffer(d1)); * bufs2.push_back(asio::buffer(d2)); * bufs2.push_back(asio::buffer(d3)); * bytes_transferred = sock.send(bufs2); @endcode */ /*@{*/ /// Create a new modifiable buffer from an existing buffer. /** * @returns mutable_buffers_1(b). */ inline mutable_buffers_1 buffer(const mutable_buffer& b) { return mutable_buffers_1(b); } /// Create a new modifiable buffer from an existing buffer. /** * @returns A mutable_buffers_1 value equivalent to: * @code mutable_buffers_1( * buffer_cast(b), * min(buffer_size(b), max_size_in_bytes)); @endcode */ inline mutable_buffers_1 buffer(const mutable_buffer& b, std::size_t max_size_in_bytes) { return mutable_buffers_1( mutable_buffer(buffer_cast(b), buffer_size(b) < max_size_in_bytes ? buffer_size(b) : max_size_in_bytes #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) , b.get_debug_check() #endif // ASIO_ENABLE_BUFFER_DEBUGGING )); } /// Create a new non-modifiable buffer from an existing buffer. /** * @returns const_buffers_1(b). */ inline const_buffers_1 buffer(const const_buffer& b) { return const_buffers_1(b); } /// Create a new non-modifiable buffer from an existing buffer. /** * @returns A const_buffers_1 value equivalent to: * @code const_buffers_1( * buffer_cast(b), * min(buffer_size(b), max_size_in_bytes)); @endcode */ inline const_buffers_1 buffer(const const_buffer& b, std::size_t max_size_in_bytes) { return const_buffers_1( const_buffer(buffer_cast(b), buffer_size(b) < max_size_in_bytes ? buffer_size(b) : max_size_in_bytes #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) , b.get_debug_check() #endif // ASIO_ENABLE_BUFFER_DEBUGGING )); } /// Create a new modifiable buffer that represents the given memory range. /** * @returns mutable_buffers_1(data, size_in_bytes). */ inline mutable_buffers_1 buffer(void* data, std::size_t size_in_bytes) { return mutable_buffers_1(mutable_buffer(data, size_in_bytes)); } /// Create a new non-modifiable buffer that represents the given memory range. /** * @returns const_buffers_1(data, size_in_bytes). */ inline const_buffers_1 buffer(const void* data, std::size_t size_in_bytes) { return const_buffers_1(const_buffer(data, size_in_bytes)); } /// Create a new modifiable buffer that represents the given POD array. /** * @returns A mutable_buffers_1 value equivalent to: * @code mutable_buffers_1( * static_cast(data), * N * sizeof(PodType)); @endcode */ template inline mutable_buffers_1 buffer(PodType (&data)[N]) { return mutable_buffers_1(mutable_buffer(data, N * sizeof(PodType))); } /// Create a new modifiable buffer that represents the given POD array. /** * @returns A mutable_buffers_1 value equivalent to: * @code mutable_buffers_1( * static_cast(data), * min(N * sizeof(PodType), max_size_in_bytes)); @endcode */ template inline mutable_buffers_1 buffer(PodType (&data)[N], std::size_t max_size_in_bytes) { return mutable_buffers_1( mutable_buffer(data, N * sizeof(PodType) < max_size_in_bytes ? N * sizeof(PodType) : max_size_in_bytes)); } /// Create a new non-modifiable buffer that represents the given POD array. 
/** * @returns A const_buffers_1 value equivalent to: * @code const_buffers_1( * static_cast(data), * N * sizeof(PodType)); @endcode */ template inline const_buffers_1 buffer(const PodType (&data)[N]) { return const_buffers_1(const_buffer(data, N * sizeof(PodType))); } /// Create a new non-modifiable buffer that represents the given POD array. /** * @returns A const_buffers_1 value equivalent to: * @code const_buffers_1( * static_cast(data), * min(N * sizeof(PodType), max_size_in_bytes)); @endcode */ template inline const_buffers_1 buffer(const PodType (&data)[N], std::size_t max_size_in_bytes) { return const_buffers_1( const_buffer(data, N * sizeof(PodType) < max_size_in_bytes ? N * sizeof(PodType) : max_size_in_bytes)); } #if defined(ASIO_ENABLE_ARRAY_BUFFER_WORKAROUND) // Borland C++ and Sun Studio think the overloads: // // unspecified buffer(boost::array& array ...); // // and // // unspecified buffer(boost::array& array ...); // // are ambiguous. This will be worked around by using a buffer_types traits // class that contains typedefs for the appropriate buffer and container // classes, based on whether PodType is const or non-const. namespace detail { template struct buffer_types_base; template <> struct buffer_types_base { typedef mutable_buffer buffer_type; typedef mutable_buffers_1 container_type; }; template <> struct buffer_types_base { typedef const_buffer buffer_type; typedef const_buffers_1 container_type; }; template struct buffer_types : public buffer_types_base::value> { }; } // namespace detail template inline typename detail::buffer_types::container_type buffer(boost::array& data) { typedef typename asio::detail::buffer_types::buffer_type buffer_type; typedef typename asio::detail::buffer_types::container_type container_type; return container_type( buffer_type(data.c_array(), data.size() * sizeof(PodType))); } template inline typename detail::buffer_types::container_type buffer(boost::array& data, std::size_t max_size_in_bytes) { typedef typename asio::detail::buffer_types::buffer_type buffer_type; typedef typename asio::detail::buffer_types::container_type container_type; return container_type( buffer_type(data.c_array(), data.size() * sizeof(PodType) < max_size_in_bytes ? data.size() * sizeof(PodType) : max_size_in_bytes)); } #else // defined(ASIO_ENABLE_ARRAY_BUFFER_WORKAROUND) /// Create a new modifiable buffer that represents the given POD array. /** * @returns A mutable_buffers_1 value equivalent to: * @code mutable_buffers_1( * data.data(), * data.size() * sizeof(PodType)); @endcode */ template inline mutable_buffers_1 buffer(boost::array& data) { return mutable_buffers_1( mutable_buffer(data.c_array(), data.size() * sizeof(PodType))); } /// Create a new modifiable buffer that represents the given POD array. /** * @returns A mutable_buffers_1 value equivalent to: * @code mutable_buffers_1( * data.data(), * min(data.size() * sizeof(PodType), max_size_in_bytes)); @endcode */ template inline mutable_buffers_1 buffer(boost::array& data, std::size_t max_size_in_bytes) { return mutable_buffers_1( mutable_buffer(data.c_array(), data.size() * sizeof(PodType) < max_size_in_bytes ? data.size() * sizeof(PodType) : max_size_in_bytes)); } /// Create a new non-modifiable buffer that represents the given POD array. 
/** * @returns A const_buffers_1 value equivalent to: * @code const_buffers_1( * data.data(), * data.size() * sizeof(PodType)); @endcode */ template inline const_buffers_1 buffer(boost::array& data) { return const_buffers_1( const_buffer(data.data(), data.size() * sizeof(PodType))); } /// Create a new non-modifiable buffer that represents the given POD array. /** * @returns A const_buffers_1 value equivalent to: * @code const_buffers_1( * data.data(), * min(data.size() * sizeof(PodType), max_size_in_bytes)); @endcode */ template inline const_buffers_1 buffer(boost::array& data, std::size_t max_size_in_bytes) { return const_buffers_1( const_buffer(data.data(), data.size() * sizeof(PodType) < max_size_in_bytes ? data.size() * sizeof(PodType) : max_size_in_bytes)); } #endif // defined(ASIO_ENABLE_ARRAY_BUFFER_WORKAROUND) /// Create a new non-modifiable buffer that represents the given POD array. /** * @returns A const_buffers_1 value equivalent to: * @code const_buffers_1( * data.data(), * data.size() * sizeof(PodType)); @endcode */ template inline const_buffers_1 buffer(const boost::array& data) { return const_buffers_1( const_buffer(data.data(), data.size() * sizeof(PodType))); } /// Create a new non-modifiable buffer that represents the given POD array. /** * @returns A const_buffers_1 value equivalent to: * @code const_buffers_1( * data.data(), * min(data.size() * sizeof(PodType), max_size_in_bytes)); @endcode */ template inline const_buffers_1 buffer(const boost::array& data, std::size_t max_size_in_bytes) { return const_buffers_1( const_buffer(data.data(), data.size() * sizeof(PodType) < max_size_in_bytes ? data.size() * sizeof(PodType) : max_size_in_bytes)); } #if defined(ASIO_HAS_STD_ARRAY) || defined(GENERATING_DOCUMENTATION) /// Create a new modifiable buffer that represents the given POD array. /** * @returns A mutable_buffers_1 value equivalent to: * @code mutable_buffers_1( * data.data(), * data.size() * sizeof(PodType)); @endcode */ template inline mutable_buffers_1 buffer(std::array& data) { return mutable_buffers_1( mutable_buffer(data.data(), data.size() * sizeof(PodType))); } /// Create a new modifiable buffer that represents the given POD array. /** * @returns A mutable_buffers_1 value equivalent to: * @code mutable_buffers_1( * data.data(), * min(data.size() * sizeof(PodType), max_size_in_bytes)); @endcode */ template inline mutable_buffers_1 buffer(std::array& data, std::size_t max_size_in_bytes) { return mutable_buffers_1( mutable_buffer(data.data(), data.size() * sizeof(PodType) < max_size_in_bytes ? data.size() * sizeof(PodType) : max_size_in_bytes)); } /// Create a new non-modifiable buffer that represents the given POD array. /** * @returns A const_buffers_1 value equivalent to: * @code const_buffers_1( * data.data(), * data.size() * sizeof(PodType)); @endcode */ template inline const_buffers_1 buffer(std::array& data) { return const_buffers_1( const_buffer(data.data(), data.size() * sizeof(PodType))); } /// Create a new non-modifiable buffer that represents the given POD array. /** * @returns A const_buffers_1 value equivalent to: * @code const_buffers_1( * data.data(), * min(data.size() * sizeof(PodType), max_size_in_bytes)); @endcode */ template inline const_buffers_1 buffer(std::array& data, std::size_t max_size_in_bytes) { return const_buffers_1( const_buffer(data.data(), data.size() * sizeof(PodType) < max_size_in_bytes ? data.size() * sizeof(PodType) : max_size_in_bytes)); } /// Create a new non-modifiable buffer that represents the given POD array. 
/** * @returns A const_buffers_1 value equivalent to: * @code const_buffers_1( * data.data(), * data.size() * sizeof(PodType)); @endcode */ template inline const_buffers_1 buffer(const std::array& data) { return const_buffers_1( const_buffer(data.data(), data.size() * sizeof(PodType))); } /// Create a new non-modifiable buffer that represents the given POD array. /** * @returns A const_buffers_1 value equivalent to: * @code const_buffers_1( * data.data(), * min(data.size() * sizeof(PodType), max_size_in_bytes)); @endcode */ template inline const_buffers_1 buffer(const std::array& data, std::size_t max_size_in_bytes) { return const_buffers_1( const_buffer(data.data(), data.size() * sizeof(PodType) < max_size_in_bytes ? data.size() * sizeof(PodType) : max_size_in_bytes)); } #endif // defined(ASIO_HAS_STD_ARRAY) || defined(GENERATING_DOCUMENTATION) /// Create a new modifiable buffer that represents the given POD vector. /** * @returns A mutable_buffers_1 value equivalent to: * @code mutable_buffers_1( * data.size() ? &data[0] : 0, * data.size() * sizeof(PodType)); @endcode * * @note The buffer is invalidated by any vector operation that would also * invalidate iterators. */ template inline mutable_buffers_1 buffer(std::vector& data) { return mutable_buffers_1( mutable_buffer(data.size() ? &data[0] : 0, data.size() * sizeof(PodType) #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) , detail::buffer_debug_check< typename std::vector::iterator >(data.begin()) #endif // ASIO_ENABLE_BUFFER_DEBUGGING )); } /// Create a new modifiable buffer that represents the given POD vector. /** * @returns A mutable_buffers_1 value equivalent to: * @code mutable_buffers_1( * data.size() ? &data[0] : 0, * min(data.size() * sizeof(PodType), max_size_in_bytes)); @endcode * * @note The buffer is invalidated by any vector operation that would also * invalidate iterators. */ template inline mutable_buffers_1 buffer(std::vector& data, std::size_t max_size_in_bytes) { return mutable_buffers_1( mutable_buffer(data.size() ? &data[0] : 0, data.size() * sizeof(PodType) < max_size_in_bytes ? data.size() * sizeof(PodType) : max_size_in_bytes #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) , detail::buffer_debug_check< typename std::vector::iterator >(data.begin()) #endif // ASIO_ENABLE_BUFFER_DEBUGGING )); } /// Create a new non-modifiable buffer that represents the given POD vector. /** * @returns A const_buffers_1 value equivalent to: * @code const_buffers_1( * data.size() ? &data[0] : 0, * data.size() * sizeof(PodType)); @endcode * * @note The buffer is invalidated by any vector operation that would also * invalidate iterators. */ template inline const_buffers_1 buffer( const std::vector& data) { return const_buffers_1( const_buffer(data.size() ? &data[0] : 0, data.size() * sizeof(PodType) #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) , detail::buffer_debug_check< typename std::vector::const_iterator >(data.begin()) #endif // ASIO_ENABLE_BUFFER_DEBUGGING )); } /// Create a new non-modifiable buffer that represents the given POD vector. /** * @returns A const_buffers_1 value equivalent to: * @code const_buffers_1( * data.size() ? &data[0] : 0, * min(data.size() * sizeof(PodType), max_size_in_bytes)); @endcode * * @note The buffer is invalidated by any vector operation that would also * invalidate iterators. */ template inline const_buffers_1 buffer( const std::vector& data, std::size_t max_size_in_bytes) { return const_buffers_1( const_buffer(data.size() ? &data[0] : 0, data.size() * sizeof(PodType) < max_size_in_bytes ? 
data.size() * sizeof(PodType) : max_size_in_bytes #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) , detail::buffer_debug_check< typename std::vector::const_iterator >(data.begin()) #endif // ASIO_ENABLE_BUFFER_DEBUGGING )); } /// Create a new non-modifiable buffer that represents the given string. /** * @returns const_buffers_1(data.data(), data.size() * sizeof(Elem)). * * @note The buffer is invalidated by any non-const operation called on the * given string object. */ template inline const_buffers_1 buffer( const std::basic_string& data) { return const_buffers_1(const_buffer(data.data(), data.size() * sizeof(Elem) #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) , detail::buffer_debug_check< typename std::basic_string::const_iterator >(data.begin()) #endif // ASIO_ENABLE_BUFFER_DEBUGGING )); } /// Create a new non-modifiable buffer that represents the given string. /** * @returns A const_buffers_1 value equivalent to: * @code const_buffers_1( * data.data(), * min(data.size() * sizeof(Elem), max_size_in_bytes)); @endcode * * @note The buffer is invalidated by any non-const operation called on the * given string object. */ template inline const_buffers_1 buffer( const std::basic_string& data, std::size_t max_size_in_bytes) { return const_buffers_1( const_buffer(data.data(), data.size() * sizeof(Elem) < max_size_in_bytes ? data.size() * sizeof(Elem) : max_size_in_bytes #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) , detail::buffer_debug_check< typename std::basic_string::const_iterator >(data.begin()) #endif // ASIO_ENABLE_BUFFER_DEBUGGING )); } /*@}*/ /** @defgroup buffer_copy asio::buffer_copy * * @brief The asio::buffer_copy function is used to copy bytes from a * source buffer (or buffer sequence) to a target buffer (or buffer sequence). * * The @c buffer_copy function is available in two forms: * * @li A 2-argument form: @c buffer_copy(target, source) * * @li A 3-argument form: @c buffer_copy(target, source, max_bytes_to_copy) * Both forms return the number of bytes actually copied. The number of bytes * copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * @li @c If specified, @c max_bytes_to_copy. * * This prevents buffer overflow, regardless of the buffer sizes used in the * copy operation. * * Note that @ref buffer_copy is implemented in terms of @c memcpy, and * consequently it cannot be used to copy between overlapping memory regions. */ /*@{*/ /// Copies bytes from a source buffer to a target buffer. /** * @param target A modifiable buffer representing the memory region to which * the bytes will be copied. * * @param source A non-modifiable buffer representing the memory region from * which the bytes will be copied. * * @returns The number of bytes copied. * * @note The number of bytes copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * This function is implemented in terms of @c memcpy, and consequently it * cannot be used to copy between overlapping memory regions. */ inline std::size_t buffer_copy(const mutable_buffer& target, const const_buffer& source) { using namespace std; // For memcpy. std::size_t target_size = buffer_size(target); std::size_t source_size = buffer_size(source); std::size_t n = target_size < source_size ? target_size : source_size; memcpy(buffer_cast(target), buffer_cast(source), n); return n; } /// Copies bytes from a source buffer to a target buffer. /** * @param target A modifiable buffer representing the memory region to which * the bytes will be copied. 
* * @param source A non-modifiable buffer representing the memory region from * which the bytes will be copied. * * @returns The number of bytes copied. * * @note The number of bytes copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * This function is implemented in terms of @c memcpy, and consequently it * cannot be used to copy between overlapping memory regions. */ inline std::size_t buffer_copy(const mutable_buffer& target, const const_buffers_1& source) { return buffer_copy(target, static_cast(source)); } /// Copies bytes from a source buffer to a target buffer. /** * @param target A modifiable buffer representing the memory region to which * the bytes will be copied. * * @param source A modifiable buffer representing the memory region from which * the bytes will be copied. The contents of the source buffer will not be * modified. * * @returns The number of bytes copied. * * @note The number of bytes copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * This function is implemented in terms of @c memcpy, and consequently it * cannot be used to copy between overlapping memory regions. */ inline std::size_t buffer_copy(const mutable_buffer& target, const mutable_buffer& source) { return buffer_copy(target, const_buffer(source)); } /// Copies bytes from a source buffer to a target buffer. /** * @param target A modifiable buffer representing the memory region to which * the bytes will be copied. * * @param source A modifiable buffer representing the memory region from which * the bytes will be copied. The contents of the source buffer will not be * modified. * * @returns The number of bytes copied. * * @note The number of bytes copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * This function is implemented in terms of @c memcpy, and consequently it * cannot be used to copy between overlapping memory regions. */ inline std::size_t buffer_copy(const mutable_buffer& target, const mutable_buffers_1& source) { return buffer_copy(target, const_buffer(source)); } /// Copies bytes from a source buffer sequence to a target buffer. /** * @param target A modifiable buffer representing the memory region to which * the bytes will be copied. * * @param source A non-modifiable buffer sequence representing the memory * regions from which the bytes will be copied. * * @returns The number of bytes copied. * * @note The number of bytes copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * This function is implemented in terms of @c memcpy, and consequently it * cannot be used to copy between overlapping memory regions. */ template std::size_t buffer_copy(const mutable_buffer& target, const ConstBufferSequence& source) { std::size_t total_bytes_copied = 0; typename ConstBufferSequence::const_iterator source_iter = source.begin(); typename ConstBufferSequence::const_iterator source_end = source.end(); for (mutable_buffer target_buffer(target); buffer_size(target_buffer) && source_iter != source_end; ++source_iter) { const_buffer source_buffer(*source_iter); std::size_t bytes_copied = buffer_copy(target_buffer, source_buffer); total_bytes_copied += bytes_copied; target_buffer = target_buffer + bytes_copied; } return total_bytes_copied; } /// Copies bytes from a source buffer to a target buffer. /** * @param target A modifiable buffer representing the memory region to which * the bytes will be copied. 
* * @param source A non-modifiable buffer representing the memory region from * which the bytes will be copied. * * @returns The number of bytes copied. * * @note The number of bytes copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * This function is implemented in terms of @c memcpy, and consequently it * cannot be used to copy between overlapping memory regions. */ inline std::size_t buffer_copy(const mutable_buffers_1& target, const const_buffer& source) { return buffer_copy(static_cast(target), source); } /// Copies bytes from a source buffer to a target buffer. /** * @param target A modifiable buffer representing the memory region to which * the bytes will be copied. * * @param source A non-modifiable buffer representing the memory region from * which the bytes will be copied. * * @returns The number of bytes copied. * * @note The number of bytes copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * This function is implemented in terms of @c memcpy, and consequently it * cannot be used to copy between overlapping memory regions. */ inline std::size_t buffer_copy(const mutable_buffers_1& target, const const_buffers_1& source) { return buffer_copy(static_cast(target), static_cast(source)); } /// Copies bytes from a source buffer to a target buffer. /** * @param target A modifiable buffer representing the memory region to which * the bytes will be copied. * * @param source A modifiable buffer representing the memory region from which * the bytes will be copied. The contents of the source buffer will not be * modified. * * @returns The number of bytes copied. * * @note The number of bytes copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * This function is implemented in terms of @c memcpy, and consequently it * cannot be used to copy between overlapping memory regions. */ inline std::size_t buffer_copy(const mutable_buffers_1& target, const mutable_buffer& source) { return buffer_copy(static_cast(target), const_buffer(source)); } /// Copies bytes from a source buffer to a target buffer. /** * @param target A modifiable buffer representing the memory region to which * the bytes will be copied. * * @param source A modifiable buffer representing the memory region from which * the bytes will be copied. The contents of the source buffer will not be * modified. * * @returns The number of bytes copied. * * @note The number of bytes copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * This function is implemented in terms of @c memcpy, and consequently it * cannot be used to copy between overlapping memory regions. */ inline std::size_t buffer_copy(const mutable_buffers_1& target, const mutable_buffers_1& source) { return buffer_copy(static_cast(target), const_buffer(source)); } /// Copies bytes from a source buffer sequence to a target buffer. /** * @param target A modifiable buffer representing the memory region to which * the bytes will be copied. * * @param source A non-modifiable buffer sequence representing the memory * regions from which the bytes will be copied. * * @returns The number of bytes copied. * * @note The number of bytes copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * This function is implemented in terms of @c memcpy, and consequently it * cannot be used to copy between overlapping memory regions. 
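 *
 * @par Example
 * A brief sketch (buffer names chosen for illustration only) that linearises
 * two source buffers into a single target created with @ref buffer:
 *
 * @code char part1[3] = { 'a', 'b', 'c' };
 * char part2[2] = { 'd', 'e' };
 * boost::array<asio::const_buffer, 2> sources = { {
 *     asio::buffer(part1), asio::buffer(part2) } };
 *
 * char out[5];
 * std::size_t n = asio::buffer_copy(asio::buffer(out), sources);
 * // n == 5 and out now holds { 'a', 'b', 'c', 'd', 'e' } @endcode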
*/ template inline std::size_t buffer_copy(const mutable_buffers_1& target, const ConstBufferSequence& source) { return buffer_copy(static_cast(target), source); } /// Copies bytes from a source buffer to a target buffer sequence. /** * @param target A modifiable buffer sequence representing the memory regions to * which the bytes will be copied. * * @param source A non-modifiable buffer representing the memory region from * which the bytes will be copied. * * @returns The number of bytes copied. * * @note The number of bytes copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * This function is implemented in terms of @c memcpy, and consequently it * cannot be used to copy between overlapping memory regions. */ template std::size_t buffer_copy(const MutableBufferSequence& target, const const_buffer& source) { std::size_t total_bytes_copied = 0; typename MutableBufferSequence::const_iterator target_iter = target.begin(); typename MutableBufferSequence::const_iterator target_end = target.end(); for (const_buffer source_buffer(source); buffer_size(source_buffer) && target_iter != target_end; ++target_iter) { mutable_buffer target_buffer(*target_iter); std::size_t bytes_copied = buffer_copy(target_buffer, source_buffer); total_bytes_copied += bytes_copied; source_buffer = source_buffer + bytes_copied; } return total_bytes_copied; } /// Copies bytes from a source buffer to a target buffer sequence. /** * @param target A modifiable buffer sequence representing the memory regions to * which the bytes will be copied. * * @param source A non-modifiable buffer representing the memory region from * which the bytes will be copied. * * @returns The number of bytes copied. * * @note The number of bytes copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * This function is implemented in terms of @c memcpy, and consequently it * cannot be used to copy between overlapping memory regions. */ template inline std::size_t buffer_copy(const MutableBufferSequence& target, const const_buffers_1& source) { return buffer_copy(target, static_cast(source)); } /// Copies bytes from a source buffer to a target buffer sequence. /** * @param target A modifiable buffer sequence representing the memory regions to * which the bytes will be copied. * * @param source A modifiable buffer representing the memory region from which * the bytes will be copied. The contents of the source buffer will not be * modified. * * @returns The number of bytes copied. * * @note The number of bytes copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * This function is implemented in terms of @c memcpy, and consequently it * cannot be used to copy between overlapping memory regions. */ template inline std::size_t buffer_copy(const MutableBufferSequence& target, const mutable_buffer& source) { return buffer_copy(target, const_buffer(source)); } /// Copies bytes from a source buffer to a target buffer sequence. /** * @param target A modifiable buffer sequence representing the memory regions to * which the bytes will be copied. * * @param source A modifiable buffer representing the memory region from which * the bytes will be copied. The contents of the source buffer will not be * modified. * * @returns The number of bytes copied. 
* * @note The number of bytes copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * This function is implemented in terms of @c memcpy, and consequently it * cannot be used to copy between overlapping memory regions. */ template inline std::size_t buffer_copy(const MutableBufferSequence& target, const mutable_buffers_1& source) { return buffer_copy(target, const_buffer(source)); } /// Copies bytes from a source buffer sequence to a target buffer sequence. /** * @param target A modifiable buffer sequence representing the memory regions to * which the bytes will be copied. * * @param source A non-modifiable buffer sequence representing the memory * regions from which the bytes will be copied. * * @returns The number of bytes copied. * * @note The number of bytes copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * This function is implemented in terms of @c memcpy, and consequently it * cannot be used to copy between overlapping memory regions. */ template std::size_t buffer_copy(const MutableBufferSequence& target, const ConstBufferSequence& source) { std::size_t total_bytes_copied = 0; typename MutableBufferSequence::const_iterator target_iter = target.begin(); typename MutableBufferSequence::const_iterator target_end = target.end(); std::size_t target_buffer_offset = 0; typename ConstBufferSequence::const_iterator source_iter = source.begin(); typename ConstBufferSequence::const_iterator source_end = source.end(); std::size_t source_buffer_offset = 0; while (target_iter != target_end && source_iter != source_end) { mutable_buffer target_buffer = mutable_buffer(*target_iter) + target_buffer_offset; const_buffer source_buffer = const_buffer(*source_iter) + source_buffer_offset; std::size_t bytes_copied = buffer_copy(target_buffer, source_buffer); total_bytes_copied += bytes_copied; if (bytes_copied == buffer_size(target_buffer)) { ++target_iter; target_buffer_offset = 0; } else target_buffer_offset += bytes_copied; if (bytes_copied == buffer_size(source_buffer)) { ++source_iter; source_buffer_offset = 0; } else source_buffer_offset += bytes_copied; } return total_bytes_copied; } /// Copies a limited number of bytes from a source buffer to a target buffer. /** * @param target A modifiable buffer representing the memory region to which * the bytes will be copied. * * @param source A non-modifiable buffer representing the memory region from * which the bytes will be copied. * * @param max_bytes_to_copy The maximum number of bytes to be copied. * * @returns The number of bytes copied. * * @note The number of bytes copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * @li @c max_bytes_to_copy * * This function is implemented in terms of @c memcpy, and consequently it * cannot be used to copy between overlapping memory regions. */ inline std::size_t buffer_copy(const mutable_buffer& target, const const_buffer& source, std::size_t max_bytes_to_copy) { return buffer_copy(buffer(target, max_bytes_to_copy), source); } /// Copies a limited number of bytes from a source buffer to a target buffer. /** * @param target A modifiable buffer representing the memory region to which * the bytes will be copied. * * @param source A non-modifiable buffer representing the memory region from * which the bytes will be copied. * * @param max_bytes_to_copy The maximum number of bytes to be copied. * * @returns The number of bytes copied. 
* * @note The number of bytes copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * @li @c max_bytes_to_copy * * This function is implemented in terms of @c memcpy, and consequently it * cannot be used to copy between overlapping memory regions. */ inline std::size_t buffer_copy(const mutable_buffer& target, const const_buffers_1& source, std::size_t max_bytes_to_copy) { return buffer_copy(buffer(target, max_bytes_to_copy), source); } /// Copies a limited number of bytes from a source buffer to a target buffer. /** * @param target A modifiable buffer representing the memory region to which * the bytes will be copied. * * @param source A modifiable buffer representing the memory region from which * the bytes will be copied. The contents of the source buffer will not be * modified. * * @param max_bytes_to_copy The maximum number of bytes to be copied. * * @returns The number of bytes copied. * * @note The number of bytes copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * @li @c max_bytes_to_copy * * This function is implemented in terms of @c memcpy, and consequently it * cannot be used to copy between overlapping memory regions. */ inline std::size_t buffer_copy(const mutable_buffer& target, const mutable_buffer& source, std::size_t max_bytes_to_copy) { return buffer_copy(buffer(target, max_bytes_to_copy), source); } /// Copies a limited number of bytes from a source buffer to a target buffer. /** * @param target A modifiable buffer representing the memory region to which * the bytes will be copied. * * @param source A modifiable buffer representing the memory region from which * the bytes will be copied. The contents of the source buffer will not be * modified. * * @param max_bytes_to_copy The maximum number of bytes to be copied. * * @returns The number of bytes copied. * * @note The number of bytes copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * @li @c max_bytes_to_copy * * This function is implemented in terms of @c memcpy, and consequently it * cannot be used to copy between overlapping memory regions. */ inline std::size_t buffer_copy(const mutable_buffer& target, const mutable_buffers_1& source, std::size_t max_bytes_to_copy) { return buffer_copy(buffer(target, max_bytes_to_copy), source); } /// Copies a limited number of bytes from a source buffer sequence to a target /// buffer. /** * @param target A modifiable buffer representing the memory region to which * the bytes will be copied. * * @param source A non-modifiable buffer sequence representing the memory * regions from which the bytes will be copied. * * @param max_bytes_to_copy The maximum number of bytes to be copied. * * @returns The number of bytes copied. * * @note The number of bytes copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * @li @c max_bytes_to_copy * * This function is implemented in terms of @c memcpy, and consequently it * cannot be used to copy between overlapping memory regions. */ template inline std::size_t buffer_copy(const mutable_buffer& target, const ConstBufferSequence& source, std::size_t max_bytes_to_copy) { return buffer_copy(buffer(target, max_bytes_to_copy), source); } /// Copies a limited number of bytes from a source buffer to a target buffer. /** * @param target A modifiable buffer representing the memory region to which * the bytes will be copied. 
* * @param source A non-modifiable buffer representing the memory region from * which the bytes will be copied. * * @param max_bytes_to_copy The maximum number of bytes to be copied. * * @returns The number of bytes copied. * * @note The number of bytes copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * @li @c max_bytes_to_copy * * This function is implemented in terms of @c memcpy, and consequently it * cannot be used to copy between overlapping memory regions. */ inline std::size_t buffer_copy(const mutable_buffers_1& target, const const_buffer& source, std::size_t max_bytes_to_copy) { return buffer_copy(buffer(target, max_bytes_to_copy), source); } /// Copies a limited number of bytes from a source buffer to a target buffer. /** * @param target A modifiable buffer representing the memory region to which * the bytes will be copied. * * @param source A non-modifiable buffer representing the memory region from * which the bytes will be copied. * * @param max_bytes_to_copy The maximum number of bytes to be copied. * * @returns The number of bytes copied. * * @note The number of bytes copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * @li @c max_bytes_to_copy * * This function is implemented in terms of @c memcpy, and consequently it * cannot be used to copy between overlapping memory regions. */ inline std::size_t buffer_copy(const mutable_buffers_1& target, const const_buffers_1& source, std::size_t max_bytes_to_copy) { return buffer_copy(buffer(target, max_bytes_to_copy), source); } /// Copies a limited number of bytes from a source buffer to a target buffer. /** * @param target A modifiable buffer representing the memory region to which * the bytes will be copied. * * @param source A modifiable buffer representing the memory region from which * the bytes will be copied. The contents of the source buffer will not be * modified. * * @param max_bytes_to_copy The maximum number of bytes to be copied. * * @returns The number of bytes copied. * * @note The number of bytes copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * @li @c max_bytes_to_copy * * This function is implemented in terms of @c memcpy, and consequently it * cannot be used to copy between overlapping memory regions. */ inline std::size_t buffer_copy(const mutable_buffers_1& target, const mutable_buffer& source, std::size_t max_bytes_to_copy) { return buffer_copy(buffer(target, max_bytes_to_copy), source); } /// Copies a limited number of bytes from a source buffer to a target buffer. /** * @param target A modifiable buffer representing the memory region to which * the bytes will be copied. * * @param source A modifiable buffer representing the memory region from which * the bytes will be copied. The contents of the source buffer will not be * modified. * * @param max_bytes_to_copy The maximum number of bytes to be copied. * * @returns The number of bytes copied. * * @note The number of bytes copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * @li @c max_bytes_to_copy * * This function is implemented in terms of @c memcpy, and consequently it * cannot be used to copy between overlapping memory regions. 
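 *
 * @par Example
 * For instance (array names are illustrative), at most three bytes might be
 * copied from one modifiable buffer into another:
 *
 * @code char src[5] = { 'a', 'b', 'c', 'd', 'e' };
 * char dst[5] = { 0 };
 * std::size_t n = asio::buffer_copy(asio::buffer(dst), asio::buffer(src), 3);
 * // n == 3 and dst now begins with { 'a', 'b', 'c' } @endcode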
*/ inline std::size_t buffer_copy(const mutable_buffers_1& target, const mutable_buffers_1& source, std::size_t max_bytes_to_copy) { return buffer_copy(buffer(target, max_bytes_to_copy), source); } /// Copies a limited number of bytes from a source buffer sequence to a target /// buffer. /** * @param target A modifiable buffer representing the memory region to which * the bytes will be copied. * * @param source A non-modifiable buffer sequence representing the memory * regions from which the bytes will be copied. * * @param max_bytes_to_copy The maximum number of bytes to be copied. * * @returns The number of bytes copied. * * @note The number of bytes copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * @li @c max_bytes_to_copy * * This function is implemented in terms of @c memcpy, and consequently it * cannot be used to copy between overlapping memory regions. */ template inline std::size_t buffer_copy(const mutable_buffers_1& target, const ConstBufferSequence& source, std::size_t max_bytes_to_copy) { return buffer_copy(buffer(target, max_bytes_to_copy), source); } /// Copies a limited number of bytes from a source buffer to a target buffer /// sequence. /** * @param target A modifiable buffer sequence representing the memory regions to * which the bytes will be copied. * * @param source A non-modifiable buffer representing the memory region from * which the bytes will be copied. * * @param max_bytes_to_copy The maximum number of bytes to be copied. * * @returns The number of bytes copied. * * @note The number of bytes copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * @li @c max_bytes_to_copy * * This function is implemented in terms of @c memcpy, and consequently it * cannot be used to copy between overlapping memory regions. */ template inline std::size_t buffer_copy(const MutableBufferSequence& target, const const_buffer& source, std::size_t max_bytes_to_copy) { return buffer_copy(target, buffer(source, max_bytes_to_copy)); } /// Copies a limited number of bytes from a source buffer to a target buffer /// sequence. /** * @param target A modifiable buffer sequence representing the memory regions to * which the bytes will be copied. * * @param source A non-modifiable buffer representing the memory region from * which the bytes will be copied. * * @param max_bytes_to_copy The maximum number of bytes to be copied. * * @returns The number of bytes copied. * * @note The number of bytes copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * @li @c max_bytes_to_copy * * This function is implemented in terms of @c memcpy, and consequently it * cannot be used to copy between overlapping memory regions. */ template inline std::size_t buffer_copy(const MutableBufferSequence& target, const const_buffers_1& source, std::size_t max_bytes_to_copy) { return buffer_copy(target, buffer(source, max_bytes_to_copy)); } /// Copies a limited number of bytes from a source buffer to a target buffer /// sequence. /** * @param target A modifiable buffer sequence representing the memory regions to * which the bytes will be copied. * * @param source A modifiable buffer representing the memory region from which * the bytes will be copied. The contents of the source buffer will not be * modified. * * @param max_bytes_to_copy The maximum number of bytes to be copied. * * @returns The number of bytes copied. 
* * @note The number of bytes copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * @li @c max_bytes_to_copy * * This function is implemented in terms of @c memcpy, and consequently it * cannot be used to copy between overlapping memory regions. */ template inline std::size_t buffer_copy(const MutableBufferSequence& target, const mutable_buffer& source, std::size_t max_bytes_to_copy) { return buffer_copy(target, buffer(source, max_bytes_to_copy)); } /// Copies a limited number of bytes from a source buffer to a target buffer /// sequence. /** * @param target A modifiable buffer sequence representing the memory regions to * which the bytes will be copied. * * @param source A modifiable buffer representing the memory region from which * the bytes will be copied. The contents of the source buffer will not be * modified. * * @param max_bytes_to_copy The maximum number of bytes to be copied. * * @returns The number of bytes copied. * * @note The number of bytes copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * @li @c max_bytes_to_copy * * This function is implemented in terms of @c memcpy, and consequently it * cannot be used to copy between overlapping memory regions. */ template inline std::size_t buffer_copy(const MutableBufferSequence& target, const mutable_buffers_1& source, std::size_t max_bytes_to_copy) { return buffer_copy(target, buffer(source, max_bytes_to_copy)); } /// Copies a limited number of bytes from a source buffer sequence to a target /// buffer sequence. /** * @param target A modifiable buffer sequence representing the memory regions to * which the bytes will be copied. * * @param source A non-modifiable buffer sequence representing the memory * regions from which the bytes will be copied. * * @param max_bytes_to_copy The maximum number of bytes to be copied. * * @returns The number of bytes copied. * * @note The number of bytes copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * @li @c max_bytes_to_copy * * This function is implemented in terms of @c memcpy, and consequently it * cannot be used to copy between overlapping memory regions. 
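 *
 * @par Example
 * A short sketch (all names invented for illustration) copying at most four
 * bytes from one buffer sequence into another:
 *
 * @code char in1[2] = { 'a', 'b' };
 * char in2[3] = { 'c', 'd', 'e' };
 * std::vector<asio::const_buffer> src;
 * src.push_back(asio::buffer(in1));
 * src.push_back(asio::buffer(in2));
 *
 * char out1[3];
 * char out2[3];
 * std::vector<asio::mutable_buffer> dst;
 * dst.push_back(asio::buffer(out1));
 * dst.push_back(asio::buffer(out2));
 *
 * std::size_t n = asio::buffer_copy(dst, src, 4);
 * // n == 4: out1 holds { 'a', 'b', 'c' } and out2 begins with { 'd' } @endcode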
*/ template std::size_t buffer_copy(const MutableBufferSequence& target, const ConstBufferSequence& source, std::size_t max_bytes_to_copy) { std::size_t total_bytes_copied = 0; typename MutableBufferSequence::const_iterator target_iter = target.begin(); typename MutableBufferSequence::const_iterator target_end = target.end(); std::size_t target_buffer_offset = 0; typename ConstBufferSequence::const_iterator source_iter = source.begin(); typename ConstBufferSequence::const_iterator source_end = source.end(); std::size_t source_buffer_offset = 0; while (total_bytes_copied != max_bytes_to_copy && target_iter != target_end && source_iter != source_end) { mutable_buffer target_buffer = mutable_buffer(*target_iter) + target_buffer_offset; const_buffer source_buffer = const_buffer(*source_iter) + source_buffer_offset; std::size_t bytes_copied = buffer_copy(target_buffer, source_buffer, max_bytes_to_copy - total_bytes_copied); total_bytes_copied += bytes_copied; if (bytes_copied == buffer_size(target_buffer)) { ++target_iter; target_buffer_offset = 0; } else target_buffer_offset += bytes_copied; if (bytes_copied == buffer_size(source_buffer)) { ++source_iter; source_buffer_offset = 0; } else source_buffer_offset += bytes_copied; } return total_bytes_copied; } /*@}*/ } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_BUFFER_HPP galera-3-25.3.20/asio/asio/basic_streambuf_fwd.hpp0000644000015300001660000000137313042054732021532 0ustar jenkinsjenkins// // basic_streambuf_fwd.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BASIC_STREAMBUF_FWD_HPP #define ASIO_BASIC_STREAMBUF_FWD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_NO_IOSTREAM) #include namespace asio { template > class basic_streambuf; } // namespace asio #endif // !defined(ASIO_NO_IOSTREAM) #endif // ASIO_BASIC_STREAMBUF_FWD_HPP galera-3-25.3.20/asio/asio/thread.hpp0000644000015300001660000000440513042054732017007 0ustar jenkinsjenkins// // thread.hpp // ~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_THREAD_HPP #define ASIO_THREAD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/thread.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// A simple abstraction for starting threads. /** * The asio::thread class implements the smallest possible subset of the * functionality of boost::thread. It is intended to be used only for starting * a thread and waiting for it to exit. If more extensive threading * capabilities are required, you are strongly advised to use something else. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. * * @par Example * A typical use of asio::thread would be to launch a thread to run an * io_service's event processing loop: * * @par * @code asio::io_service io_service; * // ... 
* asio::thread t(boost::bind(&asio::io_service::run, &io_service)); * // ... * t.join(); @endcode */ class thread : private noncopyable { public: /// Start a new thread that executes the supplied function. /** * This constructor creates a new thread that will execute the given function * or function object. * * @param f The function or function object to be run in the thread. The * function signature must be: @code void f(); @endcode */ template explicit thread(Function f) : impl_(f) { } /// Destructor. ~thread() { } /// Wait for the thread to exit. /** * This function will block until the thread has exited. * * If this function is not called before the thread object is destroyed, the * thread itself will continue to run until completion. You will, however, * no longer have the ability to wait for it to exit. */ void join() { impl_.join(); } private: detail::thread impl_; }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_THREAD_HPP galera-3-25.3.20/asio/asio/write.hpp0000644000015300001660000006061013042054732016672 0ustar jenkinsjenkins// // write.hpp // ~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_WRITE_HPP #define ASIO_WRITE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/async_result.hpp" #include "asio/basic_streambuf_fwd.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { /** * @defgroup write asio::write * * @brief Write a certain amount of data to a stream before returning. */ /*@{*/ /// Write all of the supplied data to a stream before returning. /** * This function is used to write a certain number of bytes of data to a stream. * The call will block until one of the following conditions is true: * * @li All of the data in the supplied buffers has been written. That is, the * bytes transferred is equal to the sum of the buffer sizes. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * write_some function. * * @param s The stream to which the data is to be written. The type must support * the SyncWriteStream concept. * * @param buffers One or more buffers containing the data to be written. The sum * of the buffer sizes indicates the maximum number of bytes to write to the * stream. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. * * @par Example * To write a single data buffer use the @ref buffer function as follows: * @code asio::write(s, asio::buffer(data, size)); @endcode * See the @ref buffer documentation for information on writing multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. * * @note This overload is equivalent to calling: * @code asio::write( * s, buffers, * asio::transfer_all()); @endcode */ template std::size_t write(SyncWriteStream& s, const ConstBufferSequence& buffers); /// Write all of the supplied data to a stream before returning. /** * This function is used to write a certain number of bytes of data to a stream. * The call will block until one of the following conditions is true: * * @li All of the data in the supplied buffers has been written. That is, the * bytes transferred is equal to the sum of the buffer sizes. 
* * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * write_some function. * * @param s The stream to which the data is to be written. The type must support * the SyncWriteStream concept. * * @param buffers One or more buffers containing the data to be written. The sum * of the buffer sizes indicates the maximum number of bytes to write to the * stream. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes transferred. * * @par Example * To write a single data buffer use the @ref buffer function as follows: * @code asio::write(s, asio::buffer(data, size), ec); @endcode * See the @ref buffer documentation for information on writing multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. * * @note This overload is equivalent to calling: * @code asio::write( * s, buffers, * asio::transfer_all(), ec); @endcode */ template std::size_t write(SyncWriteStream& s, const ConstBufferSequence& buffers, asio::error_code& ec); /// Write a certain amount of data to a stream before returning. /** * This function is used to write a certain number of bytes of data to a stream. * The call will block until one of the following conditions is true: * * @li All of the data in the supplied buffers has been written. That is, the * bytes transferred is equal to the sum of the buffer sizes. * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the stream's * write_some function. * * @param s The stream to which the data is to be written. The type must support * the SyncWriteStream concept. * * @param buffers One or more buffers containing the data to be written. The sum * of the buffer sizes indicates the maximum number of bytes to write to the * stream. * * @param completion_condition The function object to be called to determine * whether the write operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest write_some operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the write operation is complete. A * non-zero return value indicates the maximum number of bytes to be written on * the next call to the stream's write_some function. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. * * @par Example * To write a single data buffer use the @ref buffer function as follows: * @code asio::write(s, asio::buffer(data, size), * asio::transfer_at_least(32)); @endcode * See the @ref buffer documentation for information on writing multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t write(SyncWriteStream& s, const ConstBufferSequence& buffers, CompletionCondition completion_condition); /// Write a certain amount of data to a stream before returning. /** * This function is used to write a certain number of bytes of data to a stream. * The call will block until one of the following conditions is true: * * @li All of the data in the supplied buffers has been written. That is, the * bytes transferred is equal to the sum of the buffer sizes. * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the stream's * write_some function. 
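 *
 * As an illustrative sketch (assuming @c s, @c data and @c size are defined as
 * in the earlier examples), a completion condition may be combined with an
 * error_code to detect partial writes without throwing:
 *
 * @code asio::error_code ec;
 * std::size_t n = asio::write(s, asio::buffer(data, size),
 *     asio::transfer_at_least(32), ec);
 * if (ec)
 * {
 *   // Only n bytes were successfully written before the error.
 * } @endcode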
* * @param s The stream to which the data is to be written. The type must support * the SyncWriteStream concept. * * @param buffers One or more buffers containing the data to be written. The sum * of the buffer sizes indicates the maximum number of bytes to write to the * stream. * * @param completion_condition The function object to be called to determine * whether the write operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest write_some operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the write operation is complete. A * non-zero return value indicates the maximum number of bytes to be written on * the next call to the stream's write_some function. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes written. If an error occurs, returns the total * number of bytes successfully transferred prior to the error. */ template std::size_t write(SyncWriteStream& s, const ConstBufferSequence& buffers, CompletionCondition completion_condition, asio::error_code& ec); #if !defined(ASIO_NO_IOSTREAM) /// Write all of the supplied data to a stream before returning. /** * This function is used to write a certain number of bytes of data to a stream. * The call will block until one of the following conditions is true: * * @li All of the data in the supplied basic_streambuf has been written. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * write_some function. * * @param s The stream to which the data is to be written. The type must support * the SyncWriteStream concept. * * @param b The basic_streambuf object from which data will be written. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. * * @note This overload is equivalent to calling: * @code asio::write( * s, b, * asio::transfer_all()); @endcode */ template std::size_t write(SyncWriteStream& s, basic_streambuf& b); /// Write all of the supplied data to a stream before returning. /** * This function is used to write a certain number of bytes of data to a stream. * The call will block until one of the following conditions is true: * * @li All of the data in the supplied basic_streambuf has been written. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * write_some function. * * @param s The stream to which the data is to be written. The type must support * the SyncWriteStream concept. * * @param b The basic_streambuf object from which data will be written. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes transferred. * * @note This overload is equivalent to calling: * @code asio::write( * s, b, * asio::transfer_all(), ec); @endcode */ template std::size_t write(SyncWriteStream& s, basic_streambuf& b, asio::error_code& ec); /// Write a certain amount of data to a stream before returning. /** * This function is used to write a certain number of bytes of data to a stream. * The call will block until one of the following conditions is true: * * @li All of the data in the supplied basic_streambuf has been written. * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the stream's * write_some function. 
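 *
 * As a brief sketch (assuming @c s is a stream meeting the SyncWriteStream
 * requirements, and using the asio::streambuf typedef of basic_streambuf),
 * data may be formatted into a streambuf and then written in full:
 *
 * @code asio::streambuf b;
 * std::ostream os(&b);
 * os << "some payload";
 * asio::write(s, b, asio::transfer_all()); @endcode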
* * @param s The stream to which the data is to be written. The type must support * the SyncWriteStream concept. * * @param b The basic_streambuf object from which data will be written. * * @param completion_condition The function object to be called to determine * whether the write operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest write_some operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the write operation is complete. A * non-zero return value indicates the maximum number of bytes to be written on * the next call to the stream's write_some function. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. */ template std::size_t write(SyncWriteStream& s, basic_streambuf& b, CompletionCondition completion_condition); /// Write a certain amount of data to a stream before returning. /** * This function is used to write a certain number of bytes of data to a stream. * The call will block until one of the following conditions is true: * * @li All of the data in the supplied basic_streambuf has been written. * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the stream's * write_some function. * * @param s The stream to which the data is to be written. The type must support * the SyncWriteStream concept. * * @param b The basic_streambuf object from which data will be written. * * @param completion_condition The function object to be called to determine * whether the write operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest write_some operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the write operation is complete. A * non-zero return value indicates the maximum number of bytes to be written on * the next call to the stream's write_some function. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes written. If an error occurs, returns the total * number of bytes successfully transferred prior to the error. */ template std::size_t write(SyncWriteStream& s, basic_streambuf& b, CompletionCondition completion_condition, asio::error_code& ec); #endif // !defined(ASIO_NO_IOSTREAM) /*@}*/ /** * @defgroup async_write asio::async_write * * @brief Start an asynchronous operation to write a certain amount of data to a * stream. */ /*@{*/ /// Start an asynchronous operation to write all of the supplied data to a /// stream. /** * This function is used to asynchronously write a certain number of bytes of * data to a stream. The function call always returns immediately. The * asynchronous operation will continue until one of the following conditions * is true: * * @li All of the data in the supplied buffers has been written. That is, the * bytes transferred is equal to the sum of the buffer sizes. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * async_write_some function, and is known as a composed operation. 
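 *
 * @par Example
 * A sketch of a matching write handler in free-function form (the name is
 * illustrative; a function object or C++11 lambda works equally well):
 * @code void write_done(const asio::error_code& error,
 *     std::size_t bytes_transferred)
 * {
 *   if (error)
 *     std::cerr << "write failed: " << error.message() << std::endl;
 * }
 * ...
 * asio::async_write(s, asio::buffer(data, size), write_done); @endcode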
The * program must ensure that the stream performs no other write operations (such * as async_write, the stream's async_write_some function, or any other composed * operations that perform writes) until this operation completes. * * @param s The stream to which the data is to be written. The type must support * the AsyncWriteStream concept. * * @param buffers One or more buffers containing the data to be written. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param handler The handler to be called when the write operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * * std::size_t bytes_transferred // Number of bytes written from the * // buffers. If an error occurred, * // this will be less than the sum * // of the buffer sizes. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation of * the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @par Example * To write a single data buffer use the @ref buffer function as follows: * @code * asio::async_write(s, asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on writing multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write(AsyncWriteStream& s, const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler); /// Start an asynchronous operation to write a certain amount of data to a /// stream. /** * This function is used to asynchronously write a certain number of bytes of * data to a stream. The function call always returns immediately. The * asynchronous operation will continue until one of the following conditions * is true: * * @li All of the data in the supplied buffers has been written. That is, the * bytes transferred is equal to the sum of the buffer sizes. * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the stream's * async_write_some function, and is known as a composed operation. The * program must ensure that the stream performs no other write operations (such * as async_write, the stream's async_write_some function, or any other composed * operations that perform writes) until this operation completes. * * @param s The stream to which the data is to be written. The type must support * the AsyncWriteStream concept. * * @param buffers One or more buffers containing the data to be written. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param completion_condition The function object to be called to determine * whether the write operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest async_write_some operation. * const asio::error_code& error, * * // Number of bytes transferred so far. 
* std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the write operation is complete. A * non-zero return value indicates the maximum number of bytes to be written on * the next call to the stream's async_write_some function. * * @param handler The handler to be called when the write operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * * std::size_t bytes_transferred // Number of bytes written from the * // buffers. If an error occurred, * // this will be less than the sum * // of the buffer sizes. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation of * the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @par Example * To write a single data buffer use the @ref buffer function as follows: * @code asio::async_write(s, * asio::buffer(data, size), * asio::transfer_at_least(32), * handler); @endcode * See the @ref buffer documentation for information on writing multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write(AsyncWriteStream& s, const ConstBufferSequence& buffers, CompletionCondition completion_condition, ASIO_MOVE_ARG(WriteHandler) handler); #if !defined(ASIO_NO_IOSTREAM) /// Start an asynchronous operation to write all of the supplied data to a /// stream. /** * This function is used to asynchronously write a certain number of bytes of * data to a stream. The function call always returns immediately. The * asynchronous operation will continue until one of the following conditions * is true: * * @li All of the data in the supplied basic_streambuf has been written. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * async_write_some function, and is known as a composed operation. The * program must ensure that the stream performs no other write operations (such * as async_write, the stream's async_write_some function, or any other composed * operations that perform writes) until this operation completes. * * @param s The stream to which the data is to be written. The type must support * the AsyncWriteStream concept. * * @param b A basic_streambuf object from which data will be written. Ownership * of the streambuf is retained by the caller, which must guarantee that it * remains valid until the handler is called. * * @param handler The handler to be called when the write operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * * std::size_t bytes_transferred // Number of bytes written from the * // buffers. If an error occurred, * // this will be less than the sum * // of the buffer sizes. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation of * the handler will be performed in a manner equivalent to using * asio::io_service::post(). 
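 *
 * @par Example
 * A brief sketch (the connected stream @c s is assumed to exist, and the
 * streambuf must stay alive until the handler runs, e.g. as a member of a
 * long-lived connection object):
 * @code void on_write(const asio::error_code& error, std::size_t bytes) { }
 * ...
 * // b is an asio::streambuf that outlives the operation.
 * std::ostream os(&b);
 * os << "hello, world\r\n";
 * asio::async_write(s, b, on_write); @endcode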
*/ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write(AsyncWriteStream& s, basic_streambuf& b, ASIO_MOVE_ARG(WriteHandler) handler); /// Start an asynchronous operation to write a certain amount of data to a /// stream. /** * This function is used to asynchronously write a certain number of bytes of * data to a stream. The function call always returns immediately. The * asynchronous operation will continue until one of the following conditions * is true: * * @li All of the data in the supplied basic_streambuf has been written. * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the stream's * async_write_some function, and is known as a composed operation. The * program must ensure that the stream performs no other write operations (such * as async_write, the stream's async_write_some function, or any other composed * operations that perform writes) until this operation completes. * * @param s The stream to which the data is to be written. The type must support * the AsyncWriteStream concept. * * @param b A basic_streambuf object from which data will be written. Ownership * of the streambuf is retained by the caller, which must guarantee that it * remains valid until the handler is called. * * @param completion_condition The function object to be called to determine * whether the write operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest async_write_some operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the write operation is complete. A * non-zero return value indicates the maximum number of bytes to be written on * the next call to the stream's async_write_some function. * * @param handler The handler to be called when the write operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * * std::size_t bytes_transferred // Number of bytes written from the * // buffers. If an error occurred, * // this will be less than the sum * // of the buffer sizes. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation of * the handler will be performed in a manner equivalent to using * asio::io_service::post(). */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write(AsyncWriteStream& s, basic_streambuf& b, CompletionCondition completion_condition, ASIO_MOVE_ARG(WriteHandler) handler); #endif // !defined(ASIO_NO_IOSTREAM) /*@}*/ } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/write.hpp" #endif // ASIO_WRITE_HPP galera-3-25.3.20/asio/asio/handler_invoke_hook.hpp0000644000015300001660000000515413042054732021552 0ustar jenkinsjenkins// // handler_invoke_hook.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_HANDLER_INVOKE_HOOK_HPP #define ASIO_HANDLER_INVOKE_HOOK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/push_options.hpp" namespace asio { /** @defgroup asio_handler_invoke asio::asio_handler_invoke * * @brief Default invoke function for handlers. * * Completion handlers for asynchronous operations are invoked by the * io_service associated with the corresponding object (e.g. a socket or * deadline_timer). Certain guarantees are made on when the handler may be * invoked, in particular that a handler can only be invoked from a thread that * is currently calling @c run() on the corresponding io_service object. * Handlers may subsequently be invoked through other objects (such as * io_service::strand objects) that provide additional guarantees. * * When asynchronous operations are composed from other asynchronous * operations, all intermediate handlers should be invoked using the same * method as the final handler. This is required to ensure that user-defined * objects are not accessed in a way that may violate the guarantees. This * hooking function ensures that the invoked method used for the final handler * is accessible at each intermediate step. * * Implement asio_handler_invoke for your own handlers to specify a custom * invocation strategy. * * This default implementation invokes the function object like so: * @code function(); @endcode * If necessary, the default implementation makes a copy of the function object * so that the non-const operator() can be used. * * @par Example * @code * class my_handler; * * template * void asio_handler_invoke(Function function, my_handler* context) * { * context->strand_.dispatch(function); * } * @endcode */ /*@{*/ /// Default handler invocation hook used for non-const function objects. template inline void asio_handler_invoke(Function& function, ...) { function(); } /// Default handler invocation hook used for const function objects. template inline void asio_handler_invoke(const Function& function, ...) { Function tmp(function); tmp(); } /*@}*/ } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_HANDLER_INVOKE_HOOK_HPP galera-3-25.3.20/asio/asio/version.hpp0000644000015300001660000000120413042054732017217 0ustar jenkinsjenkins// // version.hpp // ~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_VERSION_HPP #define ASIO_VERSION_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) // ASIO_VERSION % 100 is the sub-minor version // ASIO_VERSION / 100 % 1000 is the minor version // ASIO_VERSION / 100000 is the major version #define ASIO_VERSION 101006 // 1.10.6 #endif // ASIO_VERSION_HPP galera-3-25.3.20/asio/asio/unyield.hpp0000644000015300001660000000057513042054732017215 0ustar jenkinsjenkins// // unyield.hpp // ~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifdef reenter # undef reenter #endif #ifdef yield # undef yield #endif #ifdef fork # undef fork #endif galera-3-25.3.20/asio/asio/ip/0000755000015300001660000000000013042054732015434 5ustar jenkinsjenkinsgalera-3-25.3.20/asio/asio/ip/detail/0000755000015300001660000000000013042054732016676 5ustar jenkinsjenkinsgalera-3-25.3.20/asio/asio/ip/detail/endpoint.hpp0000644000015300001660000000667213042054732021242 0ustar jenkinsjenkins// // ip/detail/endpoint.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_DETAIL_ENDPOINT_HPP #define ASIO_IP_DETAIL_ENDPOINT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/socket_types.hpp" #include "asio/detail/winsock_init.hpp" #include "asio/error_code.hpp" #include "asio/ip/address.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { namespace detail { // Helper class for implementating an IP endpoint. class endpoint { public: // Default constructor. ASIO_DECL endpoint(); // Construct an endpoint using a family and port number. ASIO_DECL endpoint(int family, unsigned short port_num); // Construct an endpoint using an address and port number. ASIO_DECL endpoint(const asio::ip::address& addr, unsigned short port_num); // Copy constructor. endpoint(const endpoint& other) : data_(other.data_) { } // Assign from another endpoint. endpoint& operator=(const endpoint& other) { data_ = other.data_; return *this; } // Get the underlying endpoint in the native type. asio::detail::socket_addr_type* data() { return &data_.base; } // Get the underlying endpoint in the native type. const asio::detail::socket_addr_type* data() const { return &data_.base; } // Get the underlying size of the endpoint in the native type. std::size_t size() const { if (is_v4()) return sizeof(asio::detail::sockaddr_in4_type); else return sizeof(asio::detail::sockaddr_in6_type); } // Set the underlying size of the endpoint in the native type. ASIO_DECL void resize(std::size_t new_size); // Get the capacity of the endpoint in the native type. std::size_t capacity() const { return sizeof(data_); } // Get the port associated with the endpoint. ASIO_DECL unsigned short port() const; // Set the port associated with the endpoint. ASIO_DECL void port(unsigned short port_num); // Get the IP address associated with the endpoint. ASIO_DECL asio::ip::address address() const; // Set the IP address associated with the endpoint. ASIO_DECL void address(const asio::ip::address& addr); // Compare two endpoints for equality. ASIO_DECL friend bool operator==( const endpoint& e1, const endpoint& e2); // Compare endpoints for ordering. ASIO_DECL friend bool operator<( const endpoint& e1, const endpoint& e2); // Determine whether the endpoint is IPv4. bool is_v4() const { return data_.base.sa_family == ASIO_OS_DEF(AF_INET); } #if !defined(ASIO_NO_IOSTREAM) // Convert to a string. ASIO_DECL std::string to_string(asio::error_code& ec) const; #endif // !defined(ASIO_NO_IOSTREAM) private: // The underlying IP socket address. 
union data_union { asio::detail::socket_addr_type base; asio::detail::sockaddr_in4_type v4; asio::detail::sockaddr_in6_type v6; } data_; }; } // namespace detail } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/ip/detail/impl/endpoint.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_IP_DETAIL_ENDPOINT_HPP galera-3-25.3.20/asio/asio/ip/detail/socket_option.hpp0000644000015300001660000003346213042054732022277 0ustar jenkinsjenkins// // detail/socket_option.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_DETAIL_SOCKET_OPTION_HPP #define ASIO_IP_DETAIL_SOCKET_OPTION_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include #include "asio/detail/socket_ops.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/ip/address.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { namespace detail { namespace socket_option { // Helper template for implementing multicast enable loopback options. template class multicast_enable_loopback { public: #if defined(__sun) || defined(__osf__) typedef unsigned char ipv4_value_type; typedef unsigned char ipv6_value_type; #elif defined(_AIX) || defined(__hpux) || defined(__QNXNTO__) typedef unsigned char ipv4_value_type; typedef unsigned int ipv6_value_type; #else typedef int ipv4_value_type; typedef int ipv6_value_type; #endif // Default constructor. multicast_enable_loopback() : ipv4_value_(0), ipv6_value_(0) { } // Construct with a specific option value. explicit multicast_enable_loopback(bool v) : ipv4_value_(v ? 1 : 0), ipv6_value_(v ? 1 : 0) { } // Set the value of the boolean. multicast_enable_loopback& operator=(bool v) { ipv4_value_ = v ? 1 : 0; ipv6_value_ = v ? 1 : 0; return *this; } // Get the current value of the boolean. bool value() const { return !!ipv4_value_; } // Convert to bool. operator bool() const { return !!ipv4_value_; } // Test for false. bool operator!() const { return !ipv4_value_; } // Get the level of the socket option. template int level(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return IPv6_Level; return IPv4_Level; } // Get the name of the socket option. template int name(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return IPv6_Name; return IPv4_Name; } // Get the address of the boolean data. template void* data(const Protocol& protocol) { if (protocol.family() == PF_INET6) return &ipv6_value_; return &ipv4_value_; } // Get the address of the boolean data. template const void* data(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return &ipv6_value_; return &ipv4_value_; } // Get the size of the boolean data. template std::size_t size(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return sizeof(ipv6_value_); return sizeof(ipv4_value_); } // Set the size of the boolean data. template void resize(const Protocol& protocol, std::size_t s) { if (protocol.family() == PF_INET6) { if (s != sizeof(ipv6_value_)) { std::length_error ex("multicast_enable_loopback socket option resize"); asio::detail::throw_exception(ex); } ipv4_value_ = ipv6_value_ ? 
1 : 0; } else { if (s != sizeof(ipv4_value_)) { std::length_error ex("multicast_enable_loopback socket option resize"); asio::detail::throw_exception(ex); } ipv6_value_ = ipv4_value_ ? 1 : 0; } } private: ipv4_value_type ipv4_value_; ipv6_value_type ipv6_value_; }; // Helper template for implementing unicast hops options. template class unicast_hops { public: // Default constructor. unicast_hops() : value_(0) { } // Construct with a specific option value. explicit unicast_hops(int v) : value_(v) { } // Set the value of the option. unicast_hops& operator=(int v) { value_ = v; return *this; } // Get the current value of the option. int value() const { return value_; } // Get the level of the socket option. template int level(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return IPv6_Level; return IPv4_Level; } // Get the name of the socket option. template int name(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return IPv6_Name; return IPv4_Name; } // Get the address of the data. template int* data(const Protocol&) { return &value_; } // Get the address of the data. template const int* data(const Protocol&) const { return &value_; } // Get the size of the data. template std::size_t size(const Protocol&) const { return sizeof(value_); } // Set the size of the data. template void resize(const Protocol&, std::size_t s) { if (s != sizeof(value_)) { std::length_error ex("unicast hops socket option resize"); asio::detail::throw_exception(ex); } #if defined(__hpux) if (value_ < 0) value_ = value_ & 0xFF; #endif } private: int value_; }; // Helper template for implementing multicast hops options. template class multicast_hops { public: #if defined(ASIO_WINDOWS) && defined(UNDER_CE) typedef int ipv4_value_type; #else typedef unsigned char ipv4_value_type; #endif typedef int ipv6_value_type; // Default constructor. multicast_hops() : ipv4_value_(0), ipv6_value_(0) { } // Construct with a specific option value. explicit multicast_hops(int v) { if (v < 0 || v > 255) { std::out_of_range ex("multicast hops value out of range"); asio::detail::throw_exception(ex); } ipv4_value_ = (ipv4_value_type)v; ipv6_value_ = v; } // Set the value of the option. multicast_hops& operator=(int v) { if (v < 0 || v > 255) { std::out_of_range ex("multicast hops value out of range"); asio::detail::throw_exception(ex); } ipv4_value_ = (ipv4_value_type)v; ipv6_value_ = v; return *this; } // Get the current value of the option. int value() const { return ipv6_value_; } // Get the level of the socket option. template int level(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return IPv6_Level; return IPv4_Level; } // Get the name of the socket option. template int name(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return IPv6_Name; return IPv4_Name; } // Get the address of the data. template void* data(const Protocol& protocol) { if (protocol.family() == PF_INET6) return &ipv6_value_; return &ipv4_value_; } // Get the address of the data. template const void* data(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return &ipv6_value_; return &ipv4_value_; } // Get the size of the data. template std::size_t size(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return sizeof(ipv6_value_); return sizeof(ipv4_value_); } // Set the size of the data. 
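  // These helpers are normally reached through the public option typedefs in
  // asio/ip/multicast.hpp and asio/ip/unicast.hpp. A brief sketch, assuming
  // an io_service named ios:
  //
  //   asio::ip::udp::socket sock(ios, asio::ip::udp::v4());
  //   sock.set_option(asio::ip::multicast::hops(4));            // multicast TTL
  //   sock.set_option(asio::ip::multicast::enable_loopback(false));
  //
  // The same option object selects the IPv4 or IPv6 socket option level
  // according to the protocol family of the socket it is applied to.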
template void resize(const Protocol& protocol, std::size_t s) { if (protocol.family() == PF_INET6) { if (s != sizeof(ipv6_value_)) { std::length_error ex("multicast hops socket option resize"); asio::detail::throw_exception(ex); } if (ipv6_value_ < 0) ipv4_value_ = 0; else if (ipv6_value_ > 255) ipv4_value_ = 255; else ipv4_value_ = (ipv4_value_type)ipv6_value_; } else { if (s != sizeof(ipv4_value_)) { std::length_error ex("multicast hops socket option resize"); asio::detail::throw_exception(ex); } ipv6_value_ = ipv4_value_; } } private: ipv4_value_type ipv4_value_; ipv6_value_type ipv6_value_; }; // Helper template for implementing ip_mreq-based options. template class multicast_request { public: // Default constructor. multicast_request() : ipv4_value_(), // Zero-initialisation gives the "any" address. ipv6_value_() // Zero-initialisation gives the "any" address. { } // Construct with multicast address only. explicit multicast_request(const asio::ip::address& multicast_address) : ipv4_value_(), // Zero-initialisation gives the "any" address. ipv6_value_() // Zero-initialisation gives the "any" address. { if (multicast_address.is_v6()) { using namespace std; // For memcpy. asio::ip::address_v6 ipv6_address = multicast_address.to_v6(); asio::ip::address_v6::bytes_type bytes = ipv6_address.to_bytes(); memcpy(ipv6_value_.ipv6mr_multiaddr.s6_addr, bytes.data(), 16); ipv6_value_.ipv6mr_interface = ipv6_address.scope_id(); } else { ipv4_value_.imr_multiaddr.s_addr = asio::detail::socket_ops::host_to_network_long( multicast_address.to_v4().to_ulong()); ipv4_value_.imr_interface.s_addr = asio::detail::socket_ops::host_to_network_long( asio::ip::address_v4::any().to_ulong()); } } // Construct with multicast address and IPv4 address specifying an interface. explicit multicast_request( const asio::ip::address_v4& multicast_address, const asio::ip::address_v4& network_interface = asio::ip::address_v4::any()) : ipv6_value_() // Zero-initialisation gives the "any" address. { ipv4_value_.imr_multiaddr.s_addr = asio::detail::socket_ops::host_to_network_long( multicast_address.to_ulong()); ipv4_value_.imr_interface.s_addr = asio::detail::socket_ops::host_to_network_long( network_interface.to_ulong()); } // Construct with multicast address and IPv6 network interface index. explicit multicast_request( const asio::ip::address_v6& multicast_address, unsigned long network_interface = 0) : ipv4_value_() // Zero-initialisation gives the "any" address. { using namespace std; // For memcpy. asio::ip::address_v6::bytes_type bytes = multicast_address.to_bytes(); memcpy(ipv6_value_.ipv6mr_multiaddr.s6_addr, bytes.data(), 16); if (network_interface) ipv6_value_.ipv6mr_interface = network_interface; else ipv6_value_.ipv6mr_interface = multicast_address.scope_id(); } // Get the level of the socket option. template int level(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return IPv6_Level; return IPv4_Level; } // Get the name of the socket option. template int name(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return IPv6_Name; return IPv4_Name; } // Get the address of the option data. template const void* data(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return &ipv6_value_; return &ipv4_value_; } // Get the size of the option data. 
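  // These request helpers back the asio::ip::multicast::join_group and
  // leave_group option types. A short sketch, assuming an io_service named
  // ios and a placeholder group address:
  //
  //   asio::ip::udp::socket sock(ios, asio::ip::udp::v4());
  //   sock.set_option(asio::ip::multicast::join_group(
  //       asio::ip::address::from_string("239.255.0.1")));
  //
  // For IPv6 groups the optional second constructor argument selects the
  // network interface by index rather than by IPv4 interface address.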
template std::size_t size(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return sizeof(ipv6_value_); return sizeof(ipv4_value_); } private: asio::detail::in4_mreq_type ipv4_value_; asio::detail::in6_mreq_type ipv6_value_; }; // Helper template for implementing options that specify a network interface. template class network_interface { public: // Default constructor. network_interface() { ipv4_value_.s_addr = asio::detail::socket_ops::host_to_network_long( asio::ip::address_v4::any().to_ulong()); ipv6_value_ = 0; } // Construct with IPv4 interface. explicit network_interface(const asio::ip::address_v4& ipv4_interface) { ipv4_value_.s_addr = asio::detail::socket_ops::host_to_network_long( ipv4_interface.to_ulong()); ipv6_value_ = 0; } // Construct with IPv6 interface. explicit network_interface(unsigned int ipv6_interface) { ipv4_value_.s_addr = asio::detail::socket_ops::host_to_network_long( asio::ip::address_v4::any().to_ulong()); ipv6_value_ = ipv6_interface; } // Get the level of the socket option. template int level(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return IPv6_Level; return IPv4_Level; } // Get the name of the socket option. template int name(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return IPv6_Name; return IPv4_Name; } // Get the address of the option data. template const void* data(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return &ipv6_value_; return &ipv4_value_; } // Get the size of the option data. template std::size_t size(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return sizeof(ipv6_value_); return sizeof(ipv4_value_); } private: asio::detail::in4_addr_type ipv4_value_; unsigned int ipv6_value_; }; } // namespace socket_option } // namespace detail } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_DETAIL_SOCKET_OPTION_HPP galera-3-25.3.20/asio/asio/ip/detail/impl/0000755000015300001660000000000013042054732017637 5ustar jenkinsjenkinsgalera-3-25.3.20/asio/asio/ip/detail/impl/endpoint.ipp0000644000015300001660000001252213042054732022173 0ustar jenkinsjenkins// // ip/detail/impl/endpoint.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_DETAIL_IMPL_ENDPOINT_IPP #define ASIO_IP_DETAIL_IMPL_ENDPOINT_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #if !defined(ASIO_NO_IOSTREAM) # include #endif // !defined(ASIO_NO_IOSTREAM) #include "asio/detail/socket_ops.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/ip/detail/endpoint.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { namespace detail { endpoint::endpoint() : data_() { data_.v4.sin_family = ASIO_OS_DEF(AF_INET); data_.v4.sin_port = 0; data_.v4.sin_addr.s_addr = ASIO_OS_DEF(INADDR_ANY); } endpoint::endpoint(int family, unsigned short port_num) : data_() { using namespace std; // For memcpy. 
if (family == ASIO_OS_DEF(AF_INET)) { data_.v4.sin_family = ASIO_OS_DEF(AF_INET); data_.v4.sin_port = asio::detail::socket_ops::host_to_network_short(port_num); data_.v4.sin_addr.s_addr = ASIO_OS_DEF(INADDR_ANY); } else { data_.v6.sin6_family = ASIO_OS_DEF(AF_INET6); data_.v6.sin6_port = asio::detail::socket_ops::host_to_network_short(port_num); data_.v6.sin6_flowinfo = 0; data_.v6.sin6_addr.s6_addr[0] = 0; data_.v6.sin6_addr.s6_addr[1] = 0; data_.v6.sin6_addr.s6_addr[2] = 0, data_.v6.sin6_addr.s6_addr[3] = 0; data_.v6.sin6_addr.s6_addr[4] = 0, data_.v6.sin6_addr.s6_addr[5] = 0; data_.v6.sin6_addr.s6_addr[6] = 0, data_.v6.sin6_addr.s6_addr[7] = 0; data_.v6.sin6_addr.s6_addr[8] = 0, data_.v6.sin6_addr.s6_addr[9] = 0; data_.v6.sin6_addr.s6_addr[10] = 0, data_.v6.sin6_addr.s6_addr[11] = 0; data_.v6.sin6_addr.s6_addr[12] = 0, data_.v6.sin6_addr.s6_addr[13] = 0; data_.v6.sin6_addr.s6_addr[14] = 0, data_.v6.sin6_addr.s6_addr[15] = 0; data_.v6.sin6_scope_id = 0; } } endpoint::endpoint(const asio::ip::address& addr, unsigned short port_num) : data_() { using namespace std; // For memcpy. if (addr.is_v4()) { data_.v4.sin_family = ASIO_OS_DEF(AF_INET); data_.v4.sin_port = asio::detail::socket_ops::host_to_network_short(port_num); data_.v4.sin_addr.s_addr = asio::detail::socket_ops::host_to_network_long( static_cast( addr.to_v4().to_ulong())); } else { data_.v6.sin6_family = ASIO_OS_DEF(AF_INET6); data_.v6.sin6_port = asio::detail::socket_ops::host_to_network_short(port_num); data_.v6.sin6_flowinfo = 0; asio::ip::address_v6 v6_addr = addr.to_v6(); asio::ip::address_v6::bytes_type bytes = v6_addr.to_bytes(); memcpy(data_.v6.sin6_addr.s6_addr, bytes.data(), 16); data_.v6.sin6_scope_id = static_cast( v6_addr.scope_id()); } } void endpoint::resize(std::size_t new_size) { if (new_size > sizeof(asio::detail::sockaddr_storage_type)) { asio::error_code ec(asio::error::invalid_argument); asio::detail::throw_error(ec); } } unsigned short endpoint::port() const { if (is_v4()) { return asio::detail::socket_ops::network_to_host_short( data_.v4.sin_port); } else { return asio::detail::socket_ops::network_to_host_short( data_.v6.sin6_port); } } void endpoint::port(unsigned short port_num) { if (is_v4()) { data_.v4.sin_port = asio::detail::socket_ops::host_to_network_short(port_num); } else { data_.v6.sin6_port = asio::detail::socket_ops::host_to_network_short(port_num); } } asio::ip::address endpoint::address() const { using namespace std; // For memcpy. 
if (is_v4()) { return asio::ip::address_v4( asio::detail::socket_ops::network_to_host_long( data_.v4.sin_addr.s_addr)); } else { asio::ip::address_v6::bytes_type bytes; #if defined(ASIO_HAS_STD_ARRAY) memcpy(bytes.data(), data_.v6.sin6_addr.s6_addr, 16); #else // defined(ASIO_HAS_STD_ARRAY) memcpy(bytes.elems, data_.v6.sin6_addr.s6_addr, 16); #endif // defined(ASIO_HAS_STD_ARRAY) return asio::ip::address_v6(bytes, data_.v6.sin6_scope_id); } } void endpoint::address(const asio::ip::address& addr) { endpoint tmp_endpoint(addr, port()); data_ = tmp_endpoint.data_; } bool operator==(const endpoint& e1, const endpoint& e2) { return e1.address() == e2.address() && e1.port() == e2.port(); } bool operator<(const endpoint& e1, const endpoint& e2) { if (e1.address() < e2.address()) return true; if (e1.address() != e2.address()) return false; return e1.port() < e2.port(); } #if !defined(ASIO_NO_IOSTREAM) std::string endpoint::to_string(asio::error_code& ec) const { std::string a = address().to_string(ec); if (ec) return std::string(); std::ostringstream tmp_os; tmp_os.imbue(std::locale::classic()); if (is_v4()) tmp_os << a; else tmp_os << '[' << a << ']'; tmp_os << ':' << port(); return tmp_os.str(); } #endif // !defined(ASIO_NO_IOSTREAM) } // namespace detail } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_DETAIL_IMPL_ENDPOINT_IPP galera-3-25.3.20/asio/asio/ip/tcp.hpp0000644000015300001660000000700713042054732016737 0ustar jenkinsjenkins// // ip/tcp.hpp // ~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_TCP_HPP #define ASIO_IP_TCP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/basic_socket_acceptor.hpp" #include "asio/basic_socket_iostream.hpp" #include "asio/basic_stream_socket.hpp" #include "asio/detail/socket_option.hpp" #include "asio/detail/socket_types.hpp" #include "asio/ip/basic_endpoint.hpp" #include "asio/ip/basic_resolver.hpp" #include "asio/ip/basic_resolver_iterator.hpp" #include "asio/ip/basic_resolver_query.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { /// Encapsulates the flags needed for TCP. /** * The asio::ip::tcp class contains flags necessary for TCP sockets. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Safe. * * @par Concepts: * Protocol, InternetProtocol. */ class tcp { public: /// The type of a TCP endpoint. typedef basic_endpoint endpoint; /// Construct to represent the IPv4 TCP protocol. static tcp v4() { return tcp(ASIO_OS_DEF(AF_INET)); } /// Construct to represent the IPv6 TCP protocol. static tcp v6() { return tcp(ASIO_OS_DEF(AF_INET6)); } /// Obtain an identifier for the type of the protocol. int type() const { return ASIO_OS_DEF(SOCK_STREAM); } /// Obtain an identifier for the protocol. int protocol() const { return ASIO_OS_DEF(IPPROTO_TCP); } /// Obtain an identifier for the protocol family. int family() const { return family_; } /// The TCP socket type. typedef basic_stream_socket socket; /// The TCP acceptor type. typedef basic_socket_acceptor acceptor; /// The TCP resolver type. typedef basic_resolver resolver; #if !defined(ASIO_NO_IOSTREAM) /// The TCP iostream type. 
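  /// A short usage sketch (blocking; the host and service names below are
  /// placeholders):
  /// @code
  /// asio::ip::tcp::iostream s("www.example.com", "http");
  /// s << "GET / HTTP/1.0\r\n\r\n";
  /// std::string status_line;
  /// std::getline(s, status_line); // first line of the HTTP response
  /// @endcode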
typedef basic_socket_iostream iostream; #endif // !defined(ASIO_NO_IOSTREAM) /// Socket option for disabling the Nagle algorithm. /** * Implements the IPPROTO_TCP/TCP_NODELAY socket option. * * @par Examples * Setting the option: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::ip::tcp::no_delay option(true); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::ip::tcp::no_delay option; * socket.get_option(option); * bool is_set = option.value(); * @endcode * * @par Concepts: * Socket_Option, Boolean_Socket_Option. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined no_delay; #else typedef asio::detail::socket_option::boolean< ASIO_OS_DEF(IPPROTO_TCP), ASIO_OS_DEF(TCP_NODELAY)> no_delay; #endif /// Compare two protocols for equality. friend bool operator==(const tcp& p1, const tcp& p2) { return p1.family_ == p2.family_; } /// Compare two protocols for inequality. friend bool operator!=(const tcp& p1, const tcp& p2) { return p1.family_ != p2.family_; } private: // Construct with a specific family. explicit tcp(int protocol_family) : family_(protocol_family) { } int family_; }; } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_TCP_HPP galera-3-25.3.20/asio/asio/ip/address_v6.hpp0000644000015300001660000001540213042054732020207 0ustar jenkinsjenkins// // ip/address_v6.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_ADDRESS_V6_HPP #define ASIO_IP_ADDRESS_V6_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/array.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/winsock_init.hpp" #include "asio/error_code.hpp" #include "asio/ip/address_v4.hpp" #if !defined(ASIO_NO_IOSTREAM) # include #endif // !defined(ASIO_NO_IOSTREAM) #include "asio/detail/push_options.hpp" namespace asio { namespace ip { /// Implements IP version 6 style addresses. /** * The asio::ip::address_v6 class provides the ability to use and * manipulate IP version 6 addresses. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ class address_v6 { public: /// The type used to represent an address as an array of bytes. /** * @note This type is defined in terms of the C++0x template @c std::array * when it is available. Otherwise, it uses @c boost:array. */ #if defined(GENERATING_DOCUMENTATION) typedef array bytes_type; #else typedef asio::detail::array bytes_type; #endif /// Default constructor. ASIO_DECL address_v6(); /// Construct an address from raw bytes and scope ID. ASIO_DECL explicit address_v6(const bytes_type& bytes, unsigned long scope_id = 0); /// Copy constructor. ASIO_DECL address_v6(const address_v6& other); #if defined(ASIO_HAS_MOVE) /// Move constructor. ASIO_DECL address_v6(address_v6&& other); #endif // defined(ASIO_HAS_MOVE) /// Assign from another address. ASIO_DECL address_v6& operator=(const address_v6& other); #if defined(ASIO_HAS_MOVE) /// Move-assign from another address. ASIO_DECL address_v6& operator=(address_v6&& other); #endif // defined(ASIO_HAS_MOVE) /// The scope ID of the address. 
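  // A brief sketch of scope ID handling for link-local addresses (the literal
  // address and the interface index 2 are placeholders):
  //
  //   asio::ip::address_v6 a = asio::ip::address_v6::from_string("fe80::1");
  //   a.scope_id(2); // restrict the link-local address to one interface
  //   asio::ip::tcp::endpoint ep(a, 8080);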
/** * Returns the scope ID associated with the IPv6 address. */ unsigned long scope_id() const { return scope_id_; } /// The scope ID of the address. /** * Modifies the scope ID associated with the IPv6 address. */ void scope_id(unsigned long id) { scope_id_ = id; } /// Get the address in bytes, in network byte order. ASIO_DECL bytes_type to_bytes() const; /// Get the address as a string. ASIO_DECL std::string to_string() const; /// Get the address as a string. ASIO_DECL std::string to_string(asio::error_code& ec) const; /// Create an address from an IP address string. ASIO_DECL static address_v6 from_string(const char* str); /// Create an address from an IP address string. ASIO_DECL static address_v6 from_string( const char* str, asio::error_code& ec); /// Create an address from an IP address string. ASIO_DECL static address_v6 from_string(const std::string& str); /// Create an address from an IP address string. ASIO_DECL static address_v6 from_string( const std::string& str, asio::error_code& ec); /// Converts an IPv4-mapped or IPv4-compatible address to an IPv4 address. ASIO_DECL address_v4 to_v4() const; /// Determine whether the address is a loopback address. ASIO_DECL bool is_loopback() const; /// Determine whether the address is unspecified. ASIO_DECL bool is_unspecified() const; /// Determine whether the address is link local. ASIO_DECL bool is_link_local() const; /// Determine whether the address is site local. ASIO_DECL bool is_site_local() const; /// Determine whether the address is a mapped IPv4 address. ASIO_DECL bool is_v4_mapped() const; /// Determine whether the address is an IPv4-compatible address. ASIO_DECL bool is_v4_compatible() const; /// Determine whether the address is a multicast address. ASIO_DECL bool is_multicast() const; /// Determine whether the address is a global multicast address. ASIO_DECL bool is_multicast_global() const; /// Determine whether the address is a link-local multicast address. ASIO_DECL bool is_multicast_link_local() const; /// Determine whether the address is a node-local multicast address. ASIO_DECL bool is_multicast_node_local() const; /// Determine whether the address is a org-local multicast address. ASIO_DECL bool is_multicast_org_local() const; /// Determine whether the address is a site-local multicast address. ASIO_DECL bool is_multicast_site_local() const; /// Compare two addresses for equality. ASIO_DECL friend bool operator==( const address_v6& a1, const address_v6& a2); /// Compare two addresses for inequality. friend bool operator!=(const address_v6& a1, const address_v6& a2) { return !(a1 == a2); } /// Compare addresses for ordering. ASIO_DECL friend bool operator<( const address_v6& a1, const address_v6& a2); /// Compare addresses for ordering. friend bool operator>(const address_v6& a1, const address_v6& a2) { return a2 < a1; } /// Compare addresses for ordering. friend bool operator<=(const address_v6& a1, const address_v6& a2) { return !(a2 < a1); } /// Compare addresses for ordering. friend bool operator>=(const address_v6& a1, const address_v6& a2) { return !(a1 < a2); } /// Obtain an address object that represents any address. static address_v6 any() { return address_v6(); } /// Obtain an address object that represents the loopback address. ASIO_DECL static address_v6 loopback(); /// Create an IPv4-mapped IPv6 address. ASIO_DECL static address_v6 v4_mapped(const address_v4& addr); /// Create an IPv4-compatible IPv6 address. 
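  // A short sketch of the IPv4 interoperability helpers (the literal address
  // is a placeholder):
  //
  //   asio::ip::address_v4 v4 = asio::ip::address_v4::from_string("192.0.2.1");
  //   asio::ip::address_v6 m = asio::ip::address_v6::v4_mapped(v4); // ::ffff:192.0.2.1
  //   assert(m.is_v4_mapped() && m.to_v4() == v4);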
ASIO_DECL static address_v6 v4_compatible(const address_v4& addr); private: // The underlying IPv6 address. asio::detail::in6_addr_type addr_; // The scope ID associated with the address. unsigned long scope_id_; }; #if !defined(ASIO_NO_IOSTREAM) /// Output an address as a string. /** * Used to output a human-readable string for a specified address. * * @param os The output stream to which the string will be written. * * @param addr The address to be written. * * @return The output stream. * * @relates asio::ip::address_v6 */ template std::basic_ostream& operator<<( std::basic_ostream& os, const address_v6& addr); #endif // !defined(ASIO_NO_IOSTREAM) } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/ip/impl/address_v6.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/ip/impl/address_v6.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_IP_ADDRESS_V6_HPP galera-3-25.3.20/asio/asio/ip/resolver_query_base.hpp0000644000015300001660000000717313042054732022235 0ustar jenkinsjenkins// // ip/resolver_query_base.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_RESOLVER_QUERY_BASE_HPP #define ASIO_IP_RESOLVER_QUERY_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { /// The resolver_query_base class is used as a base for the /// basic_resolver_query class templates to provide a common place to define /// the flag constants. class resolver_query_base { public: #if defined(GENERATING_DOCUMENTATION) /// A bitmask type (C++ Std [lib.bitmask.types]). typedef unspecified flags; /// Determine the canonical name of the host specified in the query. static const flags canonical_name = implementation_defined; /// Indicate that returned endpoint is intended for use as a locally bound /// socket endpoint. static const flags passive = implementation_defined; /// Host name should be treated as a numeric string defining an IPv4 or IPv6 /// address and no name resolution should be attempted. static const flags numeric_host = implementation_defined; /// Service name should be treated as a numeric string defining a port number /// and no name resolution should be attempted. static const flags numeric_service = implementation_defined; /// If the query protocol family is specified as IPv6, return IPv4-mapped /// IPv6 addresses on finding no IPv6 addresses. static const flags v4_mapped = implementation_defined; /// If used with v4_mapped, return all matching IPv6 and IPv4 addresses. static const flags all_matching = implementation_defined; /// Only return IPv4 addresses if a non-loopback IPv4 address is configured /// for the system. Only return IPv6 addresses if a non-loopback IPv6 address /// is configured for the system. 
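  // The flags form a bitmask type, so they may be combined with the operators
  // defined below. A brief sketch (host and port literals are placeholders):
  //
  //   asio::ip::tcp::resolver::query q("example.com", "80",
  //       asio::ip::tcp::resolver::query::numeric_service
  //         | asio::ip::tcp::resolver::query::address_configured);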
static const flags address_configured = implementation_defined; #else enum flags { canonical_name = ASIO_OS_DEF(AI_CANONNAME), passive = ASIO_OS_DEF(AI_PASSIVE), numeric_host = ASIO_OS_DEF(AI_NUMERICHOST), numeric_service = ASIO_OS_DEF(AI_NUMERICSERV), v4_mapped = ASIO_OS_DEF(AI_V4MAPPED), all_matching = ASIO_OS_DEF(AI_ALL), address_configured = ASIO_OS_DEF(AI_ADDRCONFIG) }; // Implement bitmask operations as shown in C++ Std [lib.bitmask.types]. friend flags operator&(flags x, flags y) { return static_cast( static_cast(x) & static_cast(y)); } friend flags operator|(flags x, flags y) { return static_cast( static_cast(x) | static_cast(y)); } friend flags operator^(flags x, flags y) { return static_cast( static_cast(x) ^ static_cast(y)); } friend flags operator~(flags x) { return static_cast(~static_cast(x)); } friend flags& operator&=(flags& x, flags y) { x = x & y; return x; } friend flags& operator|=(flags& x, flags y) { x = x | y; return x; } friend flags& operator^=(flags& x, flags y) { x = x ^ y; return x; } #endif protected: /// Protected destructor to prevent deletion through this type. ~resolver_query_base() { } }; } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_RESOLVER_QUERY_BASE_HPP galera-3-25.3.20/asio/asio/ip/resolver_service.hpp0000644000015300001660000001141413042054732021527 0ustar jenkinsjenkins// // ip/resolver_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_RESOLVER_SERVICE_HPP #define ASIO_IP_RESOLVER_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/async_result.hpp" #include "asio/error_code.hpp" #include "asio/io_service.hpp" #include "asio/ip/basic_resolver_iterator.hpp" #include "asio/ip/basic_resolver_query.hpp" #if defined(ASIO_WINDOWS_RUNTIME) # include "asio/detail/winrt_resolver_service.hpp" #else # include "asio/detail/resolver_service.hpp" #endif #include "asio/detail/push_options.hpp" namespace asio { namespace ip { /// Default service implementation for a resolver. template class resolver_service #if defined(GENERATING_DOCUMENTATION) : public asio::io_service::service #else : public asio::detail::service_base< resolver_service > #endif { public: #if defined(GENERATING_DOCUMENTATION) /// The unique service identifier. static asio::io_service::id id; #endif /// The protocol type. typedef InternetProtocol protocol_type; /// The endpoint type. typedef typename InternetProtocol::endpoint endpoint_type; /// The query type. typedef basic_resolver_query query_type; /// The iterator type. typedef basic_resolver_iterator iterator_type; private: // The type of the platform-specific implementation. #if defined(ASIO_WINDOWS_RUNTIME) typedef asio::detail::winrt_resolver_service service_impl_type; #else typedef asio::detail::resolver_service service_impl_type; #endif public: /// The type of a resolver implementation. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined implementation_type; #else typedef typename service_impl_type::implementation_type implementation_type; #endif /// Construct a new resolver service for the specified io_service. 
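  // This service is normally used indirectly, through asio::ip::tcp::resolver
  // or asio::ip::udp::resolver. A short sketch, assuming an io_service named
  // ios and a handler on_resolve with the signature
  // void(const asio::error_code&, asio::ip::tcp::resolver::iterator):
  //
  //   asio::ip::tcp::resolver resolver(ios);
  //   asio::ip::tcp::resolver::query query("example.com", "http");
  //   asio::ip::tcp::resolver::iterator it = resolver.resolve(query);
  //   resolver.async_resolve(query, on_resolve);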
explicit resolver_service(asio::io_service& io_service) : asio::detail::service_base< resolver_service >(io_service), service_impl_(io_service) { } /// Construct a new resolver implementation. void construct(implementation_type& impl) { service_impl_.construct(impl); } /// Destroy a resolver implementation. void destroy(implementation_type& impl) { service_impl_.destroy(impl); } /// Cancel pending asynchronous operations. void cancel(implementation_type& impl) { service_impl_.cancel(impl); } /// Resolve a query to a list of entries. iterator_type resolve(implementation_type& impl, const query_type& query, asio::error_code& ec) { return service_impl_.resolve(impl, query, ec); } /// Asynchronously resolve a query to a list of entries. template ASIO_INITFN_RESULT_TYPE(ResolveHandler, void (asio::error_code, iterator_type)) async_resolve(implementation_type& impl, const query_type& query, ASIO_MOVE_ARG(ResolveHandler) handler) { asio::detail::async_result_init< ResolveHandler, void (asio::error_code, iterator_type)> init( ASIO_MOVE_CAST(ResolveHandler)(handler)); service_impl_.async_resolve(impl, query, init.handler); return init.result.get(); } /// Resolve an endpoint to a list of entries. iterator_type resolve(implementation_type& impl, const endpoint_type& endpoint, asio::error_code& ec) { return service_impl_.resolve(impl, endpoint, ec); } /// Asynchronously resolve an endpoint to a list of entries. template ASIO_INITFN_RESULT_TYPE(ResolveHandler, void (asio::error_code, iterator_type)) async_resolve(implementation_type& impl, const endpoint_type& endpoint, ASIO_MOVE_ARG(ResolveHandler) handler) { asio::detail::async_result_init< ResolveHandler, void (asio::error_code, iterator_type)> init( ASIO_MOVE_CAST(ResolveHandler)(handler)); service_impl_.async_resolve(impl, endpoint, init.handler); return init.result.get(); } private: // Destroy all user-defined handler objects owned by the service. void shutdown_service() { service_impl_.shutdown_service(); } // Perform any fork-related housekeeping. void fork_service(asio::io_service::fork_event event) { service_impl_.fork_service(event); } // The platform-specific implementation. service_impl_type service_impl_; }; } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_RESOLVER_SERVICE_HPP galera-3-25.3.20/asio/asio/ip/address.hpp0000644000015300001660000001257713042054732017606 0ustar jenkinsjenkins// // ip/address.hpp // ~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_ADDRESS_HPP #define ASIO_IP_ADDRESS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/error_code.hpp" #include "asio/ip/address_v4.hpp" #include "asio/ip/address_v6.hpp" #if !defined(ASIO_NO_IOSTREAM) # include #endif // !defined(ASIO_NO_IOSTREAM) #include "asio/detail/push_options.hpp" namespace asio { namespace ip { /// Implements version-independent IP addresses. /** * The asio::ip::address class provides the ability to use either IP * version 4 or version 6 addresses. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ class address { public: /// Default constructor. ASIO_DECL address(); /// Construct an address from an IPv4 address. 
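  // A brief sketch of version-independent address handling (the literals are
  // placeholder documentation addresses):
  //
  //   asio::ip::address a = asio::ip::address::from_string("192.0.2.1");
  //   if (a.is_v4())
  //     std::cout << a.to_v4().to_ulong() << std::endl;
  //   asio::ip::address b = asio::ip::address::from_string("2001:db8::1");
  //   assert(b.is_v6());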
ASIO_DECL address(const asio::ip::address_v4& ipv4_address); /// Construct an address from an IPv6 address. ASIO_DECL address(const asio::ip::address_v6& ipv6_address); /// Copy constructor. ASIO_DECL address(const address& other); #if defined(ASIO_HAS_MOVE) /// Move constructor. ASIO_DECL address(address&& other); #endif // defined(ASIO_HAS_MOVE) /// Assign from another address. ASIO_DECL address& operator=(const address& other); #if defined(ASIO_HAS_MOVE) /// Move-assign from another address. ASIO_DECL address& operator=(address&& other); #endif // defined(ASIO_HAS_MOVE) /// Assign from an IPv4 address. ASIO_DECL address& operator=( const asio::ip::address_v4& ipv4_address); /// Assign from an IPv6 address. ASIO_DECL address& operator=( const asio::ip::address_v6& ipv6_address); /// Get whether the address is an IP version 4 address. bool is_v4() const { return type_ == ipv4; } /// Get whether the address is an IP version 6 address. bool is_v6() const { return type_ == ipv6; } /// Get the address as an IP version 4 address. ASIO_DECL asio::ip::address_v4 to_v4() const; /// Get the address as an IP version 6 address. ASIO_DECL asio::ip::address_v6 to_v6() const; /// Get the address as a string in dotted decimal format. ASIO_DECL std::string to_string() const; /// Get the address as a string in dotted decimal format. ASIO_DECL std::string to_string(asio::error_code& ec) const; /// Create an address from an IPv4 address string in dotted decimal form, /// or from an IPv6 address in hexadecimal notation. ASIO_DECL static address from_string(const char* str); /// Create an address from an IPv4 address string in dotted decimal form, /// or from an IPv6 address in hexadecimal notation. ASIO_DECL static address from_string( const char* str, asio::error_code& ec); /// Create an address from an IPv4 address string in dotted decimal form, /// or from an IPv6 address in hexadecimal notation. ASIO_DECL static address from_string(const std::string& str); /// Create an address from an IPv4 address string in dotted decimal form, /// or from an IPv6 address in hexadecimal notation. ASIO_DECL static address from_string( const std::string& str, asio::error_code& ec); /// Determine whether the address is a loopback address. ASIO_DECL bool is_loopback() const; /// Determine whether the address is unspecified. ASIO_DECL bool is_unspecified() const; /// Determine whether the address is a multicast address. ASIO_DECL bool is_multicast() const; /// Compare two addresses for equality. ASIO_DECL friend bool operator==(const address& a1, const address& a2); /// Compare two addresses for inequality. friend bool operator!=(const address& a1, const address& a2) { return !(a1 == a2); } /// Compare addresses for ordering. ASIO_DECL friend bool operator<(const address& a1, const address& a2); /// Compare addresses for ordering. friend bool operator>(const address& a1, const address& a2) { return a2 < a1; } /// Compare addresses for ordering. friend bool operator<=(const address& a1, const address& a2) { return !(a2 < a1); } /// Compare addresses for ordering. friend bool operator>=(const address& a1, const address& a2) { return !(a1 < a2); } private: // The type of the address. enum { ipv4, ipv6 } type_; // The underlying IPv4 address. asio::ip::address_v4 ipv4_address_; // The underlying IPv6 address. asio::ip::address_v6 ipv6_address_; }; #if !defined(ASIO_NO_IOSTREAM) /// Output an address as a string. /** * Used to output a human-readable string for a specified address. 
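// A minimal sketch of version-independent handling with the address class
// above ("192.0.2.1" is a documentation-only example value):
//
//   asio::ip::address addr = asio::ip::address::from_string("192.0.2.1");
//   unsigned long raw = 0;
//   if (addr.is_v4())
//     raw = addr.to_v4().to_ulong();   // host byte order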
* * @param os The output stream to which the string will be written. * * @param addr The address to be written. * * @return The output stream. * * @relates asio::ip::address */ template std::basic_ostream& operator<<( std::basic_ostream& os, const address& addr); #endif // !defined(ASIO_NO_IOSTREAM) } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/ip/impl/address.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/ip/impl/address.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_IP_ADDRESS_HPP galera-3-25.3.20/asio/asio/ip/basic_endpoint.hpp0000644000015300001660000001476213042054732021140 0ustar jenkinsjenkins// // ip/basic_endpoint.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_BASIC_ENDPOINT_HPP #define ASIO_IP_BASIC_ENDPOINT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/ip/address.hpp" #include "asio/ip/detail/endpoint.hpp" #if !defined(ASIO_NO_IOSTREAM) # include #endif // !defined(ASIO_NO_IOSTREAM) #include "asio/detail/push_options.hpp" namespace asio { namespace ip { /// Describes an endpoint for a version-independent IP socket. /** * The asio::ip::basic_endpoint class template describes an endpoint that * may be associated with a particular socket. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. * * @par Concepts: * Endpoint. */ template class basic_endpoint { public: /// The protocol type associated with the endpoint. typedef InternetProtocol protocol_type; /// The type of the endpoint structure. This type is dependent on the /// underlying implementation of the socket layer. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined data_type; #else typedef asio::detail::socket_addr_type data_type; #endif /// Default constructor. basic_endpoint() : impl_() { } /// Construct an endpoint using a port number, specified in the host's byte /// order. The IP address will be the any address (i.e. INADDR_ANY or /// in6addr_any). This constructor would typically be used for accepting new /// connections. /** * @par Examples * To initialise an IPv4 TCP endpoint for port 1234, use: * @code * asio::ip::tcp::endpoint ep(asio::ip::tcp::v4(), 1234); * @endcode * * To specify an IPv6 UDP endpoint for port 9876, use: * @code * asio::ip::udp::endpoint ep(asio::ip::udp::v6(), 9876); * @endcode */ basic_endpoint(const InternetProtocol& internet_protocol, unsigned short port_num) : impl_(internet_protocol.family(), port_num) { } /// Construct an endpoint using a port number and an IP address. This /// constructor may be used for accepting connections on a specific interface /// or for making a connection to a remote endpoint. basic_endpoint(const asio::ip::address& addr, unsigned short port_num) : impl_(addr, port_num) { } /// Copy constructor. basic_endpoint(const basic_endpoint& other) : impl_(other.impl_) { } #if defined(ASIO_HAS_MOVE) /// Move constructor. basic_endpoint(basic_endpoint&& other) : impl_(other.impl_) { } #endif // defined(ASIO_HAS_MOVE) /// Assign from another endpoint. basic_endpoint& operator=(const basic_endpoint& other) { impl_ = other.impl_; return *this; } #if defined(ASIO_HAS_MOVE) /// Move-assign from another endpoint. 
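// A minimal sketch of the address/port constructor documented above, assuming
// "asio/ip/tcp.hpp" is included ("192.0.2.10" and 3306 are example values):
//
//   asio::ip::tcp::endpoint ep(
//       asio::ip::address::from_string("192.0.2.10"), 3306);
//   // ep.protocol() is asio::ip::tcp::v4() and ep.port() returns 3306.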
basic_endpoint& operator=(basic_endpoint&& other) { impl_ = other.impl_; return *this; } #endif // defined(ASIO_HAS_MOVE) /// The protocol associated with the endpoint. protocol_type protocol() const { if (impl_.is_v4()) return InternetProtocol::v4(); return InternetProtocol::v6(); } /// Get the underlying endpoint in the native type. data_type* data() { return impl_.data(); } /// Get the underlying endpoint in the native type. const data_type* data() const { return impl_.data(); } /// Get the underlying size of the endpoint in the native type. std::size_t size() const { return impl_.size(); } /// Set the underlying size of the endpoint in the native type. void resize(std::size_t new_size) { impl_.resize(new_size); } /// Get the capacity of the endpoint in the native type. std::size_t capacity() const { return impl_.capacity(); } /// Get the port associated with the endpoint. The port number is always in /// the host's byte order. unsigned short port() const { return impl_.port(); } /// Set the port associated with the endpoint. The port number is always in /// the host's byte order. void port(unsigned short port_num) { impl_.port(port_num); } /// Get the IP address associated with the endpoint. asio::ip::address address() const { return impl_.address(); } /// Set the IP address associated with the endpoint. void address(const asio::ip::address& addr) { impl_.address(addr); } /// Compare two endpoints for equality. friend bool operator==(const basic_endpoint& e1, const basic_endpoint& e2) { return e1.impl_ == e2.impl_; } /// Compare two endpoints for inequality. friend bool operator!=(const basic_endpoint& e1, const basic_endpoint& e2) { return !(e1 == e2); } /// Compare endpoints for ordering. friend bool operator<(const basic_endpoint& e1, const basic_endpoint& e2) { return e1.impl_ < e2.impl_; } /// Compare endpoints for ordering. friend bool operator>(const basic_endpoint& e1, const basic_endpoint& e2) { return e2.impl_ < e1.impl_; } /// Compare endpoints for ordering. friend bool operator<=(const basic_endpoint& e1, const basic_endpoint& e2) { return !(e2 < e1); } /// Compare endpoints for ordering. friend bool operator>=(const basic_endpoint& e1, const basic_endpoint& e2) { return !(e1 < e2); } private: // The underlying IP endpoint. asio::ip::detail::endpoint impl_; }; #if !defined(ASIO_NO_IOSTREAM) /// Output an endpoint as a string. /** * Used to output a human-readable string for a specified endpoint. * * @param os The output stream to which the string will be written. * * @param endpoint The endpoint to be written. * * @return The output stream. * * @relates asio::ip::basic_endpoint */ template std::basic_ostream& operator<<( std::basic_ostream& os, const basic_endpoint& endpoint); #endif // !defined(ASIO_NO_IOSTREAM) } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/ip/impl/basic_endpoint.hpp" #endif // ASIO_IP_BASIC_ENDPOINT_HPP galera-3-25.3.20/asio/asio/ip/icmp.hpp0000644000015300001660000000504513042054732017101 0ustar jenkinsjenkins// // ip/icmp.hpp // ~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_ICMP_HPP #define ASIO_IP_ICMP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/socket_types.hpp" #include "asio/basic_raw_socket.hpp" #include "asio/ip/basic_endpoint.hpp" #include "asio/ip/basic_resolver.hpp" #include "asio/ip/basic_resolver_iterator.hpp" #include "asio/ip/basic_resolver_query.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { /// Encapsulates the flags needed for ICMP. /** * The asio::ip::icmp class contains flags necessary for ICMP sockets. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Safe. * * @par Concepts: * Protocol, InternetProtocol. */ class icmp { public: /// The type of a ICMP endpoint. typedef basic_endpoint endpoint; /// Construct to represent the IPv4 ICMP protocol. static icmp v4() { return icmp(ASIO_OS_DEF(IPPROTO_ICMP), ASIO_OS_DEF(AF_INET)); } /// Construct to represent the IPv6 ICMP protocol. static icmp v6() { return icmp(ASIO_OS_DEF(IPPROTO_ICMPV6), ASIO_OS_DEF(AF_INET6)); } /// Obtain an identifier for the type of the protocol. int type() const { return ASIO_OS_DEF(SOCK_RAW); } /// Obtain an identifier for the protocol. int protocol() const { return protocol_; } /// Obtain an identifier for the protocol family. int family() const { return family_; } /// The ICMP socket type. typedef basic_raw_socket socket; /// The ICMP resolver type. typedef basic_resolver resolver; /// Compare two protocols for equality. friend bool operator==(const icmp& p1, const icmp& p2) { return p1.protocol_ == p2.protocol_ && p1.family_ == p2.family_; } /// Compare two protocols for inequality. friend bool operator!=(const icmp& p1, const icmp& p2) { return p1.protocol_ != p2.protocol_ || p1.family_ != p2.family_; } private: // Construct with a specific family. explicit icmp(int protocol_id, int protocol_family) : protocol_(protocol_id), family_(protocol_family) { } int protocol_; int family_; }; } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_ICMP_HPP galera-3-25.3.20/asio/asio/ip/address_v4.hpp0000644000015300001660000001470113042054732020206 0ustar jenkinsjenkins// // ip/address_v4.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_ADDRESS_V4_HPP #define ASIO_IP_ADDRESS_V4_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/array.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/winsock_init.hpp" #include "asio/error_code.hpp" #if !defined(ASIO_NO_IOSTREAM) # include #endif // !defined(ASIO_NO_IOSTREAM) #include "asio/detail/push_options.hpp" namespace asio { namespace ip { /// Implements IP version 4 style addresses. /** * The asio::ip::address_v4 class provides the ability to use and * manipulate IP version 4 addresses. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ class address_v4 { public: /// The type used to represent an address as an array of bytes. 
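// A minimal sketch using the asio::ip::icmp protocol class above, assuming an
// asio::io_service named io_service is in scope and the process has the
// privileges required for raw sockets:
//
//   asio::ip::icmp::socket ping_socket(io_service, asio::ip::icmp::v4());
//   asio::ip::icmp::resolver resolver(io_service);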
/** * @note This type is defined in terms of the C++0x template @c std::array * when it is available. Otherwise, it uses @c boost:array. */ #if defined(GENERATING_DOCUMENTATION) typedef array bytes_type; #else typedef asio::detail::array bytes_type; #endif /// Default constructor. address_v4() { addr_.s_addr = 0; } /// Construct an address from raw bytes. ASIO_DECL explicit address_v4(const bytes_type& bytes); /// Construct an address from a unsigned long in host byte order. ASIO_DECL explicit address_v4(unsigned long addr); /// Copy constructor. address_v4(const address_v4& other) : addr_(other.addr_) { } #if defined(ASIO_HAS_MOVE) /// Move constructor. address_v4(address_v4&& other) : addr_(other.addr_) { } #endif // defined(ASIO_HAS_MOVE) /// Assign from another address. address_v4& operator=(const address_v4& other) { addr_ = other.addr_; return *this; } #if defined(ASIO_HAS_MOVE) /// Move-assign from another address. address_v4& operator=(address_v4&& other) { addr_ = other.addr_; return *this; } #endif // defined(ASIO_HAS_MOVE) /// Get the address in bytes, in network byte order. ASIO_DECL bytes_type to_bytes() const; /// Get the address as an unsigned long in host byte order ASIO_DECL unsigned long to_ulong() const; /// Get the address as a string in dotted decimal format. ASIO_DECL std::string to_string() const; /// Get the address as a string in dotted decimal format. ASIO_DECL std::string to_string(asio::error_code& ec) const; /// Create an address from an IP address string in dotted decimal form. ASIO_DECL static address_v4 from_string(const char* str); /// Create an address from an IP address string in dotted decimal form. ASIO_DECL static address_v4 from_string( const char* str, asio::error_code& ec); /// Create an address from an IP address string in dotted decimal form. ASIO_DECL static address_v4 from_string(const std::string& str); /// Create an address from an IP address string in dotted decimal form. ASIO_DECL static address_v4 from_string( const std::string& str, asio::error_code& ec); /// Determine whether the address is a loopback address. ASIO_DECL bool is_loopback() const; /// Determine whether the address is unspecified. ASIO_DECL bool is_unspecified() const; /// Determine whether the address is a class A address. ASIO_DECL bool is_class_a() const; /// Determine whether the address is a class B address. ASIO_DECL bool is_class_b() const; /// Determine whether the address is a class C address. ASIO_DECL bool is_class_c() const; /// Determine whether the address is a multicast address. ASIO_DECL bool is_multicast() const; /// Compare two addresses for equality. friend bool operator==(const address_v4& a1, const address_v4& a2) { return a1.addr_.s_addr == a2.addr_.s_addr; } /// Compare two addresses for inequality. friend bool operator!=(const address_v4& a1, const address_v4& a2) { return a1.addr_.s_addr != a2.addr_.s_addr; } /// Compare addresses for ordering. friend bool operator<(const address_v4& a1, const address_v4& a2) { return a1.to_ulong() < a2.to_ulong(); } /// Compare addresses for ordering. friend bool operator>(const address_v4& a1, const address_v4& a2) { return a1.to_ulong() > a2.to_ulong(); } /// Compare addresses for ordering. friend bool operator<=(const address_v4& a1, const address_v4& a2) { return a1.to_ulong() <= a2.to_ulong(); } /// Compare addresses for ordering. friend bool operator>=(const address_v4& a1, const address_v4& a2) { return a1.to_ulong() >= a2.to_ulong(); } /// Obtain an address object that represents any address. 
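// A minimal sketch of the address_v4 accessors above ("239.1.2.3" is an
// example multicast address):
//
//   asio::ip::address_v4 a4 = asio::ip::address_v4::from_string("239.1.2.3");
//   bool mc = a4.is_multicast();              // true for 224.0.0.0/4
//   unsigned long host_order = a4.to_ulong();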
static address_v4 any() { return address_v4(); } /// Obtain an address object that represents the loopback address. static address_v4 loopback() { return address_v4(0x7F000001); } /// Obtain an address object that represents the broadcast address. static address_v4 broadcast() { return address_v4(0xFFFFFFFF); } /// Obtain an address object that represents the broadcast address that /// corresponds to the specified address and netmask. ASIO_DECL static address_v4 broadcast( const address_v4& addr, const address_v4& mask); /// Obtain the netmask that corresponds to the address, based on its address /// class. ASIO_DECL static address_v4 netmask(const address_v4& addr); private: // The underlying IPv4 address. asio::detail::in4_addr_type addr_; }; #if !defined(ASIO_NO_IOSTREAM) /// Output an address as a string. /** * Used to output a human-readable string for a specified address. * * @param os The output stream to which the string will be written. * * @param addr The address to be written. * * @return The output stream. * * @relates asio::ip::address_v4 */ template std::basic_ostream& operator<<( std::basic_ostream& os, const address_v4& addr); #endif // !defined(ASIO_NO_IOSTREAM) } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/ip/impl/address_v4.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/ip/impl/address_v4.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_IP_ADDRESS_V4_HPP galera-3-25.3.20/asio/asio/ip/basic_resolver_query.hpp0000644000015300001660000002203213042054732022373 0ustar jenkinsjenkins// // ip/basic_resolver_query.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_BASIC_RESOLVER_QUERY_HPP #define ASIO_IP_BASIC_RESOLVER_QUERY_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/socket_ops.hpp" #include "asio/ip/resolver_query_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { /// An query to be passed to a resolver. /** * The asio::ip::basic_resolver_query class template describes a query * that can be passed to a resolver. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ template class basic_resolver_query : public resolver_query_base { public: /// The protocol type associated with the endpoint query. typedef InternetProtocol protocol_type; /// Construct with specified service name for any protocol. /** * This constructor is typically used to perform name resolution for local * service binding. * * @param service A string identifying the requested service. This may be a * descriptive name or a numeric string corresponding to a port number. * * @param resolve_flags A set of flags that determine how name resolution * should be performed. The default flags are suitable for local service * binding. * * @note On POSIX systems, service names are typically defined in the file * /etc/services. On Windows, service names may be found in the file * c:\\windows\\system32\\drivers\\etc\\services. Operating systems * may use additional locations when resolving service names. 
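// A minimal sketch of the any()/loopback()/broadcast()/netmask() helpers
// declared above ("192.168.1.7" is an example class C address):
//
//   asio::ip::address_v4 host = asio::ip::address_v4::from_string("192.168.1.7");
//   asio::ip::address_v4 mask = asio::ip::address_v4::netmask(host);          // 255.255.255.0
//   asio::ip::address_v4 bcast = asio::ip::address_v4::broadcast(host, mask); // 192.168.1.255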
*/ basic_resolver_query(const std::string& service, resolver_query_base::flags resolve_flags = passive | address_configured) : hints_(), host_name_(), service_name_(service) { typename InternetProtocol::endpoint endpoint; hints_.ai_flags = static_cast(resolve_flags); hints_.ai_family = PF_UNSPEC; hints_.ai_socktype = endpoint.protocol().type(); hints_.ai_protocol = endpoint.protocol().protocol(); hints_.ai_addrlen = 0; hints_.ai_canonname = 0; hints_.ai_addr = 0; hints_.ai_next = 0; } /// Construct with specified service name for a given protocol. /** * This constructor is typically used to perform name resolution for local * service binding with a specific protocol version. * * @param protocol A protocol object, normally representing either the IPv4 or * IPv6 version of an internet protocol. * * @param service A string identifying the requested service. This may be a * descriptive name or a numeric string corresponding to a port number. * * @param resolve_flags A set of flags that determine how name resolution * should be performed. The default flags are suitable for local service * binding. * * @note On POSIX systems, service names are typically defined in the file * /etc/services. On Windows, service names may be found in the file * c:\\windows\\system32\\drivers\\etc\\services. Operating systems * may use additional locations when resolving service names. */ basic_resolver_query(const protocol_type& protocol, const std::string& service, resolver_query_base::flags resolve_flags = passive | address_configured) : hints_(), host_name_(), service_name_(service) { hints_.ai_flags = static_cast(resolve_flags); hints_.ai_family = protocol.family(); hints_.ai_socktype = protocol.type(); hints_.ai_protocol = protocol.protocol(); hints_.ai_addrlen = 0; hints_.ai_canonname = 0; hints_.ai_addr = 0; hints_.ai_next = 0; } /// Construct with specified host name and service name for any protocol. /** * This constructor is typically used to perform name resolution for * communication with remote hosts. * * @param host A string identifying a location. May be a descriptive name or * a numeric address string. If an empty string and the passive flag has been * specified, the resolved endpoints are suitable for local service binding. * If an empty string and passive is not specified, the resolved endpoints * will use the loopback address. * * @param service A string identifying the requested service. This may be a * descriptive name or a numeric string corresponding to a port number. May * be an empty string, in which case all resolved endpoints will have a port * number of 0. * * @param resolve_flags A set of flags that determine how name resolution * should be performed. The default flags are suitable for communication with * remote hosts. * * @note On POSIX systems, host names may be locally defined in the file * /etc/hosts. On Windows, host names may be defined in the file * c:\\windows\\system32\\drivers\\etc\\hosts. Remote host name * resolution is performed using DNS. Operating systems may use additional * locations when resolving host names (such as NETBIOS names on Windows). * * On POSIX systems, service names are typically defined in the file * /etc/services. On Windows, service names may be found in the file * c:\\windows\\system32\\drivers\\etc\\services. Operating systems * may use additional locations when resolving service names. 
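// A minimal sketch of the local-binding constructors above, assuming
// "asio/ip/tcp.hpp" is included ("8080" is an example port):
//
//   asio::ip::tcp::resolver::query local(
//       "8080",
//       asio::ip::resolver_query_base::passive
//           | asio::ip::resolver_query_base::numeric_service);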
*/ basic_resolver_query(const std::string& host, const std::string& service, resolver_query_base::flags resolve_flags = address_configured) : hints_(), host_name_(host), service_name_(service) { typename InternetProtocol::endpoint endpoint; hints_.ai_flags = static_cast(resolve_flags); hints_.ai_family = ASIO_OS_DEF(AF_UNSPEC); hints_.ai_socktype = endpoint.protocol().type(); hints_.ai_protocol = endpoint.protocol().protocol(); hints_.ai_addrlen = 0; hints_.ai_canonname = 0; hints_.ai_addr = 0; hints_.ai_next = 0; } /// Construct with specified host name and service name for a given protocol. /** * This constructor is typically used to perform name resolution for * communication with remote hosts. * * @param protocol A protocol object, normally representing either the IPv4 or * IPv6 version of an internet protocol. * * @param host A string identifying a location. May be a descriptive name or * a numeric address string. If an empty string and the passive flag has been * specified, the resolved endpoints are suitable for local service binding. * If an empty string and passive is not specified, the resolved endpoints * will use the loopback address. * * @param service A string identifying the requested service. This may be a * descriptive name or a numeric string corresponding to a port number. May * be an empty string, in which case all resolved endpoints will have a port * number of 0. * * @param resolve_flags A set of flags that determine how name resolution * should be performed. The default flags are suitable for communication with * remote hosts. * * @note On POSIX systems, host names may be locally defined in the file * /etc/hosts. On Windows, host names may be defined in the file * c:\\windows\\system32\\drivers\\etc\\hosts. Remote host name * resolution is performed using DNS. Operating systems may use additional * locations when resolving host names (such as NETBIOS names on Windows). * * On POSIX systems, service names are typically defined in the file * /etc/services. On Windows, service names may be found in the file * c:\\windows\\system32\\drivers\\etc\\services. Operating systems * may use additional locations when resolving service names. */ basic_resolver_query(const protocol_type& protocol, const std::string& host, const std::string& service, resolver_query_base::flags resolve_flags = address_configured) : hints_(), host_name_(host), service_name_(service) { hints_.ai_flags = static_cast(resolve_flags); hints_.ai_family = protocol.family(); hints_.ai_socktype = protocol.type(); hints_.ai_protocol = protocol.protocol(); hints_.ai_addrlen = 0; hints_.ai_canonname = 0; hints_.ai_addr = 0; hints_.ai_next = 0; } /// Get the hints associated with the query. const asio::detail::addrinfo_type& hints() const { return hints_; } /// Get the host name associated with the query. std::string host_name() const { return host_name_; } /// Get the service name associated with the query. std::string service_name() const { return service_name_; } private: asio::detail::addrinfo_type hints_; std::string host_name_; std::string service_name_; }; } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_BASIC_RESOLVER_QUERY_HPP galera-3-25.3.20/asio/asio/ip/basic_resolver_entry.hpp0000644000015300001660000000420013042054732022364 0ustar jenkinsjenkins// // ip/basic_resolver_entry.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
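// A minimal sketch of the query accessors above, assuming "asio/ip/udp.hpp"
// is included ("ntp.example.org" is an example host):
//
//   asio::ip::udp::resolver::query q(asio::ip::udp::v4(), "ntp.example.org", "123");
//   const asio::detail::addrinfo_type& hints = q.hints();  // pre-filled for UDP
//   std::string host = q.host_name();                      // "ntp.example.org"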
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_BASIC_RESOLVER_ENTRY_HPP #define ASIO_IP_BASIC_RESOLVER_ENTRY_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/push_options.hpp" namespace asio { namespace ip { /// An entry produced by a resolver. /** * The asio::ip::basic_resolver_entry class template describes an entry * as returned by a resolver. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ template class basic_resolver_entry { public: /// The protocol type associated with the endpoint entry. typedef InternetProtocol protocol_type; /// The endpoint type associated with the endpoint entry. typedef typename InternetProtocol::endpoint endpoint_type; /// Default constructor. basic_resolver_entry() { } /// Construct with specified endpoint, host name and service name. basic_resolver_entry(const endpoint_type& ep, const std::string& host, const std::string& service) : endpoint_(ep), host_name_(host), service_name_(service) { } /// Get the endpoint associated with the entry. endpoint_type endpoint() const { return endpoint_; } /// Convert to the endpoint associated with the entry. operator endpoint_type() const { return endpoint_; } /// Get the host name associated with the entry. std::string host_name() const { return host_name_; } /// Get the service name associated with the entry. std::string service_name() const { return service_name_; } private: endpoint_type endpoint_; std::string host_name_; std::string service_name_; }; } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_BASIC_RESOLVER_ENTRY_HPP galera-3-25.3.20/asio/asio/ip/udp.hpp0000644000015300001660000000453613042054732016745 0ustar jenkinsjenkins// // ip/udp.hpp // ~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_UDP_HPP #define ASIO_IP_UDP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/basic_datagram_socket.hpp" #include "asio/detail/socket_types.hpp" #include "asio/ip/basic_endpoint.hpp" #include "asio/ip/basic_resolver.hpp" #include "asio/ip/basic_resolver_iterator.hpp" #include "asio/ip/basic_resolver_query.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { /// Encapsulates the flags needed for UDP. /** * The asio::ip::udp class contains flags necessary for UDP sockets. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Safe. * * @par Concepts: * Protocol, InternetProtocol. */ class udp { public: /// The type of a UDP endpoint. typedef basic_endpoint endpoint; /// Construct to represent the IPv4 UDP protocol. static udp v4() { return udp(ASIO_OS_DEF(AF_INET)); } /// Construct to represent the IPv6 UDP protocol. static udp v6() { return udp(ASIO_OS_DEF(AF_INET6)); } /// Obtain an identifier for the type of the protocol. int type() const { return ASIO_OS_DEF(SOCK_DGRAM); } /// Obtain an identifier for the protocol. int protocol() const { return ASIO_OS_DEF(IPPROTO_UDP); } /// Obtain an identifier for the protocol family. 
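// A minimal sketch of consuming the resolver entries described above,
// continuing the resolver/query objects from the earlier sketches:
//
//   asio::ip::tcp::resolver::iterator it = resolver.resolve(query);
//   asio::ip::tcp::endpoint ep = *it;      // an entry converts to its endpoint
//   std::string host = it->host_name();    // name the entry was resolved from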
int family() const { return family_; } /// The UDP socket type. typedef basic_datagram_socket socket; /// The UDP resolver type. typedef basic_resolver resolver; /// Compare two protocols for equality. friend bool operator==(const udp& p1, const udp& p2) { return p1.family_ == p2.family_; } /// Compare two protocols for inequality. friend bool operator!=(const udp& p1, const udp& p2) { return p1.family_ != p2.family_; } private: // Construct with a specific family. explicit udp(int protocol_family) : family_(protocol_family) { } int family_; }; } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_UDP_HPP galera-3-25.3.20/asio/asio/ip/host_name.hpp0000644000015300001660000000173013042054732020123 0ustar jenkinsjenkins// // ip/host_name.hpp // ~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_HOST_NAME_HPP #define ASIO_IP_HOST_NAME_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/error_code.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { /// Get the current host name. ASIO_DECL std::string host_name(); /// Get the current host name. ASIO_DECL std::string host_name(asio::error_code& ec); } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/ip/impl/host_name.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_IP_HOST_NAME_HPP galera-3-25.3.20/asio/asio/ip/multicast.hpp0000644000015300001660000001200713042054732020152 0ustar jenkinsjenkins// // ip/multicast.hpp // ~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_MULTICAST_HPP #define ASIO_IP_MULTICAST_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/ip/detail/socket_option.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { namespace multicast { /// Socket option to join a multicast group on a specified interface. /** * Implements the IPPROTO_IP/IP_ADD_MEMBERSHIP socket option. * * @par Examples * Setting the option to join a multicast group: * @code * asio::ip::udp::socket socket(io_service); * ... * asio::ip::address multicast_address = * asio::ip::address::from_string("225.0.0.1"); * asio::ip::multicast::join_group option(multicast_address); * socket.set_option(option); * @endcode * * @par Concepts: * SettableSocketOption. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined join_group; #else typedef asio::ip::detail::socket_option::multicast_request< ASIO_OS_DEF(IPPROTO_IP), ASIO_OS_DEF(IP_ADD_MEMBERSHIP), ASIO_OS_DEF(IPPROTO_IPV6), ASIO_OS_DEF(IPV6_JOIN_GROUP)> join_group; #endif /// Socket option to leave a multicast group on a specified interface. /** * Implements the IPPROTO_IP/IP_DROP_MEMBERSHIP socket option. * * @par Examples * Setting the option to leave a multicast group: * @code * asio::ip::udp::socket socket(io_service); * ... 
* asio::ip::address multicast_address = * asio::ip::address::from_string("225.0.0.1"); * asio::ip::multicast::leave_group option(multicast_address); * socket.set_option(option); * @endcode * * @par Concepts: * SettableSocketOption. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined leave_group; #else typedef asio::ip::detail::socket_option::multicast_request< ASIO_OS_DEF(IPPROTO_IP), ASIO_OS_DEF(IP_DROP_MEMBERSHIP), ASIO_OS_DEF(IPPROTO_IPV6), ASIO_OS_DEF(IPV6_LEAVE_GROUP)> leave_group; #endif /// Socket option for local interface to use for outgoing multicast packets. /** * Implements the IPPROTO_IP/IP_MULTICAST_IF socket option. * * @par Examples * Setting the option: * @code * asio::ip::udp::socket socket(io_service); * ... * asio::ip::address_v4 local_interface = * asio::ip::address_v4::from_string("1.2.3.4"); * asio::ip::multicast::outbound_interface option(local_interface); * socket.set_option(option); * @endcode * * @par Concepts: * SettableSocketOption. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined outbound_interface; #else typedef asio::ip::detail::socket_option::network_interface< ASIO_OS_DEF(IPPROTO_IP), ASIO_OS_DEF(IP_MULTICAST_IF), ASIO_OS_DEF(IPPROTO_IPV6), ASIO_OS_DEF(IPV6_MULTICAST_IF)> outbound_interface; #endif /// Socket option for time-to-live associated with outgoing multicast packets. /** * Implements the IPPROTO_IP/IP_MULTICAST_TTL socket option. * * @par Examples * Setting the option: * @code * asio::ip::udp::socket socket(io_service); * ... * asio::ip::multicast::hops option(4); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::udp::socket socket(io_service); * ... * asio::ip::multicast::hops option; * socket.get_option(option); * int ttl = option.value(); * @endcode * * @par Concepts: * GettableSocketOption, SettableSocketOption. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined hops; #else typedef asio::ip::detail::socket_option::multicast_hops< ASIO_OS_DEF(IPPROTO_IP), ASIO_OS_DEF(IP_MULTICAST_TTL), ASIO_OS_DEF(IPPROTO_IPV6), ASIO_OS_DEF(IPV6_MULTICAST_HOPS)> hops; #endif /// Socket option determining whether outgoing multicast packets will be /// received on the same socket if it is a member of the multicast group. /** * Implements the IPPROTO_IP/IP_MULTICAST_LOOP socket option. * * @par Examples * Setting the option: * @code * asio::ip::udp::socket socket(io_service); * ... * asio::ip::multicast::enable_loopback option(true); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::udp::socket socket(io_service); * ... * asio::ip::multicast::enable_loopback option; * socket.get_option(option); * bool is_set = option.value(); * @endcode * * @par Concepts: * GettableSocketOption, SettableSocketOption. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined enable_loopback; #else typedef asio::ip::detail::socket_option::multicast_enable_loopback< ASIO_OS_DEF(IPPROTO_IP), ASIO_OS_DEF(IP_MULTICAST_LOOP), ASIO_OS_DEF(IPPROTO_IPV6), ASIO_OS_DEF(IPV6_MULTICAST_LOOP)> enable_loopback; #endif } // namespace multicast } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_MULTICAST_HPP galera-3-25.3.20/asio/asio/ip/v6_only.hpp0000644000015300001660000000321213042054732017537 0ustar jenkinsjenkins// // ip/v6_only.hpp // ~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_V6_ONLY_HPP #define ASIO_IP_V6_ONLY_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/socket_option.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { /// Socket option for determining whether an IPv6 socket supports IPv6 /// communication only. /** * Implements the IPPROTO_IPV6/IP_V6ONLY socket option. * * @par Examples * Setting the option: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::ip::v6_only option(true); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::ip::v6_only option; * socket.get_option(option); * bool v6_only = option.value(); * @endcode * * @par Concepts: * GettableSocketOption, SettableSocketOption. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined v6_only; #elif defined(IPV6_V6ONLY) typedef asio::detail::socket_option::boolean< IPPROTO_IPV6, IPV6_V6ONLY> v6_only; #else typedef asio::detail::socket_option::boolean< asio::detail::custom_socket_option_level, asio::detail::always_fail_option> v6_only; #endif } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_V6_ONLY_HPP galera-3-25.3.20/asio/asio/ip/basic_resolver_iterator.hpp0000644000015300001660000001657413042054732023075 0ustar jenkinsjenkins// // ip/basic_resolver_iterator.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_BASIC_RESOLVER_ITERATOR_HPP #define ASIO_IP_BASIC_RESOLVER_ITERATOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include #include #include #include "asio/detail/shared_ptr.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/socket_types.hpp" #include "asio/ip/basic_resolver_entry.hpp" #if defined(ASIO_WINDOWS_RUNTIME) # include "asio/detail/winrt_utils.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #include "asio/detail/push_options.hpp" namespace asio { namespace ip { /// An iterator over the entries produced by a resolver. /** * The asio::ip::basic_resolver_iterator class template is used to define * iterators over the results returned by a resolver. * * The iterator's value_type, obtained when the iterator is dereferenced, is: * @code const basic_resolver_entry @endcode * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ template class basic_resolver_iterator { public: /// The type used for the distance between two iterators. typedef std::ptrdiff_t difference_type; /// The type of the value pointed to by the iterator. typedef basic_resolver_entry value_type; /// The type of the result of applying operator->() to the iterator. typedef const basic_resolver_entry* pointer; /// The type of the result of applying operator*() to the iterator. typedef const basic_resolver_entry& reference; /// The iterator category. 
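// A minimal sketch of the v6_only option above for dual-stack listening,
// assuming "asio/ip/tcp.hpp" is included, an asio::io_service named
// io_service is in scope, and the platform allows clearing IPV6_V6ONLY:
//
//   asio::ip::tcp::acceptor acceptor(io_service);
//   asio::ip::tcp::endpoint ep(asio::ip::tcp::v6(), 8080);
//   acceptor.open(ep.protocol());
//   acceptor.set_option(asio::ip::v6_only(false));
//   acceptor.bind(ep);
//   acceptor.listen();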
typedef std::forward_iterator_tag iterator_category; /// Default constructor creates an end iterator. basic_resolver_iterator() : index_(0) { } /// Create an iterator from an addrinfo list returned by getaddrinfo. static basic_resolver_iterator create( asio::detail::addrinfo_type* address_info, const std::string& host_name, const std::string& service_name) { basic_resolver_iterator iter; if (!address_info) return iter; std::string actual_host_name = host_name; if (address_info->ai_canonname) actual_host_name = address_info->ai_canonname; iter.values_.reset(new values_type); while (address_info) { if (address_info->ai_family == ASIO_OS_DEF(AF_INET) || address_info->ai_family == ASIO_OS_DEF(AF_INET6)) { using namespace std; // For memcpy. typename InternetProtocol::endpoint endpoint; endpoint.resize(static_cast(address_info->ai_addrlen)); memcpy(endpoint.data(), address_info->ai_addr, address_info->ai_addrlen); iter.values_->push_back( basic_resolver_entry(endpoint, actual_host_name, service_name)); } address_info = address_info->ai_next; } return iter; } /// Create an iterator from an endpoint, host name and service name. static basic_resolver_iterator create( const typename InternetProtocol::endpoint& endpoint, const std::string& host_name, const std::string& service_name) { basic_resolver_iterator iter; iter.values_.reset(new values_type); iter.values_->push_back( basic_resolver_entry( endpoint, host_name, service_name)); return iter; } /// Create an iterator from a sequence of endpoints, host and service name. template static basic_resolver_iterator create( EndpointIterator begin, EndpointIterator end, const std::string& host_name, const std::string& service_name) { basic_resolver_iterator iter; if (begin != end) { iter.values_.reset(new values_type); for (EndpointIterator ep_iter = begin; ep_iter != end; ++ep_iter) { iter.values_->push_back( basic_resolver_entry( *ep_iter, host_name, service_name)); } } return iter; } #if defined(ASIO_WINDOWS_RUNTIME) /// Create an iterator from a Windows Runtime list of EndpointPair objects. static basic_resolver_iterator create( Windows::Foundation::Collections::IVectorView< Windows::Networking::EndpointPair^>^ endpoints, const asio::detail::addrinfo_type& hints, const std::string& host_name, const std::string& service_name) { basic_resolver_iterator iter; if (endpoints->Size) { iter.values_.reset(new values_type); for (unsigned int i = 0; i < endpoints->Size; ++i) { auto pair = endpoints->GetAt(i); if (hints.ai_family == ASIO_OS_DEF(AF_INET) && pair->RemoteHostName->Type != Windows::Networking::HostNameType::Ipv4) continue; if (hints.ai_family == ASIO_OS_DEF(AF_INET6) && pair->RemoteHostName->Type != Windows::Networking::HostNameType::Ipv6) continue; iter.values_->push_back( basic_resolver_entry( typename InternetProtocol::endpoint( ip::address::from_string( asio::detail::winrt_utils::string( pair->RemoteHostName->CanonicalName)), asio::detail::winrt_utils::integer( pair->RemoteServiceName)), host_name, service_name)); } } return iter; } #endif // defined(ASIO_WINDOWS_RUNTIME) /// Dereference an iterator. const basic_resolver_entry& operator*() const { return dereference(); } /// Dereference an iterator. const basic_resolver_entry* operator->() const { return &dereference(); } /// Increment operator (prefix). basic_resolver_iterator& operator++() { increment(); return *this; } /// Increment operator (postfix). basic_resolver_iterator operator++(int) { basic_resolver_iterator tmp(*this); ++*this; return tmp; } /// Test two iterators for equality. 
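// A minimal sketch of walking this iterator until the end is reached (a
// default-constructed iterator acts as the end iterator), assuming <iostream>
// is included and resolver/query exist as in the earlier sketches:
//
//   asio::ip::tcp::resolver::iterator end;
//   for (asio::ip::tcp::resolver::iterator it = resolver.resolve(query);
//        it != end; ++it)
//     std::cout << it->endpoint() << std::endl;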
friend bool operator==(const basic_resolver_iterator& a, const basic_resolver_iterator& b) { return a.equal(b); } /// Test two iterators for inequality. friend bool operator!=(const basic_resolver_iterator& a, const basic_resolver_iterator& b) { return !a.equal(b); } private: void increment() { if (++index_ == values_->size()) { // Reset state to match a default constructed end iterator. values_.reset(); index_ = 0; } } bool equal(const basic_resolver_iterator& other) const { if (!values_ && !other.values_) return true; if (values_ != other.values_) return false; return index_ == other.index_; } const basic_resolver_entry& dereference() const { return (*values_)[index_]; } typedef std::vector > values_type; asio::detail::shared_ptr values_; std::size_t index_; }; } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_BASIC_RESOLVER_ITERATOR_HPP galera-3-25.3.20/asio/asio/ip/unicast.hpp0000644000015300001660000000315713042054732017621 0ustar jenkinsjenkins// // ip/unicast.hpp // ~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_UNICAST_HPP #define ASIO_IP_UNICAST_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/ip/detail/socket_option.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { namespace unicast { /// Socket option for time-to-live associated with outgoing unicast packets. /** * Implements the IPPROTO_IP/IP_UNICAST_TTL socket option. * * @par Examples * Setting the option: * @code * asio::ip::udp::socket socket(io_service); * ... * asio::ip::unicast::hops option(4); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::udp::socket socket(io_service); * ... * asio::ip::unicast::hops option; * socket.get_option(option); * int ttl = option.value(); * @endcode * * @par Concepts: * GettableSocketOption, SettableSocketOption. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined hops; #else typedef asio::ip::detail::socket_option::unicast_hops< ASIO_OS_DEF(IPPROTO_IP), ASIO_OS_DEF(IP_TTL), ASIO_OS_DEF(IPPROTO_IPV6), ASIO_OS_DEF(IPV6_UNICAST_HOPS)> hops; #endif } // namespace unicast } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_UNICAST_HPP galera-3-25.3.20/asio/asio/ip/impl/0000755000015300001660000000000013042054732016375 5ustar jenkinsjenkinsgalera-3-25.3.20/asio/asio/ip/impl/address_v6.hpp0000644000015300001660000000243213042054732021147 0ustar jenkinsjenkins// // ip/impl/address_v6.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_IMPL_ADDRESS_V6_HPP #define ASIO_IP_IMPL_ADDRESS_V6_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #if !defined(ASIO_NO_IOSTREAM) #include "asio/detail/throw_error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { template std::basic_ostream& operator<<( std::basic_ostream& os, const address_v6& addr) { asio::error_code ec; std::string s = addr.to_string(ec); if (ec) { if (os.exceptions() & std::basic_ostream::failbit) asio::detail::throw_error(ec); else os.setstate(std::basic_ostream::failbit); } else for (std::string::iterator i = s.begin(); i != s.end(); ++i) os << os.widen(*i); return os; } } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_NO_IOSTREAM) #endif // ASIO_IP_IMPL_ADDRESS_V6_HPP galera-3-25.3.20/asio/asio/ip/impl/address.hpp0000644000015300001660000000241013042054732020530 0ustar jenkinsjenkins// // ip/impl/address.hpp // ~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_IMPL_ADDRESS_HPP #define ASIO_IP_IMPL_ADDRESS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #if !defined(ASIO_NO_IOSTREAM) #include "asio/detail/throw_error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { template std::basic_ostream& operator<<( std::basic_ostream& os, const address& addr) { asio::error_code ec; std::string s = addr.to_string(ec); if (ec) { if (os.exceptions() & std::basic_ostream::failbit) asio::detail::throw_error(ec); else os.setstate(std::basic_ostream::failbit); } else for (std::string::iterator i = s.begin(); i != s.end(); ++i) os << os.widen(*i); return os; } } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_NO_IOSTREAM) #endif // ASIO_IP_IMPL_ADDRESS_HPP galera-3-25.3.20/asio/asio/ip/impl/basic_endpoint.hpp0000644000015300001660000000266313042054732022076 0ustar jenkinsjenkins// // ip/impl/basic_endpoint.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_IMPL_BASIC_ENDPOINT_HPP #define ASIO_IP_IMPL_BASIC_ENDPOINT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #if !defined(ASIO_NO_IOSTREAM) #include "asio/detail/throw_error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { template std::basic_ostream& operator<<( std::basic_ostream& os, const basic_endpoint& endpoint) { asio::ip::detail::endpoint tmp_ep(endpoint.address(), endpoint.port()); asio::error_code ec; std::string s = tmp_ep.to_string(ec); if (ec) { if (os.exceptions() & std::basic_ostream::failbit) asio::detail::throw_error(ec); else os.setstate(std::basic_ostream::failbit); } else for (std::string::iterator i = s.begin(); i != s.end(); ++i) os << os.widen(*i); return os; } } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_NO_IOSTREAM) #endif // ASIO_IP_IMPL_BASIC_ENDPOINT_HPP galera-3-25.3.20/asio/asio/ip/impl/address_v4.hpp0000644000015300001660000000243213042054732021145 0ustar jenkinsjenkins// // ip/impl/address_v4.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_IMPL_ADDRESS_V4_HPP #define ASIO_IP_IMPL_ADDRESS_V4_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #if !defined(ASIO_NO_IOSTREAM) #include "asio/detail/throw_error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { template std::basic_ostream& operator<<( std::basic_ostream& os, const address_v4& addr) { asio::error_code ec; std::string s = addr.to_string(ec); if (ec) { if (os.exceptions() & std::basic_ostream::failbit) asio::detail::throw_error(ec); else os.setstate(std::basic_ostream::failbit); } else for (std::string::iterator i = s.begin(); i != s.end(); ++i) os << os.widen(*i); return os; } } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_NO_IOSTREAM) #endif // ASIO_IP_IMPL_ADDRESS_V4_HPP galera-3-25.3.20/asio/asio/ip/impl/address_v6.ipp0000644000015300001660000001760413042054732021157 0ustar jenkinsjenkins// // ip/impl/address_v6.ipp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
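// A minimal sketch of the stream inserters implemented in these impl headers,
// assuming <sstream> is included:
//
//   asio::ip::address a = asio::ip::address::from_string("2001:db8::1");
//   std::ostringstream oss;
//   oss << a;                       // oss.str() == "2001:db8::1"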
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_IMPL_ADDRESS_V6_IPP #define ASIO_IP_IMPL_ADDRESS_V6_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include #include "asio/detail/socket_ops.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/error.hpp" #include "asio/ip/address_v6.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { address_v6::address_v6() : addr_(), scope_id_(0) { } address_v6::address_v6(const address_v6::bytes_type& bytes, unsigned long scope) : scope_id_(scope) { #if UCHAR_MAX > 0xFF for (std::size_t i = 0; i < bytes.size(); ++i) { if (bytes[i] > 0xFF) { std::out_of_range ex("address_v6 from bytes_type"); asio::detail::throw_exception(ex); } } #endif // UCHAR_MAX > 0xFF using namespace std; // For memcpy. memcpy(addr_.s6_addr, bytes.data(), 16); } address_v6::address_v6(const address_v6& other) : addr_(other.addr_), scope_id_(other.scope_id_) { } #if defined(ASIO_HAS_MOVE) address_v6::address_v6(address_v6&& other) : addr_(other.addr_), scope_id_(other.scope_id_) { } #endif // defined(ASIO_HAS_MOVE) address_v6& address_v6::operator=(const address_v6& other) { addr_ = other.addr_; scope_id_ = other.scope_id_; return *this; } #if defined(ASIO_HAS_MOVE) address_v6& address_v6::operator=(address_v6&& other) { addr_ = other.addr_; scope_id_ = other.scope_id_; return *this; } #endif // defined(ASIO_HAS_MOVE) address_v6::bytes_type address_v6::to_bytes() const { using namespace std; // For memcpy. bytes_type bytes; #if defined(ASIO_HAS_STD_ARRAY) memcpy(bytes.data(), addr_.s6_addr, 16); #else // defined(ASIO_HAS_STD_ARRAY) memcpy(bytes.elems, addr_.s6_addr, 16); #endif // defined(ASIO_HAS_STD_ARRAY) return bytes; } std::string address_v6::to_string() const { asio::error_code ec; std::string addr = to_string(ec); asio::detail::throw_error(ec); return addr; } std::string address_v6::to_string(asio::error_code& ec) const { char addr_str[asio::detail::max_addr_v6_str_len]; const char* addr = asio::detail::socket_ops::inet_ntop( ASIO_OS_DEF(AF_INET6), &addr_, addr_str, asio::detail::max_addr_v6_str_len, scope_id_, ec); if (addr == 0) return std::string(); return addr; } address_v6 address_v6::from_string(const char* str) { asio::error_code ec; address_v6 addr = from_string(str, ec); asio::detail::throw_error(ec); return addr; } address_v6 address_v6::from_string( const char* str, asio::error_code& ec) { address_v6 tmp; if (asio::detail::socket_ops::inet_pton( ASIO_OS_DEF(AF_INET6), str, &tmp.addr_, &tmp.scope_id_, ec) <= 0) return address_v6(); return tmp; } address_v6 address_v6::from_string(const std::string& str) { return from_string(str.c_str()); } address_v6 address_v6::from_string( const std::string& str, asio::error_code& ec) { return from_string(str.c_str(), ec); } address_v4 address_v6::to_v4() const { if (!is_v4_mapped() && !is_v4_compatible()) { std::bad_cast ex; asio::detail::throw_exception(ex); } address_v4::bytes_type v4_bytes = { { addr_.s6_addr[12], addr_.s6_addr[13], addr_.s6_addr[14], addr_.s6_addr[15] } }; return address_v4(v4_bytes); } bool address_v6::is_loopback() const { return ((addr_.s6_addr[0] == 0) && (addr_.s6_addr[1] == 0) && (addr_.s6_addr[2] == 0) && (addr_.s6_addr[3] == 0) && (addr_.s6_addr[4] == 0) && (addr_.s6_addr[5] == 0) && (addr_.s6_addr[6] == 0) && (addr_.s6_addr[7] == 0) && (addr_.s6_addr[8] 
== 0) && (addr_.s6_addr[9] == 0) && (addr_.s6_addr[10] == 0) && (addr_.s6_addr[11] == 0) && (addr_.s6_addr[12] == 0) && (addr_.s6_addr[13] == 0) && (addr_.s6_addr[14] == 0) && (addr_.s6_addr[15] == 1)); } bool address_v6::is_unspecified() const { return ((addr_.s6_addr[0] == 0) && (addr_.s6_addr[1] == 0) && (addr_.s6_addr[2] == 0) && (addr_.s6_addr[3] == 0) && (addr_.s6_addr[4] == 0) && (addr_.s6_addr[5] == 0) && (addr_.s6_addr[6] == 0) && (addr_.s6_addr[7] == 0) && (addr_.s6_addr[8] == 0) && (addr_.s6_addr[9] == 0) && (addr_.s6_addr[10] == 0) && (addr_.s6_addr[11] == 0) && (addr_.s6_addr[12] == 0) && (addr_.s6_addr[13] == 0) && (addr_.s6_addr[14] == 0) && (addr_.s6_addr[15] == 0)); } bool address_v6::is_link_local() const { return ((addr_.s6_addr[0] == 0xfe) && ((addr_.s6_addr[1] & 0xc0) == 0x80)); } bool address_v6::is_site_local() const { return ((addr_.s6_addr[0] == 0xfe) && ((addr_.s6_addr[1] & 0xc0) == 0xc0)); } bool address_v6::is_v4_mapped() const { return ((addr_.s6_addr[0] == 0) && (addr_.s6_addr[1] == 0) && (addr_.s6_addr[2] == 0) && (addr_.s6_addr[3] == 0) && (addr_.s6_addr[4] == 0) && (addr_.s6_addr[5] == 0) && (addr_.s6_addr[6] == 0) && (addr_.s6_addr[7] == 0) && (addr_.s6_addr[8] == 0) && (addr_.s6_addr[9] == 0) && (addr_.s6_addr[10] == 0xff) && (addr_.s6_addr[11] == 0xff)); } bool address_v6::is_v4_compatible() const { return ((addr_.s6_addr[0] == 0) && (addr_.s6_addr[1] == 0) && (addr_.s6_addr[2] == 0) && (addr_.s6_addr[3] == 0) && (addr_.s6_addr[4] == 0) && (addr_.s6_addr[5] == 0) && (addr_.s6_addr[6] == 0) && (addr_.s6_addr[7] == 0) && (addr_.s6_addr[8] == 0) && (addr_.s6_addr[9] == 0) && (addr_.s6_addr[10] == 0) && (addr_.s6_addr[11] == 0) && !((addr_.s6_addr[12] == 0) && (addr_.s6_addr[13] == 0) && (addr_.s6_addr[14] == 0) && ((addr_.s6_addr[15] == 0) || (addr_.s6_addr[15] == 1)))); } bool address_v6::is_multicast() const { return (addr_.s6_addr[0] == 0xff); } bool address_v6::is_multicast_global() const { return ((addr_.s6_addr[0] == 0xff) && ((addr_.s6_addr[1] & 0x0f) == 0x0e)); } bool address_v6::is_multicast_link_local() const { return ((addr_.s6_addr[0] == 0xff) && ((addr_.s6_addr[1] & 0x0f) == 0x02)); } bool address_v6::is_multicast_node_local() const { return ((addr_.s6_addr[0] == 0xff) && ((addr_.s6_addr[1] & 0x0f) == 0x01)); } bool address_v6::is_multicast_org_local() const { return ((addr_.s6_addr[0] == 0xff) && ((addr_.s6_addr[1] & 0x0f) == 0x08)); } bool address_v6::is_multicast_site_local() const { return ((addr_.s6_addr[0] == 0xff) && ((addr_.s6_addr[1] & 0x0f) == 0x05)); } bool operator==(const address_v6& a1, const address_v6& a2) { using namespace std; // For memcmp. return memcmp(&a1.addr_, &a2.addr_, sizeof(asio::detail::in6_addr_type)) == 0 && a1.scope_id_ == a2.scope_id_; } bool operator<(const address_v6& a1, const address_v6& a2) { using namespace std; // For memcmp. 
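// Usage sketch (illustrative only, not part of the asio sources): how the
// address_v6 helpers defined in this file are typically used, assuming the
// public asio headers are available. Shown as a comment so it does not alter
// this translation unit.
//
//   #include "asio/ip/address_v6.hpp"
//   #include <iostream>
//
//   int main()
//   {
//     asio::ip::address_v6 a =
//         asio::ip::address_v6::from_string("::ffff:192.0.2.1");
//     if (a.is_v4_mapped())
//       std::cout << a.to_v4().to_string() << std::endl; // prints 192.0.2.1
//     std::cout << asio::ip::address_v6::loopback().to_string()
//               << std::endl;                            // prints ::1
//   }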
int memcmp_result = memcmp(&a1.addr_, &a2.addr_, sizeof(asio::detail::in6_addr_type)); if (memcmp_result < 0) return true; if (memcmp_result > 0) return false; return a1.scope_id_ < a2.scope_id_; } address_v6 address_v6::loopback() { address_v6 tmp; tmp.addr_.s6_addr[15] = 1; return tmp; } address_v6 address_v6::v4_mapped(const address_v4& addr) { address_v4::bytes_type v4_bytes = addr.to_bytes(); bytes_type v6_bytes = { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, v4_bytes[0], v4_bytes[1], v4_bytes[2], v4_bytes[3] } }; return address_v6(v6_bytes); } address_v6 address_v6::v4_compatible(const address_v4& addr) { address_v4::bytes_type v4_bytes = addr.to_bytes(); bytes_type v6_bytes = { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, v4_bytes[0], v4_bytes[1], v4_bytes[2], v4_bytes[3] } }; return address_v6(v6_bytes); } } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_IMPL_ADDRESS_V6_IPP galera-3-25.3.20/asio/asio/ip/impl/address.ipp0000644000015300001660000001122513042054732020535 0ustar jenkinsjenkins// // ip/impl/address.ipp // ~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_IMPL_ADDRESS_IPP #define ASIO_IP_IMPL_ADDRESS_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/throw_error.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/error.hpp" #include "asio/ip/address.hpp" #include "asio/system_error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { address::address() : type_(ipv4), ipv4_address_(), ipv6_address_() { } address::address(const asio::ip::address_v4& ipv4_address) : type_(ipv4), ipv4_address_(ipv4_address), ipv6_address_() { } address::address(const asio::ip::address_v6& ipv6_address) : type_(ipv6), ipv4_address_(), ipv6_address_(ipv6_address) { } address::address(const address& other) : type_(other.type_), ipv4_address_(other.ipv4_address_), ipv6_address_(other.ipv6_address_) { } #if defined(ASIO_HAS_MOVE) address::address(address&& other) : type_(other.type_), ipv4_address_(other.ipv4_address_), ipv6_address_(other.ipv6_address_) { } #endif // defined(ASIO_HAS_MOVE) address& address::operator=(const address& other) { type_ = other.type_; ipv4_address_ = other.ipv4_address_; ipv6_address_ = other.ipv6_address_; return *this; } #if defined(ASIO_HAS_MOVE) address& address::operator=(address&& other) { type_ = other.type_; ipv4_address_ = other.ipv4_address_; ipv6_address_ = other.ipv6_address_; return *this; } #endif // defined(ASIO_HAS_MOVE) address& address::operator=(const asio::ip::address_v4& ipv4_address) { type_ = ipv4; ipv4_address_ = ipv4_address; ipv6_address_ = asio::ip::address_v6(); return *this; } address& address::operator=(const asio::ip::address_v6& ipv6_address) { type_ = ipv6; ipv4_address_ = asio::ip::address_v4(); ipv6_address_ = ipv6_address; return *this; } asio::ip::address_v4 address::to_v4() const { if (type_ != ipv4) { std::bad_cast ex; asio::detail::throw_exception(ex); } return ipv4_address_; } asio::ip::address_v6 address::to_v6() const { if (type_ != ipv6) { std::bad_cast ex; asio::detail::throw_exception(ex); } return ipv6_address_; } std::string address::to_string() const { if (type_ == ipv6) return 
ipv6_address_.to_string(); return ipv4_address_.to_string(); } std::string address::to_string(asio::error_code& ec) const { if (type_ == ipv6) return ipv6_address_.to_string(ec); return ipv4_address_.to_string(ec); } address address::from_string(const char* str) { asio::error_code ec; address addr = from_string(str, ec); asio::detail::throw_error(ec); return addr; } address address::from_string(const char* str, asio::error_code& ec) { asio::ip::address_v6 ipv6_address = asio::ip::address_v6::from_string(str, ec); if (!ec) { address tmp; tmp.type_ = ipv6; tmp.ipv6_address_ = ipv6_address; return tmp; } asio::ip::address_v4 ipv4_address = asio::ip::address_v4::from_string(str, ec); if (!ec) { address tmp; tmp.type_ = ipv4; tmp.ipv4_address_ = ipv4_address; return tmp; } return address(); } address address::from_string(const std::string& str) { return from_string(str.c_str()); } address address::from_string(const std::string& str, asio::error_code& ec) { return from_string(str.c_str(), ec); } bool address::is_loopback() const { return (type_ == ipv4) ? ipv4_address_.is_loopback() : ipv6_address_.is_loopback(); } bool address::is_unspecified() const { return (type_ == ipv4) ? ipv4_address_.is_unspecified() : ipv6_address_.is_unspecified(); } bool address::is_multicast() const { return (type_ == ipv4) ? ipv4_address_.is_multicast() : ipv6_address_.is_multicast(); } bool operator==(const address& a1, const address& a2) { if (a1.type_ != a2.type_) return false; if (a1.type_ == address::ipv6) return a1.ipv6_address_ == a2.ipv6_address_; return a1.ipv4_address_ == a2.ipv4_address_; } bool operator<(const address& a1, const address& a2) { if (a1.type_ < a2.type_) return true; if (a1.type_ > a2.type_) return false; if (a1.type_ == address::ipv6) return a1.ipv6_address_ < a2.ipv6_address_; return a1.ipv4_address_ < a2.ipv4_address_; } } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_IMPL_ADDRESS_IPP galera-3-25.3.20/asio/asio/ip/impl/address_v4.ipp0000644000015300001660000001013713042054732021147 0ustar jenkinsjenkins// // ip/impl/address_v4.ipp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_IMPL_ADDRESS_V4_IPP #define ASIO_IP_IMPL_ADDRESS_V4_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/error.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/ip/address_v4.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { address_v4::address_v4(const address_v4::bytes_type& bytes) { #if UCHAR_MAX > 0xFF if (bytes[0] > 0xFF || bytes[1] > 0xFF || bytes[2] > 0xFF || bytes[3] > 0xFF) { std::out_of_range ex("address_v4 from bytes_type"); asio::detail::throw_exception(ex); } #endif // UCHAR_MAX > 0xFF using namespace std; // For memcpy. 
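// Usage sketch (illustrative only, not part of the asio sources): building
// and classifying IPv4 addresses with the helpers defined in this file,
// assuming the public asio headers are available.
//
//   #include "asio/ip/address_v4.hpp"
//   #include <iostream>
//
//   int main()
//   {
//     asio::ip::address_v4 addr =
//         asio::ip::address_v4::from_string("192.168.1.10");
//     asio::ip::address_v4 mask =
//         asio::ip::address_v4::netmask(addr);             // class C -> 255.255.255.0
//     asio::ip::address_v4 bcast =
//         asio::ip::address_v4::broadcast(addr, mask);     // 192.168.1.255
//     std::cout << bcast.to_string()
//               << " loopback=" << addr.is_loopback() << std::endl;
//   }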
memcpy(&addr_.s_addr, bytes.data(), 4); } address_v4::address_v4(unsigned long addr) { #if ULONG_MAX > 0xFFFFFFFF if (addr > 0xFFFFFFFF) { std::out_of_range ex("address_v4 from unsigned long"); asio::detail::throw_exception(ex); } #endif // ULONG_MAX > 0xFFFFFFFF addr_.s_addr = asio::detail::socket_ops::host_to_network_long( static_cast(addr)); } address_v4::bytes_type address_v4::to_bytes() const { using namespace std; // For memcpy. bytes_type bytes; #if defined(ASIO_HAS_STD_ARRAY) memcpy(bytes.data(), &addr_.s_addr, 4); #else // defined(ASIO_HAS_STD_ARRAY) memcpy(bytes.elems, &addr_.s_addr, 4); #endif // defined(ASIO_HAS_STD_ARRAY) return bytes; } unsigned long address_v4::to_ulong() const { return asio::detail::socket_ops::network_to_host_long(addr_.s_addr); } std::string address_v4::to_string() const { asio::error_code ec; std::string addr = to_string(ec); asio::detail::throw_error(ec); return addr; } std::string address_v4::to_string(asio::error_code& ec) const { char addr_str[asio::detail::max_addr_v4_str_len]; const char* addr = asio::detail::socket_ops::inet_ntop( ASIO_OS_DEF(AF_INET), &addr_, addr_str, asio::detail::max_addr_v4_str_len, 0, ec); if (addr == 0) return std::string(); return addr; } address_v4 address_v4::from_string(const char* str) { asio::error_code ec; address_v4 addr = from_string(str, ec); asio::detail::throw_error(ec); return addr; } address_v4 address_v4::from_string( const char* str, asio::error_code& ec) { address_v4 tmp; if (asio::detail::socket_ops::inet_pton( ASIO_OS_DEF(AF_INET), str, &tmp.addr_, 0, ec) <= 0) return address_v4(); return tmp; } address_v4 address_v4::from_string(const std::string& str) { return from_string(str.c_str()); } address_v4 address_v4::from_string( const std::string& str, asio::error_code& ec) { return from_string(str.c_str(), ec); } bool address_v4::is_loopback() const { return (to_ulong() & 0xFF000000) == 0x7F000000; } bool address_v4::is_unspecified() const { return to_ulong() == 0; } bool address_v4::is_class_a() const { return (to_ulong() & 0x80000000) == 0; } bool address_v4::is_class_b() const { return (to_ulong() & 0xC0000000) == 0x80000000; } bool address_v4::is_class_c() const { return (to_ulong() & 0xE0000000) == 0xC0000000; } bool address_v4::is_multicast() const { return (to_ulong() & 0xF0000000) == 0xE0000000; } address_v4 address_v4::broadcast(const address_v4& addr, const address_v4& mask) { return address_v4(addr.to_ulong() | (mask.to_ulong() ^ 0xFFFFFFFF)); } address_v4 address_v4::netmask(const address_v4& addr) { if (addr.is_class_a()) return address_v4(0xFF000000); if (addr.is_class_b()) return address_v4(0xFFFF0000); if (addr.is_class_c()) return address_v4(0xFFFFFF00); return address_v4(0xFFFFFFFF); } } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_IMPL_ADDRESS_V4_IPP galera-3-25.3.20/asio/asio/ip/impl/host_name.ipp0000644000015300001660000000241013042054732021061 0ustar jenkinsjenkins// // ip/impl/host_name.ipp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_IMPL_HOST_NAME_IPP #define ASIO_IP_IMPL_HOST_NAME_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/winsock_init.hpp" #include "asio/ip/host_name.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { std::string host_name() { char name[1024]; asio::error_code ec; if (asio::detail::socket_ops::gethostname(name, sizeof(name), ec) != 0) { asio::detail::throw_error(ec); return std::string(); } return std::string(name); } std::string host_name(asio::error_code& ec) { char name[1024]; if (asio::detail::socket_ops::gethostname(name, sizeof(name), ec) != 0) return std::string(); return std::string(name); } } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_IMPL_HOST_NAME_IPP galera-3-25.3.20/asio/asio/ip/basic_resolver.hpp0000644000015300001660000002211213042054732021145 0ustar jenkinsjenkins// // ip/basic_resolver.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_BASIC_RESOLVER_HPP #define ASIO_IP_BASIC_RESOLVER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/basic_io_object.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/ip/basic_resolver_iterator.hpp" #include "asio/ip/basic_resolver_query.hpp" #include "asio/ip/resolver_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { /// Provides endpoint resolution functionality. /** * The basic_resolver class template provides the ability to resolve a query * to a list of endpoints. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ template > class basic_resolver : public basic_io_object { public: /// The protocol type. typedef InternetProtocol protocol_type; /// The endpoint type. typedef typename InternetProtocol::endpoint endpoint_type; /// The query type. typedef basic_resolver_query query; /// The iterator type. typedef basic_resolver_iterator iterator; /// Constructor. /** * This constructor creates a basic_resolver. * * @param io_service The io_service object that the resolver will use to * dispatch handlers for any asynchronous operations performed on the timer. */ explicit basic_resolver(asio::io_service& io_service) : basic_io_object(io_service) { } /// Cancel any asynchronous operations that are waiting on the resolver. /** * This function forces the completion of any pending asynchronous * operations on the host resolver. The handler for each cancelled operation * will be invoked with the asio::error::operation_aborted error code. */ void cancel() { return this->service.cancel(this->implementation); } /// Perform forward resolution of a query to a list of entries. /** * This function is used to resolve a query into a list of endpoint entries. * * @param q A query object that determines what endpoints will be returned. 
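   *
   * @par Example
   * A blocking resolve of a host/service pair might look like the following
   * sketch (illustrative only; "www.example.com" is a placeholder):
   * @code
   * asio::io_service io_service;
   * asio::ip::tcp::resolver resolver(io_service);
   * asio::ip::tcp::resolver::query query("www.example.com", "http");
   * asio::ip::tcp::resolver::iterator iter = resolver.resolve(query);
   * asio::ip::tcp::resolver::iterator end;
   * while (iter != end)
   * {
   *   asio::ip::tcp::endpoint endpoint = *iter++;
   *   std::cout << endpoint << "\n";
   * }
   * @endcode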
* * @returns A forward-only iterator that can be used to traverse the list * of endpoint entries. * * @throws asio::system_error Thrown on failure. * * @note A default constructed iterator represents the end of the list. * * A successful call to this function is guaranteed to return at least one * entry. */ iterator resolve(const query& q) { asio::error_code ec; iterator i = this->service.resolve(this->implementation, q, ec); asio::detail::throw_error(ec, "resolve"); return i; } /// Perform forward resolution of a query to a list of entries. /** * This function is used to resolve a query into a list of endpoint entries. * * @param q A query object that determines what endpoints will be returned. * * @param ec Set to indicate what error occurred, if any. * * @returns A forward-only iterator that can be used to traverse the list * of endpoint entries. Returns a default constructed iterator if an error * occurs. * * @note A default constructed iterator represents the end of the list. * * A successful call to this function is guaranteed to return at least one * entry. */ iterator resolve(const query& q, asio::error_code& ec) { return this->service.resolve(this->implementation, q, ec); } /// Asynchronously perform forward resolution of a query to a list of entries. /** * This function is used to asynchronously resolve a query into a list of * endpoint entries. * * @param q A query object that determines what endpoints will be returned. * * @param handler The handler to be called when the resolve operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * resolver::iterator iterator // Forward-only iterator that can * // be used to traverse the list * // of endpoint entries. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @note A default constructed iterator represents the end of the list. * * A successful resolve operation is guaranteed to pass at least one entry to * the handler. */ template ASIO_INITFN_RESULT_TYPE(ResolveHandler, void (asio::error_code, iterator)) async_resolve(const query& q, ASIO_MOVE_ARG(ResolveHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ResolveHandler. ASIO_RESOLVE_HANDLER_CHECK( ResolveHandler, handler, iterator) type_check; return this->service.async_resolve(this->implementation, q, ASIO_MOVE_CAST(ResolveHandler)(handler)); } /// Perform reverse resolution of an endpoint to a list of entries. /** * This function is used to resolve an endpoint into a list of endpoint * entries. * * @param e An endpoint object that determines what endpoints will be * returned. * * @returns A forward-only iterator that can be used to traverse the list * of endpoint entries. * * @throws asio::system_error Thrown on failure. * * @note A default constructed iterator represents the end of the list. * * A successful call to this function is guaranteed to return at least one * entry. */ iterator resolve(const endpoint_type& e) { asio::error_code ec; iterator i = this->service.resolve(this->implementation, e, ec); asio::detail::throw_error(ec, "resolve"); return i; } /// Perform reverse resolution of an endpoint to a list of entries. 
/** * This function is used to resolve an endpoint into a list of endpoint * entries. * * @param e An endpoint object that determines what endpoints will be * returned. * * @param ec Set to indicate what error occurred, if any. * * @returns A forward-only iterator that can be used to traverse the list * of endpoint entries. Returns a default constructed iterator if an error * occurs. * * @note A default constructed iterator represents the end of the list. * * A successful call to this function is guaranteed to return at least one * entry. */ iterator resolve(const endpoint_type& e, asio::error_code& ec) { return this->service.resolve(this->implementation, e, ec); } /// Asynchronously perform reverse resolution of an endpoint to a list of /// entries. /** * This function is used to asynchronously resolve an endpoint into a list of * endpoint entries. * * @param e An endpoint object that determines what endpoints will be * returned. * * @param handler The handler to be called when the resolve operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * resolver::iterator iterator // Forward-only iterator that can * // be used to traverse the list * // of endpoint entries. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @note A default constructed iterator represents the end of the list. * * A successful resolve operation is guaranteed to pass at least one entry to * the handler. */ template ASIO_INITFN_RESULT_TYPE(ResolveHandler, void (asio::error_code, iterator)) async_resolve(const endpoint_type& e, ASIO_MOVE_ARG(ResolveHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ResolveHandler. ASIO_RESOLVE_HANDLER_CHECK( ResolveHandler, handler, iterator) type_check; return this->service.async_resolve(this->implementation, e, ASIO_MOVE_CAST(ResolveHandler)(handler)); } }; } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_BASIC_RESOLVER_HPP galera-3-25.3.20/asio/asio/ssl/0000755000015300001660000000000013042054732015625 5ustar jenkinsjenkinsgalera-3-25.3.20/asio/asio/ssl/detail/0000755000015300001660000000000013042054732017067 5ustar jenkinsjenkinsgalera-3-25.3.20/asio/asio/ssl/detail/stream_core.hpp0000644000015300001660000000712313042054732022106 0ustar jenkinsjenkins// // ssl/detail/stream_core.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_STREAM_CORE_HPP #define ASIO_SSL_DETAIL_STREAM_CORE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_ENABLE_OLD_SSL) # if defined(ASIO_HAS_BOOST_DATE_TIME) # include "asio/deadline_timer.hpp" # else // defined(ASIO_HAS_BOOST_DATE_TIME) # include "asio/steady_timer.hpp" # endif // defined(ASIO_HAS_BOOST_DATE_TIME) # include "asio/ssl/detail/engine.hpp" # include "asio/buffer.hpp" #endif // !defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { #if !defined(ASIO_ENABLE_OLD_SSL) struct stream_core { // According to the OpenSSL documentation, this is the buffer size that is // sufficient to hold the largest possible TLS record. enum { max_tls_record_size = 17 * 1024 }; stream_core(SSL_CTX* context, asio::io_service& io_service) : engine_(context), pending_read_(io_service), pending_write_(io_service), output_buffer_space_(max_tls_record_size), output_buffer_(asio::buffer(output_buffer_space_)), input_buffer_space_(max_tls_record_size), input_buffer_(asio::buffer(input_buffer_space_)) { pending_read_.expires_at(neg_infin()); pending_write_.expires_at(neg_infin()); } ~stream_core() { } // The SSL engine. engine engine_; #if defined(ASIO_HAS_BOOST_DATE_TIME) // Timer used for storing queued read operations. asio::deadline_timer pending_read_; // Timer used for storing queued write operations. asio::deadline_timer pending_write_; // Helper function for obtaining a time value that always fires. static asio::deadline_timer::time_type neg_infin() { return boost::posix_time::neg_infin; } // Helper function for obtaining a time value that never fires. static asio::deadline_timer::time_type pos_infin() { return boost::posix_time::pos_infin; } #else // defined(ASIO_HAS_BOOST_DATE_TIME) // Timer used for storing queued read operations. asio::steady_timer pending_read_; // Timer used for storing queued write operations. asio::steady_timer pending_write_; // Helper function for obtaining a time value that always fires. static asio::steady_timer::time_point neg_infin() { return (asio::steady_timer::time_point::min)(); } // Helper function for obtaining a time value that never fires. static asio::steady_timer::time_point pos_infin() { return (asio::steady_timer::time_point::max)(); } #endif // defined(ASIO_HAS_BOOST_DATE_TIME) // Buffer space used to prepare output intended for the transport. std::vector output_buffer_space_; // A buffer that may be used to prepare output intended for the transport. const asio::mutable_buffers_1 output_buffer_; // Buffer space used to read input intended for the engine. std::vector input_buffer_space_; // A buffer that may be used to read input intended for the engine. const asio::mutable_buffers_1 input_buffer_; // The buffer pointing to the engine's unconsumed input. asio::const_buffer input_; }; #endif // !defined(ASIO_ENABLE_OLD_SSL) } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_STREAM_CORE_HPP galera-3-25.3.20/asio/asio/ssl/detail/openssl_types.hpp0000644000015300001660000000147113042054732022512 0ustar jenkinsjenkins// // ssl/detail/openssl_types.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_OPENSSL_TYPES_HPP #define ASIO_SSL_DETAIL_OPENSSL_TYPES_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #if !defined(OPENSSL_NO_ENGINE) # include #endif // !defined(OPENSSL_NO_ENGINE) #include #include #include "asio/detail/socket_types.hpp" #endif // ASIO_SSL_DETAIL_OPENSSL_TYPES_HPP galera-3-25.3.20/asio/asio/ssl/detail/write_op.hpp0000644000015300001660000000324313042054732021432 0ustar jenkinsjenkins// // ssl/detail/write_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_WRITE_OP_HPP #define ASIO_SSL_DETAIL_WRITE_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_ENABLE_OLD_SSL) # include "asio/detail/buffer_sequence_adapter.hpp" # include "asio/ssl/detail/engine.hpp" #endif // !defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { #if !defined(ASIO_ENABLE_OLD_SSL) template class write_op { public: write_op(const ConstBufferSequence& buffers) : buffers_(buffers) { } engine::want operator()(engine& eng, asio::error_code& ec, std::size_t& bytes_transferred) const { asio::const_buffer buffer = asio::detail::buffer_sequence_adapter::first(buffers_); return eng.write(buffer, ec, bytes_transferred); } template void call_handler(Handler& handler, const asio::error_code& ec, const std::size_t& bytes_transferred) const { handler(ec, bytes_transferred); } private: ConstBufferSequence buffers_; }; #endif // !defined(ASIO_ENABLE_OLD_SSL) } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_WRITE_OP_HPP galera-3-25.3.20/asio/asio/ssl/detail/engine.hpp0000644000015300001660000001257213042054732021054 0ustar jenkinsjenkins// // ssl/detail/engine.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_ENGINE_HPP #define ASIO_SSL_DETAIL_ENGINE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_ENABLE_OLD_SSL) # include "asio/buffer.hpp" # include "asio/detail/static_mutex.hpp" # include "asio/ssl/detail/openssl_types.hpp" # include "asio/ssl/detail/verify_callback.hpp" # include "asio/ssl/stream_base.hpp" # include "asio/ssl/verify_mode.hpp" #endif // !defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { #if !defined(ASIO_ENABLE_OLD_SSL) class engine { public: enum want { // Returned by functions to indicate that the engine wants input. The input // buffer should be updated to point to the data. The engine then needs to // be called again to retry the operation. 
want_input_and_retry = -2, // Returned by functions to indicate that the engine wants to write output. // The output buffer points to the data to be written. The engine then // needs to be called again to retry the operation. want_output_and_retry = -1, // Returned by functions to indicate that the engine doesn't need input or // output. want_nothing = 0, // Returned by functions to indicate that the engine wants to write output. // The output buffer points to the data to be written. After that the // operation is complete, and the engine does not need to be called again. want_output = 1 }; // Construct a new engine for the specified context. ASIO_DECL explicit engine(SSL_CTX* context); // Destructor. ASIO_DECL ~engine(); // Get the underlying implementation in the native type. ASIO_DECL SSL* native_handle(); // Set the peer verification mode. ASIO_DECL asio::error_code set_verify_mode( verify_mode v, asio::error_code& ec); // Set the peer verification depth. ASIO_DECL asio::error_code set_verify_depth( int depth, asio::error_code& ec); // Set a peer certificate verification callback. ASIO_DECL asio::error_code set_verify_callback( verify_callback_base* callback, asio::error_code& ec); // Perform an SSL handshake using either SSL_connect (client-side) or // SSL_accept (server-side). ASIO_DECL want handshake( stream_base::handshake_type type, asio::error_code& ec); // Perform a graceful shutdown of the SSL session. ASIO_DECL want shutdown(asio::error_code& ec); // Write bytes to the SSL session. ASIO_DECL want write(const asio::const_buffer& data, asio::error_code& ec, std::size_t& bytes_transferred); // Read bytes from the SSL session. ASIO_DECL want read(const asio::mutable_buffer& data, asio::error_code& ec, std::size_t& bytes_transferred); // Get output data to be written to the transport. ASIO_DECL asio::mutable_buffers_1 get_output( const asio::mutable_buffer& data); // Put input data that was read from the transport. ASIO_DECL asio::const_buffer put_input( const asio::const_buffer& data); // Map an error::eof code returned by the underlying transport according to // the type and state of the SSL session. Returns a const reference to the // error code object, suitable for passing to a completion handler. ASIO_DECL const asio::error_code& map_error_code( asio::error_code& ec) const; private: // Disallow copying and assignment. engine(const engine&); engine& operator=(const engine&); // Callback used when the SSL implementation wants to verify a certificate. ASIO_DECL static int verify_callback_function( int preverified, X509_STORE_CTX* ctx); // The SSL_accept function may not be thread safe. This mutex is used to // protect all calls to the SSL_accept function. ASIO_DECL static asio::detail::static_mutex& accept_mutex(); // Perform one operation. Returns >= 0 on success or error, want_read if the // operation needs more input, or want_write if it needs to write some output // before the operation can complete. ASIO_DECL want perform(int (engine::* op)(void*, std::size_t), void* data, std::size_t length, asio::error_code& ec, std::size_t* bytes_transferred); // Adapt the SSL_accept function to the signature needed for perform(). ASIO_DECL int do_accept(void*, std::size_t); // Adapt the SSL_connect function to the signature needed for perform(). ASIO_DECL int do_connect(void*, std::size_t); // Adapt the SSL_shutdown function to the signature needed for perform(). ASIO_DECL int do_shutdown(void*, std::size_t); // Adapt the SSL_read function to the signature needed for perform(). 
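  // How the want codes above are typically driven: a simplified, illustrative
  // sketch of the caller's loop (read_transport()/write_transport() are
  // hypothetical helpers; the real driver lives in asio/ssl/detail/io.hpp
  // further down in this archive).
  //
  //   engine::want w;
  //   do {
  //     w = eng.handshake(stream_base::client, ec);
  //     if (w == engine::want_input_and_retry)
  //       eng.put_input(read_transport());           // feed ciphertext to the engine
  //     else if (w == engine::want_output_and_retry || w == engine::want_output)
  //       write_transport(eng.get_output(out_buf));  // flush ciphertext from the engine
  //   } while (!ec && (w == engine::want_input_and_retry
  //                    || w == engine::want_output_and_retry));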
ASIO_DECL int do_read(void* data, std::size_t length); // Adapt the SSL_write function to the signature needed for perform(). ASIO_DECL int do_write(void* data, std::size_t length); SSL* ssl_; BIO* ext_bio_; }; #endif // !defined(ASIO_ENABLE_OLD_SSL) } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/ssl/detail/impl/engine.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_SSL_DETAIL_ENGINE_HPP galera-3-25.3.20/asio/asio/ssl/detail/shutdown_op.hpp0000644000015300001660000000245513042054732022157 0ustar jenkinsjenkins// // ssl/detail/shutdown_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_SHUTDOWN_OP_HPP #define ASIO_SSL_DETAIL_SHUTDOWN_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_ENABLE_OLD_SSL) # include "asio/ssl/detail/engine.hpp" #endif // !defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { #if !defined(ASIO_ENABLE_OLD_SSL) class shutdown_op { public: engine::want operator()(engine& eng, asio::error_code& ec, std::size_t& bytes_transferred) const { bytes_transferred = 0; return eng.shutdown(ec); } template void call_handler(Handler& handler, const asio::error_code& ec, const std::size_t&) const { handler(ec); } }; #endif // !defined(ASIO_ENABLE_OLD_SSL) } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_SHUTDOWN_OP_HPP galera-3-25.3.20/asio/asio/ssl/detail/password_callback.hpp0000644000015300001660000000306013042054732023255 0ustar jenkinsjenkins// // ssl/detail/password_callback.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_PASSWORD_CALLBACK_HPP #define ASIO_SSL_DETAIL_PASSWORD_CALLBACK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_ENABLE_OLD_SSL) # include # include # include "asio/ssl/context_base.hpp" #endif // !defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { #if !defined(ASIO_ENABLE_OLD_SSL) class password_callback_base { public: virtual ~password_callback_base() { } virtual std::string call(std::size_t size, context_base::password_purpose purpose) = 0; }; template class password_callback : public password_callback_base { public: explicit password_callback(PasswordCallback callback) : callback_(callback) { } virtual std::string call(std::size_t size, context_base::password_purpose purpose) { return callback_(size, purpose); } private: PasswordCallback callback_; }; #endif // !defined(ASIO_ENABLE_OLD_SSL) } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_PASSWORD_CALLBACK_HPP galera-3-25.3.20/asio/asio/ssl/detail/verify_callback.hpp0000644000015300001660000000266613042054732022732 0ustar jenkinsjenkins// // ssl/detail/verify_callback.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_VERIFY_CALLBACK_HPP #define ASIO_SSL_DETAIL_VERIFY_CALLBACK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_ENABLE_OLD_SSL) # include "asio/ssl/verify_context.hpp" #endif // !defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { #if !defined(ASIO_ENABLE_OLD_SSL) class verify_callback_base { public: virtual ~verify_callback_base() { } virtual bool call(bool preverified, verify_context& ctx) = 0; }; template class verify_callback : public verify_callback_base { public: explicit verify_callback(VerifyCallback callback) : callback_(callback) { } virtual bool call(bool preverified, verify_context& ctx) { return callback_(preverified, ctx); } private: VerifyCallback callback_; }; #endif // !defined(ASIO_ENABLE_OLD_SSL) } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_VERIFY_CALLBACK_HPP galera-3-25.3.20/asio/asio/ssl/detail/buffered_handshake_op.hpp0000644000015300001660000000604513042054732024073 0ustar jenkinsjenkins// // ssl/detail/buffered_handshake_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_BUFFERED_HANDSHAKE_OP_HPP #define ASIO_SSL_DETAIL_BUFFERED_HANDSHAKE_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_ENABLE_OLD_SSL) # include "asio/ssl/detail/engine.hpp" #endif // !defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { #if !defined(ASIO_ENABLE_OLD_SSL) template class buffered_handshake_op { public: buffered_handshake_op(stream_base::handshake_type type, const ConstBufferSequence& buffers) : type_(type), buffers_(buffers), total_buffer_size_(asio::buffer_size(buffers_)) { } engine::want operator()(engine& eng, asio::error_code& ec, std::size_t& bytes_transferred) const { typename ConstBufferSequence::const_iterator iter = buffers_.begin(); typename ConstBufferSequence::const_iterator end = buffers_.end(); std::size_t accumulated_size = 0; for (;;) { engine::want want = eng.handshake(type_, ec); if (want != engine::want_input_and_retry || bytes_transferred == total_buffer_size_) return want; // Find the next buffer piece to be fed to the engine. while (iter != end) { const_buffer buffer(*iter); // Skip over any buffers which have already been consumed by the engine. if (bytes_transferred >= accumulated_size + buffer_size(buffer)) { accumulated_size += buffer_size(buffer); ++iter; continue; } // The current buffer may have been partially consumed by the engine on // a previous iteration. If so, adjust the buffer to point to the // unused portion. if (bytes_transferred > accumulated_size) buffer = buffer + (bytes_transferred - accumulated_size); // Pass the buffer to the engine, and update the bytes transferred to // reflect the total number of bytes consumed so far. bytes_transferred += buffer_size(buffer); buffer = eng.put_input(buffer); bytes_transferred -= buffer_size(buffer); break; } } } template void call_handler(Handler& handler, const asio::error_code& ec, const std::size_t& bytes_transferred) const { handler(ec, bytes_transferred); } private: stream_base::handshake_type type_; ConstBufferSequence buffers_; std::size_t total_buffer_size_; }; #endif // !defined(ASIO_ENABLE_OLD_SSL) } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_BUFFERED_HANDSHAKE_OP_HPP galera-3-25.3.20/asio/asio/ssl/detail/handshake_op.hpp0000644000015300001660000000266613042054732022236 0ustar jenkinsjenkins// // ssl/detail/handshake_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_HANDSHAKE_OP_HPP #define ASIO_SSL_DETAIL_HANDSHAKE_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_ENABLE_OLD_SSL) # include "asio/ssl/detail/engine.hpp" #endif // !defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { #if !defined(ASIO_ENABLE_OLD_SSL) class handshake_op { public: handshake_op(stream_base::handshake_type type) : type_(type) { } engine::want operator()(engine& eng, asio::error_code& ec, std::size_t& bytes_transferred) const { bytes_transferred = 0; return eng.handshake(type_, ec); } template void call_handler(Handler& handler, const asio::error_code& ec, const std::size_t&) const { handler(ec); } private: stream_base::handshake_type type_; }; #endif // !defined(ASIO_ENABLE_OLD_SSL) } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_HANDSHAKE_OP_HPP galera-3-25.3.20/asio/asio/ssl/detail/read_op.hpp0000644000015300001660000000324713042054732021217 0ustar jenkinsjenkins// // ssl/detail/read_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_READ_OP_HPP #define ASIO_SSL_DETAIL_READ_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_ENABLE_OLD_SSL) # include "asio/detail/buffer_sequence_adapter.hpp" # include "asio/ssl/detail/engine.hpp" #endif // !defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { #if !defined(ASIO_ENABLE_OLD_SSL) template class read_op { public: read_op(const MutableBufferSequence& buffers) : buffers_(buffers) { } engine::want operator()(engine& eng, asio::error_code& ec, std::size_t& bytes_transferred) const { asio::mutable_buffer buffer = asio::detail::buffer_sequence_adapter::first(buffers_); return eng.read(buffer, ec, bytes_transferred); } template void call_handler(Handler& handler, const asio::error_code& ec, const std::size_t& bytes_transferred) const { handler(ec, bytes_transferred); } private: MutableBufferSequence buffers_; }; #endif // !defined(ASIO_ENABLE_OLD_SSL) } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_READ_OP_HPP galera-3-25.3.20/asio/asio/ssl/detail/io.hpp0000644000015300001660000002460313042054732020214 0ustar jenkinsjenkins// // ssl/detail/io.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_IO_HPP #define ASIO_SSL_DETAIL_IO_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_ENABLE_OLD_SSL) # include "asio/ssl/detail/engine.hpp" # include "asio/ssl/detail/stream_core.hpp" # include "asio/write.hpp" #endif // !defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { #if !defined(ASIO_ENABLE_OLD_SSL) template std::size_t io(Stream& next_layer, stream_core& core, const Operation& op, asio::error_code& ec) { std::size_t bytes_transferred = 0; do switch (op(core.engine_, ec, bytes_transferred)) { case engine::want_input_and_retry: // If the input buffer is empty then we need to read some more data from // the underlying transport. if (asio::buffer_size(core.input_) == 0) core.input_ = asio::buffer(core.input_buffer_, next_layer.read_some(core.input_buffer_, ec)); // Pass the new input data to the engine. core.input_ = core.engine_.put_input(core.input_); // Try the operation again. continue; case engine::want_output_and_retry: // Get output data from the engine and write it to the underlying // transport. asio::write(next_layer, core.engine_.get_output(core.output_buffer_), ec); // Try the operation again. continue; case engine::want_output: // Get output data from the engine and write it to the underlying // transport. asio::write(next_layer, core.engine_.get_output(core.output_buffer_), ec); // Operation is complete. Return result to caller. core.engine_.map_error_code(ec); return bytes_transferred; default: // Operation is complete. Return result to caller. core.engine_.map_error_code(ec); return bytes_transferred; } while (!ec); // Operation failed. Return result to caller. core.engine_.map_error_code(ec); return 0; } template class io_op { public: io_op(Stream& next_layer, stream_core& core, const Operation& op, Handler& handler) : next_layer_(next_layer), core_(core), op_(op), start_(0), want_(engine::want_nothing), bytes_transferred_(0), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } #if defined(ASIO_HAS_MOVE) io_op(const io_op& other) : next_layer_(other.next_layer_), core_(other.core_), op_(other.op_), start_(other.start_), want_(other.want_), ec_(other.ec_), bytes_transferred_(other.bytes_transferred_), handler_(other.handler_) { } io_op(io_op&& other) : next_layer_(other.next_layer_), core_(other.core_), op_(other.op_), start_(other.start_), want_(other.want_), ec_(other.ec_), bytes_transferred_(other.bytes_transferred_), handler_(ASIO_MOVE_CAST(Handler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()(asio::error_code ec, std::size_t bytes_transferred = ~std::size_t(0), int start = 0) { switch (start_ = start) { case 1: // Called after at least one async operation. do { switch (want_ = op_(core_.engine_, ec_, bytes_transferred_)) { case engine::want_input_and_retry: // If the input buffer already has data in it we can pass it to the // engine and then retry the operation immediately. if (asio::buffer_size(core_.input_) != 0) { core_.input_ = core_.engine_.put_input(core_.input_); continue; } // The engine wants more data to be read from input. However, we // cannot allow more than one read operation at a time on the // underlying transport. The pending_read_ timer's expiry is set to // pos_infin if a read is in progress, and neg_infin otherwise. 
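          // In other words, the deadline timers double as asynchronous gates:
          // an expiry of neg_infin() means "no operation in flight", pos_infin()
          // means "operation in flight". Waiters park on async_wait() and are
          // released when the expiry is moved back to neg_infin() below.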
if (core_.pending_read_.expires_at() == core_.neg_infin()) { // Prevent other read operations from being started. core_.pending_read_.expires_at(core_.pos_infin()); // Start reading some data from the underlying transport. next_layer_.async_read_some( asio::buffer(core_.input_buffer_), ASIO_MOVE_CAST(io_op)(*this)); } else { // Wait until the current read operation completes. core_.pending_read_.async_wait(ASIO_MOVE_CAST(io_op)(*this)); } // Yield control until asynchronous operation completes. Control // resumes at the "default:" label below. return; case engine::want_output_and_retry: case engine::want_output: // The engine wants some data to be written to the output. However, we // cannot allow more than one write operation at a time on the // underlying transport. The pending_write_ timer's expiry is set to // pos_infin if a write is in progress, and neg_infin otherwise. if (core_.pending_write_.expires_at() == core_.neg_infin()) { // Prevent other write operations from being started. core_.pending_write_.expires_at(core_.pos_infin()); // Start writing all the data to the underlying transport. asio::async_write(next_layer_, core_.engine_.get_output(core_.output_buffer_), ASIO_MOVE_CAST(io_op)(*this)); } else { // Wait until the current write operation completes. core_.pending_write_.async_wait(ASIO_MOVE_CAST(io_op)(*this)); } // Yield control until asynchronous operation completes. Control // resumes at the "default:" label below. return; default: // The SSL operation is done and we can invoke the handler, but we // have to keep in mind that this function might be being called from // the async operation's initiating function. In this case we're not // allowed to call the handler directly. Instead, issue a zero-sized // read so the handler runs "as-if" posted using io_service::post(). if (start) { next_layer_.async_read_some( asio::buffer(core_.input_buffer_, 0), ASIO_MOVE_CAST(io_op)(*this)); // Yield control until asynchronous operation completes. Control // resumes at the "default:" label below. return; } else { // Continue on to run handler directly. break; } } default: if (bytes_transferred == ~std::size_t(0)) bytes_transferred = 0; // Timer cancellation, no data transferred. else if (!ec_) ec_ = ec; switch (want_) { case engine::want_input_and_retry: // Add received data to the engine's input. core_.input_ = asio::buffer( core_.input_buffer_, bytes_transferred); core_.input_ = core_.engine_.put_input(core_.input_); // Release any waiting read operations. core_.pending_read_.expires_at(core_.neg_infin()); // Try the operation again. continue; case engine::want_output_and_retry: // Release any waiting write operations. core_.pending_write_.expires_at(core_.neg_infin()); // Try the operation again. continue; case engine::want_output: // Release any waiting write operations. core_.pending_write_.expires_at(core_.neg_infin()); // Fall through to call handler. default: // Pass the result to the handler. op_.call_handler(handler_, core_.engine_.map_error_code(ec_), ec_ ? 0 : bytes_transferred_); // Our work here is done. return; } } while (!ec_); // Operation failed. Pass the result to the handler. 
op_.call_handler(handler_, core_.engine_.map_error_code(ec_), 0); } } //private: Stream& next_layer_; stream_core& core_; Operation op_; int start_; engine::want want_; asio::error_code ec_; std::size_t bytes_transferred_; Handler handler_; }; template inline void* asio_handler_allocate(std::size_t size, io_op* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, io_op* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( io_op* this_handler) { return this_handler->start_ == 0 ? true : asio_handler_cont_helpers::is_continuation(this_handler->handler_); } template inline void asio_handler_invoke(Function& function, io_op* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, io_op* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void async_io(Stream& next_layer, stream_core& core, const Operation& op, Handler& handler) { io_op( next_layer, core, op, handler)( asio::error_code(), 0, 1); } #endif // !defined(ASIO_ENABLE_OLD_SSL) } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_IO_HPP galera-3-25.3.20/asio/asio/ssl/detail/impl/0000755000015300001660000000000013042054732020030 5ustar jenkinsjenkinsgalera-3-25.3.20/asio/asio/ssl/detail/impl/openssl_init.ipp0000644000015300001660000001057613042054732023261 0ustar jenkinsjenkins// // ssl/detail/impl/openssl_init.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2005 Voipster / Indrek dot Juhani at voipster dot com // Copyright (c) 2005-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_IMPL_OPENSSL_INIT_IPP #define ASIO_SSL_DETAIL_IMPL_OPENSSL_INIT_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/assert.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/tss_ptr.hpp" #include "asio/ssl/detail/openssl_init.hpp" #include "asio/ssl/detail/openssl_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { class openssl_init_base::do_init { public: do_init() { ::SSL_library_init(); ::SSL_load_error_strings(); ::OpenSSL_add_all_algorithms(); mutexes_.resize(::CRYPTO_num_locks()); for (size_t i = 0; i < mutexes_.size(); ++i) mutexes_[i].reset(new asio::detail::mutex); ::CRYPTO_set_locking_callback(&do_init::openssl_locking_func); ::CRYPTO_set_id_callback(&do_init::openssl_id_func); #if !defined(SSL_OP_NO_COMPRESSION) \ && (OPENSSL_VERSION_NUMBER >= 0x00908000L) null_compression_methods_ = sk_SSL_COMP_new_null(); #endif // !defined(SSL_OP_NO_COMPRESSION) // && (OPENSSL_VERSION_NUMBER >= 0x00908000L) } ~do_init() { #if !defined(SSL_OP_NO_COMPRESSION) \ && (OPENSSL_VERSION_NUMBER >= 0x00908000L) sk_SSL_COMP_free(null_compression_methods_); #endif // !defined(SSL_OP_NO_COMPRESSION) // && (OPENSSL_VERSION_NUMBER >= 0x00908000L) ::CRYPTO_set_id_callback(0); ::CRYPTO_set_locking_callback(0); ::ERR_free_strings(); #if (OPENSSL_VERSION_NUMBER >= 0x10000000L) ::ERR_remove_thread_state(NULL); #else // (OPENSSL_VERSION_NUMBER >= 0x10000000L) ::ERR_remove_state(0); #endif // (OPENSSL_VERSION_NUMBER >= 0x10000000L) ::EVP_cleanup(); ::CRYPTO_cleanup_all_ex_data(); ::CONF_modules_unload(1); #if !defined(OPENSSL_NO_ENGINE) ::ENGINE_cleanup(); #endif // !defined(OPENSSL_NO_ENGINE) } #if !defined(SSL_OP_NO_COMPRESSION) \ && (OPENSSL_VERSION_NUMBER >= 0x00908000L) STACK_OF(SSL_COMP)* get_null_compression_methods() const { return null_compression_methods_; } #endif // !defined(SSL_OP_NO_COMPRESSION) // && (OPENSSL_VERSION_NUMBER >= 0x00908000L) private: static unsigned long openssl_id_func() { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) return ::GetCurrentThreadId(); #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) void* id = instance()->thread_id_; if (id == 0) instance()->thread_id_ = id = &id; // Ugh. ASIO_ASSERT(sizeof(unsigned long) >= sizeof(void*)); return reinterpret_cast(id); #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) } static void openssl_locking_func(int mode, int n, const char* /*file*/, int /*line*/) { if (mode & CRYPTO_LOCK) instance()->mutexes_[n]->lock(); else instance()->mutexes_[n]->unlock(); } // Mutexes to be used in locking callbacks. std::vector > mutexes_; #if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) // The thread identifiers to be used by openssl. 
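  // Note: OpenSSL releases prior to 1.1.0 require the application to supply
  // the thread-id and locking callbacks registered in the constructor above;
  // without them, concurrent use of SSL objects is not thread safe.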
asio::detail::tss_ptr thread_id_; #endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #if !defined(SSL_OP_NO_COMPRESSION) \ && (OPENSSL_VERSION_NUMBER >= 0x00908000L) STACK_OF(SSL_COMP)* null_compression_methods_; #endif // !defined(SSL_OP_NO_COMPRESSION) // && (OPENSSL_VERSION_NUMBER >= 0x00908000L) }; asio::detail::shared_ptr openssl_init_base::instance() { static asio::detail::shared_ptr init(new do_init); return init; } #if !defined(SSL_OP_NO_COMPRESSION) \ && (OPENSSL_VERSION_NUMBER >= 0x00908000L) STACK_OF(SSL_COMP)* openssl_init_base::get_null_compression_methods() { return instance()->get_null_compression_methods(); } #endif // !defined(SSL_OP_NO_COMPRESSION) // && (OPENSSL_VERSION_NUMBER >= 0x00908000L) } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_IMPL_OPENSSL_INIT_IPP galera-3-25.3.20/asio/asio/ssl/detail/impl/engine.ipp0000644000015300001660000001756513042054732022025 0ustar jenkinsjenkins// // ssl/detail/impl/engine.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_IMPL_ENGINE_IPP #define ASIO_SSL_DETAIL_IMPL_ENGINE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_ENABLE_OLD_SSL) # include "asio/detail/throw_error.hpp" # include "asio/error.hpp" # include "asio/ssl/detail/engine.hpp" # include "asio/ssl/error.hpp" # include "asio/ssl/verify_context.hpp" #endif // !defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { #if !defined(ASIO_ENABLE_OLD_SSL) engine::engine(SSL_CTX* context) : ssl_(::SSL_new(context)) { if (!ssl_) { asio::error_code ec( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); asio::detail::throw_error(ec, "engine"); } accept_mutex().init(); ::SSL_set_mode(ssl_, SSL_MODE_ENABLE_PARTIAL_WRITE); ::SSL_set_mode(ssl_, SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER); #if defined(SSL_MODE_RELEASE_BUFFERS) ::SSL_set_mode(ssl_, SSL_MODE_RELEASE_BUFFERS); #endif // defined(SSL_MODE_RELEASE_BUFFERS) ::BIO* int_bio = 0; ::BIO_new_bio_pair(&int_bio, 0, &ext_bio_, 0); ::SSL_set_bio(ssl_, int_bio, int_bio); } engine::~engine() { if (SSL_get_app_data(ssl_)) { delete static_cast(SSL_get_app_data(ssl_)); SSL_set_app_data(ssl_, 0); } ::BIO_free(ext_bio_); ::SSL_free(ssl_); } SSL* engine::native_handle() { return ssl_; } asio::error_code engine::set_verify_mode( verify_mode v, asio::error_code& ec) { ::SSL_set_verify(ssl_, v, ::SSL_get_verify_callback(ssl_)); ec = asio::error_code(); return ec; } asio::error_code engine::set_verify_depth( int depth, asio::error_code& ec) { ::SSL_set_verify_depth(ssl_, depth); ec = asio::error_code(); return ec; } asio::error_code engine::set_verify_callback( verify_callback_base* callback, asio::error_code& ec) { if (SSL_get_app_data(ssl_)) delete static_cast(SSL_get_app_data(ssl_)); SSL_set_app_data(ssl_, callback); ::SSL_set_verify(ssl_, ::SSL_get_verify_mode(ssl_), &engine::verify_callback_function); ec = asio::error_code(); return ec; } int engine::verify_callback_function(int preverified, X509_STORE_CTX* ctx) { if (ctx) { if (SSL* ssl = static_cast( ::X509_STORE_CTX_get_ex_data( ctx, ::SSL_get_ex_data_X509_STORE_CTX_idx()))) { if 
(SSL_get_app_data(ssl)) { verify_callback_base* callback = static_cast( SSL_get_app_data(ssl)); verify_context verify_ctx(ctx); return callback->call(preverified != 0, verify_ctx) ? 1 : 0; } } } return 0; } engine::want engine::handshake( stream_base::handshake_type type, asio::error_code& ec) { return perform((type == asio::ssl::stream_base::client) ? &engine::do_connect : &engine::do_accept, 0, 0, ec, 0); } engine::want engine::shutdown(asio::error_code& ec) { return perform(&engine::do_shutdown, 0, 0, ec, 0); } engine::want engine::write(const asio::const_buffer& data, asio::error_code& ec, std::size_t& bytes_transferred) { if (asio::buffer_size(data) == 0) { ec = asio::error_code(); return engine::want_nothing; } return perform(&engine::do_write, const_cast(asio::buffer_cast(data)), asio::buffer_size(data), ec, &bytes_transferred); } engine::want engine::read(const asio::mutable_buffer& data, asio::error_code& ec, std::size_t& bytes_transferred) { if (asio::buffer_size(data) == 0) { ec = asio::error_code(); return engine::want_nothing; } return perform(&engine::do_read, asio::buffer_cast(data), asio::buffer_size(data), ec, &bytes_transferred); } asio::mutable_buffers_1 engine::get_output( const asio::mutable_buffer& data) { int length = ::BIO_read(ext_bio_, asio::buffer_cast(data), static_cast(asio::buffer_size(data))); return asio::buffer(data, length > 0 ? static_cast(length) : 0); } asio::const_buffer engine::put_input( const asio::const_buffer& data) { int length = ::BIO_write(ext_bio_, asio::buffer_cast(data), static_cast(asio::buffer_size(data))); return asio::buffer(data + (length > 0 ? static_cast(length) : 0)); } const asio::error_code& engine::map_error_code( asio::error_code& ec) const { // We only want to map the error::eof code. if (ec != asio::error::eof) return ec; // If there's data yet to be read, it's an error. if (BIO_wpending(ext_bio_)) { ec = asio::error_code( ERR_PACK(ERR_LIB_SSL, 0, SSL_R_SHORT_READ), asio::error::get_ssl_category()); return ec; } // SSL v2 doesn't provide a protocol-level shutdown, so an eof on the // underlying transport is passed through. if (ssl_->version == SSL2_VERSION) return ec; // Otherwise, the peer should have negotiated a proper shutdown. if ((::SSL_get_shutdown(ssl_) & SSL_RECEIVED_SHUTDOWN) == 0) { ec = asio::error_code( ERR_PACK(ERR_LIB_SSL, 0, SSL_R_SHORT_READ), asio::error::get_ssl_category()); } return ec; } asio::detail::static_mutex& engine::accept_mutex() { static asio::detail::static_mutex mutex = ASIO_STATIC_MUTEX_INIT; return mutex; } engine::want engine::perform(int (engine::* op)(void*, std::size_t), void* data, std::size_t length, asio::error_code& ec, std::size_t* bytes_transferred) { std::size_t pending_output_before = ::BIO_ctrl_pending(ext_bio_); ::ERR_clear_error(); int result = (this->*op)(data, length); int ssl_error = ::SSL_get_error(ssl_, result); int sys_error = static_cast(::ERR_get_error()); std::size_t pending_output_after = ::BIO_ctrl_pending(ext_bio_); if (ssl_error == SSL_ERROR_SSL) { ec = asio::error_code(sys_error, asio::error::get_ssl_category()); return want_nothing; } if (ssl_error == SSL_ERROR_SYSCALL) { ec = asio::error_code(sys_error, asio::error::get_system_category()); return want_nothing; } if (result > 0 && bytes_transferred) *bytes_transferred = static_cast(result); if (ssl_error == SSL_ERROR_WANT_WRITE) { ec = asio::error_code(); return want_output_and_retry; } else if (pending_output_after > pending_output_before) { ec = asio::error_code(); return result > 0 ? 
want_output : want_output_and_retry; } else if (ssl_error == SSL_ERROR_WANT_READ) { ec = asio::error_code(); return want_input_and_retry; } else if (::SSL_get_shutdown(ssl_) & SSL_RECEIVED_SHUTDOWN) { ec = asio::error::eof; return want_nothing; } else { ec = asio::error_code(); return want_nothing; } } int engine::do_accept(void*, std::size_t) { asio::detail::static_mutex::scoped_lock lock(accept_mutex()); return ::SSL_accept(ssl_); } int engine::do_connect(void*, std::size_t) { return ::SSL_connect(ssl_); } int engine::do_shutdown(void*, std::size_t) { int result = ::SSL_shutdown(ssl_); if (result == 0) result = ::SSL_shutdown(ssl_); return result; } int engine::do_read(void* data, std::size_t length) { return ::SSL_read(ssl_, data, length < INT_MAX ? static_cast(length) : INT_MAX); } int engine::do_write(void* data, std::size_t length) { return ::SSL_write(ssl_, data, length < INT_MAX ? static_cast(length) : INT_MAX); } #endif // !defined(ASIO_ENABLE_OLD_SSL) } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_IMPL_ENGINE_IPP galera-3-25.3.20/asio/asio/ssl/detail/openssl_init.hpp0000644000015300001660000000555613042054732022321 0ustar jenkinsjenkins// // ssl/detail/openssl_init.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_OPENSSL_INIT_HPP #define ASIO_SSL_DETAIL_OPENSSL_INIT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/shared_ptr.hpp" #include "asio/ssl/detail/openssl_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { class openssl_init_base : private noncopyable { protected: // Class that performs the actual initialisation. class do_init; // Helper function to manage a do_init singleton. The static instance of the // openssl_init object ensures that this function is always called before // main, and therefore before any other threads can get started. The do_init // instance must be static in this function to ensure that it gets // initialised before any other global objects try to use it. ASIO_DECL static asio::detail::shared_ptr instance(); #if !defined(SSL_OP_NO_COMPRESSION) \ && (OPENSSL_VERSION_NUMBER >= 0x00908000L) // Get an empty stack of compression methods, to be used when disabling // compression. ASIO_DECL static STACK_OF(SSL_COMP)* get_null_compression_methods(); #endif // !defined(SSL_OP_NO_COMPRESSION) // && (OPENSSL_VERSION_NUMBER >= 0x00908000L) }; template class openssl_init : private openssl_init_base { public: // Constructor. openssl_init() : ref_(instance()) { using namespace std; // For memmove. // Ensure openssl_init::instance_ is linked in. openssl_init* tmp = &instance_; memmove(&tmp, &tmp, sizeof(openssl_init*)); } // Destructor. ~openssl_init() { } #if !defined(SSL_OP_NO_COMPRESSION) \ && (OPENSSL_VERSION_NUMBER >= 0x00908000L) using openssl_init_base::get_null_compression_methods; #endif // !defined(SSL_OP_NO_COMPRESSION) // && (OPENSSL_VERSION_NUMBER >= 0x00908000L) private: // Instance to force initialisation of openssl at global scope. 
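  // A minimal sketch of the same init-guard idiom in isolation (lib_guard and
  // lib_guard_instance are illustrative names, not part of asio): a
  // function-local static shared_ptr yields a single, lazily constructed
  // guard, and every holder keeps it alive by storing a copy of the pointer,
  // much as ref_ below stores the do_init instance.
  //
  //   struct lib_guard
  //   {
  //     lib_guard()  { /* one-time library initialisation */ }
  //     ~lib_guard() { /* matching cleanup on last release */ }
  //   };
  //
  //   inline asio::detail::shared_ptr<lib_guard> lib_guard_instance()
  //   {
  //     static asio::detail::shared_ptr<lib_guard> g(new lib_guard);
  //     return g;
  //   }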
static openssl_init instance_; // Reference to singleton do_init object to ensure that openssl does not get // cleaned up until the last user has finished with it. asio::detail::shared_ptr ref_; }; template openssl_init openssl_init::instance_; } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/ssl/detail/impl/openssl_init.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_SSL_DETAIL_OPENSSL_INIT_HPP galera-3-25.3.20/asio/asio/ssl/basic_context.hpp0000644000015300001660000000164213042054732021166 0ustar jenkinsjenkins// // ssl/basic_context.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_BASIC_CONTEXT_HPP #define ASIO_SSL_BASIC_CONTEXT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_ENABLE_OLD_SSL) # include "asio/ssl/old/basic_context.hpp" #endif // defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { #if defined(ASIO_ENABLE_OLD_SSL) using asio::ssl::old::basic_context; #endif // defined(ASIO_ENABLE_OLD_SSL) } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_BASIC_CONTEXT_HPP galera-3-25.3.20/asio/asio/ssl/stream.hpp0000644000015300001660000006214413042054732017640 0ustar jenkinsjenkins// // ssl/stream.hpp // ~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_STREAM_HPP #define ASIO_SSL_STREAM_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_ENABLE_OLD_SSL) # include "asio/ssl/old/stream.hpp" #else // defined(ASIO_ENABLE_OLD_SSL) # include "asio/async_result.hpp" # include "asio/detail/buffer_sequence_adapter.hpp" # include "asio/detail/handler_type_requirements.hpp" # include "asio/detail/noncopyable.hpp" # include "asio/detail/type_traits.hpp" # include "asio/ssl/context.hpp" # include "asio/ssl/detail/buffered_handshake_op.hpp" # include "asio/ssl/detail/handshake_op.hpp" # include "asio/ssl/detail/io.hpp" # include "asio/ssl/detail/read_op.hpp" # include "asio/ssl/detail/shutdown_op.hpp" # include "asio/ssl/detail/stream_core.hpp" # include "asio/ssl/detail/write_op.hpp" # include "asio/ssl/stream_base.hpp" #endif // defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { #if defined(ASIO_ENABLE_OLD_SSL) using asio::ssl::old::stream; #else // defined(ASIO_ENABLE_OLD_SSL) /// Provides stream-oriented functionality using SSL. /** * The stream class template provides asynchronous and blocking stream-oriented * functionality using SSL. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. The application must also ensure that all * asynchronous operations are performed within the same implicit or explicit * strand. 
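 *
 * A minimal sketch of one way to meet the strand requirement, assuming the
 * io_service is run from several threads (@c sock, @c data and @c handle_read
 * are placeholder names):
 * @code
 * asio::io_service::strand strand(io_service);
 * sock.async_read_some(asio::buffer(data),
 *     strand.wrap(handle_read)); // handle_read(error, bytes_transferred)
 * @endcode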
* * @par Example * To use the SSL stream template with an ip::tcp::socket, you would write: * @code * asio::io_service io_service; * asio::ssl::context ctx(asio::ssl::context::sslv23); * asio::ssl::stream sock(io_service, ctx); * @endcode * * @par Concepts: * AsyncReadStream, AsyncWriteStream, Stream, SyncReadStream, SyncWriteStream. */ template class stream : public stream_base, private noncopyable { public: /// The native handle type of the SSL stream. typedef SSL* native_handle_type; /// Structure for use with deprecated impl_type. struct impl_struct { SSL* ssl; }; /// (Deprecated: Use native_handle_type.) The underlying implementation type. typedef impl_struct* impl_type; /// The type of the next layer. typedef typename remove_reference::type next_layer_type; /// The type of the lowest layer. typedef typename next_layer_type::lowest_layer_type lowest_layer_type; /// Construct a stream. /** * This constructor creates a stream and initialises the underlying stream * object. * * @param arg The argument to be passed to initialise the underlying stream. * * @param ctx The SSL context to be used for the stream. */ template stream(Arg& arg, context& ctx) : next_layer_(arg), core_(ctx.native_handle(), next_layer_.lowest_layer().get_io_service()) { backwards_compatible_impl_.ssl = core_.engine_.native_handle(); } /// Destructor. ~stream() { } /// Get the io_service associated with the object. /** * This function may be used to obtain the io_service object that the stream * uses to dispatch handlers for asynchronous operations. * * @return A reference to the io_service object that stream will use to * dispatch handlers. Ownership is not transferred to the caller. */ asio::io_service& get_io_service() { return next_layer_.lowest_layer().get_io_service(); } /// Get the underlying implementation in the native type. /** * This function may be used to obtain the underlying implementation of the * context. This is intended to allow access to context functionality that is * not otherwise provided. * * @par Example * The native_handle() function returns a pointer of type @c SSL* that is * suitable for passing to functions such as @c SSL_get_verify_result and * @c SSL_get_peer_certificate: * @code * asio::ssl::stream sock(io_service, ctx); * * // ... establish connection and perform handshake ... * * if (X509* cert = SSL_get_peer_certificate(sock.native_handle())) * { * if (SSL_get_verify_result(sock.native_handle()) == X509_V_OK) * { * // ... * } * } * @endcode */ native_handle_type native_handle() { return core_.engine_.native_handle(); } /// (Deprecated: Use native_handle().) Get the underlying implementation in /// the native type. /** * This function may be used to obtain the underlying implementation of the * context. This is intended to allow access to stream functionality that is * not otherwise provided. */ impl_type impl() { return &backwards_compatible_impl_; } /// Get a reference to the next layer. /** * This function returns a reference to the next layer in a stack of stream * layers. * * @return A reference to the next layer in the stack of stream layers. * Ownership is not transferred to the caller. */ const next_layer_type& next_layer() const { return next_layer_; } /// Get a reference to the next layer. /** * This function returns a reference to the next layer in a stack of stream * layers. * * @return A reference to the next layer in the stack of stream layers. * Ownership is not transferred to the caller. 
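 *
 * @par Example
 * A minimal sketch (@c endpoint is an assumed, already resolved endpoint):
 * the unencrypted transport is reached through the layer accessors, for
 * example to establish the TCP connection before handshaking:
 * @code
 * sock.lowest_layer().connect(endpoint);
 * sock.handshake(asio::ssl::stream_base::client);
 * @endcode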
*/ next_layer_type& next_layer() { return next_layer_; } /// Get a reference to the lowest layer. /** * This function returns a reference to the lowest layer in a stack of * stream layers. * * @return A reference to the lowest layer in the stack of stream layers. * Ownership is not transferred to the caller. */ lowest_layer_type& lowest_layer() { return next_layer_.lowest_layer(); } /// Get a reference to the lowest layer. /** * This function returns a reference to the lowest layer in a stack of * stream layers. * * @return A reference to the lowest layer in the stack of stream layers. * Ownership is not transferred to the caller. */ const lowest_layer_type& lowest_layer() const { return next_layer_.lowest_layer(); } /// Set the peer verification mode. /** * This function may be used to configure the peer verification mode used by * the stream. The new mode will override the mode inherited from the context. * * @param v A bitmask of peer verification modes. See @ref verify_mode for * available values. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_set_verify. */ void set_verify_mode(verify_mode v) { asio::error_code ec; set_verify_mode(v, ec); asio::detail::throw_error(ec, "set_verify_mode"); } /// Set the peer verification mode. /** * This function may be used to configure the peer verification mode used by * the stream. The new mode will override the mode inherited from the context. * * @param v A bitmask of peer verification modes. See @ref verify_mode for * available values. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_set_verify. */ asio::error_code set_verify_mode( verify_mode v, asio::error_code& ec) { return core_.engine_.set_verify_mode(v, ec); } /// Set the peer verification depth. /** * This function may be used to configure the maximum verification depth * allowed by the stream. * * @param depth Maximum depth for the certificate chain verification that * shall be allowed. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_set_verify_depth. */ void set_verify_depth(int depth) { asio::error_code ec; set_verify_depth(depth, ec); asio::detail::throw_error(ec, "set_verify_depth"); } /// Set the peer verification depth. /** * This function may be used to configure the maximum verification depth * allowed by the stream. * * @param depth Maximum depth for the certificate chain verification that * shall be allowed. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_set_verify_depth. */ asio::error_code set_verify_depth( int depth, asio::error_code& ec) { return core_.engine_.set_verify_depth(depth, ec); } /// Set the callback used to verify peer certificates. /** * This function is used to specify a callback function that will be called * by the implementation when it needs to verify a peer certificate. * * @param callback The function object to be used for verifying a certificate. * The function signature of the handler must be: * @code bool verify_callback( * bool preverified, // True if the certificate passed pre-verification. * verify_context& ctx // The peer certificate and other context. * ); @endcode * The return value of the callback is true if the certificate has passed * verification, false otherwise. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_set_verify. 
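 *
 * @par Example
 * A minimal sketch using the bundled RFC 2818 host name check (the host name
 * is a placeholder value):
 * @code
 * sock.set_verify_mode(asio::ssl::verify_peer);
 * sock.set_verify_callback(
 *     asio::ssl::rfc2818_verification("host.example.com"));
 * @endcode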
*/ template void set_verify_callback(VerifyCallback callback) { asio::error_code ec; this->set_verify_callback(callback, ec); asio::detail::throw_error(ec, "set_verify_callback"); } /// Set the callback used to verify peer certificates. /** * This function is used to specify a callback function that will be called * by the implementation when it needs to verify a peer certificate. * * @param callback The function object to be used for verifying a certificate. * The function signature of the handler must be: * @code bool verify_callback( * bool preverified, // True if the certificate passed pre-verification. * verify_context& ctx // The peer certificate and other context. * ); @endcode * The return value of the callback is true if the certificate has passed * verification, false otherwise. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_set_verify. */ template asio::error_code set_verify_callback(VerifyCallback callback, asio::error_code& ec) { return core_.engine_.set_verify_callback( new detail::verify_callback(callback), ec); } /// Perform SSL handshaking. /** * This function is used to perform SSL handshaking on the stream. The * function call will block until handshaking is complete or an error occurs. * * @param type The type of handshaking to be performed, i.e. as a client or as * a server. * * @throws asio::system_error Thrown on failure. */ void handshake(handshake_type type) { asio::error_code ec; handshake(type, ec); asio::detail::throw_error(ec, "handshake"); } /// Perform SSL handshaking. /** * This function is used to perform SSL handshaking on the stream. The * function call will block until handshaking is complete or an error occurs. * * @param type The type of handshaking to be performed, i.e. as a client or as * a server. * * @param ec Set to indicate what error occurred, if any. */ asio::error_code handshake(handshake_type type, asio::error_code& ec) { detail::io(next_layer_, core_, detail::handshake_op(type), ec); return ec; } /// Perform SSL handshaking. /** * This function is used to perform SSL handshaking on the stream. The * function call will block until handshaking is complete or an error occurs. * * @param type The type of handshaking to be performed, i.e. as a client or as * a server. * * @param buffers The buffered data to be reused for the handshake. * * @throws asio::system_error Thrown on failure. */ template void handshake(handshake_type type, const ConstBufferSequence& buffers) { asio::error_code ec; handshake(type, buffers, ec); asio::detail::throw_error(ec, "handshake"); } /// Perform SSL handshaking. /** * This function is used to perform SSL handshaking on the stream. The * function call will block until handshaking is complete or an error occurs. * * @param type The type of handshaking to be performed, i.e. as a client or as * a server. * * @param buffers The buffered data to be reused for the handshake. * * @param ec Set to indicate what error occurred, if any. */ template asio::error_code handshake(handshake_type type, const ConstBufferSequence& buffers, asio::error_code& ec) { detail::io(next_layer_, core_, detail::buffered_handshake_op(type, buffers), ec); return ec; } /// Start an asynchronous SSL handshake. /** * This function is used to asynchronously perform an SSL handshake on the * stream. This function call always returns immediately. * * @param type The type of handshaking to be performed, i.e. as a client or as * a server. 
* * @param handler The handler to be called when the handshake operation * completes. Copies will be made of the handler as required. The equivalent * function signature of the handler must be: * @code void handler( * const asio::error_code& error // Result of operation. * ); @endcode */ template ASIO_INITFN_RESULT_TYPE(HandshakeHandler, void (asio::error_code)) async_handshake(handshake_type type, ASIO_MOVE_ARG(HandshakeHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a HandshakeHandler. ASIO_HANDSHAKE_HANDLER_CHECK(HandshakeHandler, handler) type_check; asio::detail::async_result_init< HandshakeHandler, void (asio::error_code)> init( ASIO_MOVE_CAST(HandshakeHandler)(handler)); detail::async_io(next_layer_, core_, detail::handshake_op(type), init.handler); return init.result.get(); } /// Start an asynchronous SSL handshake. /** * This function is used to asynchronously perform an SSL handshake on the * stream. This function call always returns immediately. * * @param type The type of handshaking to be performed, i.e. as a client or as * a server. * * @param buffers The buffered data to be reused for the handshake. Although * the buffers object may be copied as necessary, ownership of the underlying * buffers is retained by the caller, which must guarantee that they remain * valid until the handler is called. * * @param handler The handler to be called when the handshake operation * completes. Copies will be made of the handler as required. The equivalent * function signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Amount of buffers used in handshake. * ); @endcode */ template ASIO_INITFN_RESULT_TYPE(BufferedHandshakeHandler, void (asio::error_code, std::size_t)) async_handshake(handshake_type type, const ConstBufferSequence& buffers, ASIO_MOVE_ARG(BufferedHandshakeHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a BufferedHandshakeHandler. ASIO_BUFFERED_HANDSHAKE_HANDLER_CHECK( BufferedHandshakeHandler, handler) type_check; asio::detail::async_result_init init( ASIO_MOVE_CAST(BufferedHandshakeHandler)(handler)); detail::async_io(next_layer_, core_, detail::buffered_handshake_op(type, buffers), init.handler); return init.result.get(); } /// Shut down SSL on the stream. /** * This function is used to shut down SSL on the stream. The function call * will block until SSL has been shut down or an error occurs. * * @throws asio::system_error Thrown on failure. */ void shutdown() { asio::error_code ec; shutdown(ec); asio::detail::throw_error(ec, "shutdown"); } /// Shut down SSL on the stream. /** * This function is used to shut down SSL on the stream. The function call * will block until SSL has been shut down or an error occurs. * * @param ec Set to indicate what error occurred, if any. */ asio::error_code shutdown(asio::error_code& ec) { detail::io(next_layer_, core_, detail::shutdown_op(), ec); return ec; } /// Asynchronously shut down SSL on the stream. /** * This function is used to asynchronously shut down SSL on the stream. This * function call always returns immediately. * * @param handler The handler to be called when the handshake operation * completes. Copies will be made of the handler as required. 
The equivalent * function signature of the handler must be: * @code void handler( * const asio::error_code& error // Result of operation. * ); @endcode */ template ASIO_INITFN_RESULT_TYPE(ShutdownHandler, void (asio::error_code)) async_shutdown(ASIO_MOVE_ARG(ShutdownHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ShutdownHandler. ASIO_SHUTDOWN_HANDLER_CHECK(ShutdownHandler, handler) type_check; asio::detail::async_result_init< ShutdownHandler, void (asio::error_code)> init( ASIO_MOVE_CAST(ShutdownHandler)(handler)); detail::async_io(next_layer_, core_, detail::shutdown_op(), init.handler); return init.result.get(); } /// Write some data to the stream. /** * This function is used to write data on the stream. The function call will * block until one or more bytes of data has been written successfully, or * until an error occurs. * * @param buffers The data to be written. * * @returns The number of bytes written. * * @throws asio::system_error Thrown on failure. * * @note The write_some operation may not transmit all of the data to the * peer. Consider using the @ref write function if you need to ensure that all * data is written before the blocking operation completes. */ template std::size_t write_some(const ConstBufferSequence& buffers) { asio::error_code ec; std::size_t n = write_some(buffers, ec); asio::detail::throw_error(ec, "write_some"); return n; } /// Write some data to the stream. /** * This function is used to write data on the stream. The function call will * block until one or more bytes of data has been written successfully, or * until an error occurs. * * @param buffers The data to be written to the stream. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes written. Returns 0 if an error occurred. * * @note The write_some operation may not transmit all of the data to the * peer. Consider using the @ref write function if you need to ensure that all * data is written before the blocking operation completes. */ template std::size_t write_some(const ConstBufferSequence& buffers, asio::error_code& ec) { return detail::io(next_layer_, core_, detail::write_op(buffers), ec); } /// Start an asynchronous write. /** * This function is used to asynchronously write one or more bytes of data to * the stream. The function call always returns immediately. * * @param buffers The data to be written to the stream. Although the buffers * object may be copied as necessary, ownership of the underlying buffers is * retained by the caller, which must guarantee that they remain valid until * the handler is called. * * @param handler The handler to be called when the write operation completes. * Copies will be made of the handler as required. The equivalent function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes written. * ); @endcode * * @note The async_write_some operation may not transmit all of the data to * the peer. Consider using the @ref async_write function if you need to * ensure that all data is written before the blocking operation completes. 
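 *
 * @par Example
 * A minimal sketch (@c data, @c size and @c handle_write are placeholders):
 * @code
 * sock.async_write_some(asio::buffer(data, size), handle_write);
 * // or, to ensure the whole buffer is transmitted:
 * asio::async_write(sock, asio::buffer(data, size), handle_write);
 * // void handle_write(const asio::error_code& error,
 * //                   std::size_t bytes_transferred);
 * @endcode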
*/ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write_some(const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a WriteHandler. ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; asio::detail::async_result_init< WriteHandler, void (asio::error_code, std::size_t)> init( ASIO_MOVE_CAST(WriteHandler)(handler)); detail::async_io(next_layer_, core_, detail::write_op(buffers), init.handler); return init.result.get(); } /// Read some data from the stream. /** * This function is used to read data from the stream. The function call will * block until one or more bytes of data has been read successfully, or until * an error occurs. * * @param buffers The buffers into which the data will be read. * * @returns The number of bytes read. * * @throws asio::system_error Thrown on failure. * * @note The read_some operation may not read all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that the * requested amount of data is read before the blocking operation completes. */ template std::size_t read_some(const MutableBufferSequence& buffers) { asio::error_code ec; std::size_t n = read_some(buffers, ec); asio::detail::throw_error(ec, "read_some"); return n; } /// Read some data from the stream. /** * This function is used to read data from the stream. The function call will * block until one or more bytes of data has been read successfully, or until * an error occurs. * * @param buffers The buffers into which the data will be read. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes read. Returns 0 if an error occurred. * * @note The read_some operation may not read all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that the * requested amount of data is read before the blocking operation completes. */ template std::size_t read_some(const MutableBufferSequence& buffers, asio::error_code& ec) { return detail::io(next_layer_, core_, detail::read_op(buffers), ec); } /// Start an asynchronous read. /** * This function is used to asynchronously read one or more bytes of data from * the stream. The function call always returns immediately. * * @param buffers The buffers into which the data will be read. Although the * buffers object may be copied as necessary, ownership of the underlying * buffers is retained by the caller, which must guarantee that they remain * valid until the handler is called. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The equivalent function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes read. * ); @endcode * * @note The async_read_some operation may not read all of the requested * number of bytes. Consider using the @ref async_read function if you need to * ensure that the requested amount of data is read before the asynchronous * operation completes. 
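 *
 * @par Example
 * A minimal sketch (@c reply and @c handle_read are placeholders):
 * @code
 * sock.async_read_some(asio::buffer(reply), handle_read);
 * // void handle_read(const asio::error_code& error,
 * //                  std::size_t bytes_transferred);
 * @endcode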
*/ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_some(const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ReadHandler. ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; asio::detail::async_result_init< ReadHandler, void (asio::error_code, std::size_t)> init( ASIO_MOVE_CAST(ReadHandler)(handler)); detail::async_io(next_layer_, core_, detail::read_op(buffers), init.handler); return init.result.get(); } private: Stream next_layer_; detail::stream_core core_; impl_struct backwards_compatible_impl_; }; #endif // defined(ASIO_ENABLE_OLD_SSL) } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_STREAM_HPP galera-3-25.3.20/asio/asio/ssl/stream_service.hpp0000644000015300001660000000165113042054732021354 0ustar jenkinsjenkins// // ssl/stream_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_STREAM_SERVICE_HPP #define ASIO_SSL_STREAM_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_ENABLE_OLD_SSL) # include "asio/ssl/old/stream_service.hpp" #endif // defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { #if defined(ASIO_ENABLE_OLD_SSL) using asio::ssl::old::stream_service; #endif // defined(ASIO_ENABLE_OLD_SSL) } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_STREAM_SERVICE_HPP galera-3-25.3.20/asio/asio/ssl/old/0000755000015300001660000000000013042054732016403 5ustar jenkinsjenkinsgalera-3-25.3.20/asio/asio/ssl/old/detail/0000755000015300001660000000000013042054732017645 5ustar jenkinsjenkinsgalera-3-25.3.20/asio/asio/ssl/old/detail/openssl_operation.hpp0000644000015300001660000003247413042054732024133 0ustar jenkinsjenkins// // ssl/old/detail/openssl_operation.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2005 Voipster / Indrek dot Juhani at voipster dot com // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_OLD_DETAIL_OPENSSL_OPERATION_HPP #define ASIO_SSL_OLD_DETAIL_OPENSSL_OPERATION_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/buffer.hpp" #include "asio/detail/assert.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/placeholders.hpp" #include "asio/ssl/detail/openssl_types.hpp" #include "asio/ssl/error.hpp" #include "asio/strand.hpp" #include "asio/system_error.hpp" #include "asio/write.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace old { namespace detail { typedef boost::function ssl_primitive_func; typedef boost::function user_handler_func; // Network send_/recv buffer implementation // // class net_buffer { static const int NET_BUF_SIZE = 16*1024 + 256; // SSL record size + spare unsigned char buf_[NET_BUF_SIZE]; unsigned char* data_start_; unsigned char* data_end_; public: net_buffer() { data_start_ = data_end_ = buf_; } unsigned char* get_unused_start() { return data_end_; } unsigned char* get_data_start() { return data_start_; } size_t get_unused_len() { return (NET_BUF_SIZE - (data_end_ - buf_)); } size_t get_data_len() { return (data_end_ - data_start_); } void data_added(size_t count) { data_end_ += count; data_end_ = data_end_ > (buf_ + NET_BUF_SIZE)? (buf_ + NET_BUF_SIZE): data_end_; } void data_removed(size_t count) { data_start_ += count; if (data_start_ >= data_end_) reset(); } void reset() { data_start_ = buf_; data_end_ = buf_; } bool has_data() { return (data_start_ < data_end_); } }; // class net_buffer // // Operation class // // template class openssl_operation { public: // Constructor for asynchronous operations openssl_operation(ssl_primitive_func primitive, Stream& socket, net_buffer& recv_buf, SSL* session, BIO* ssl_bio, user_handler_func handler, asio::io_service::strand& strand ) : primitive_(primitive) , user_handler_(handler) , strand_(&strand) , recv_buf_(recv_buf) , socket_(socket) , ssl_bio_(ssl_bio) , session_(session) { write_ = boost::bind( &openssl_operation::do_async_write, this, boost::arg<1>(), boost::arg<2>() ); read_ = boost::bind( &openssl_operation::do_async_read, this ); handler_= boost::bind( &openssl_operation::async_user_handler, this, boost::arg<1>(), boost::arg<2>() ); } // Constructor for synchronous operations openssl_operation(ssl_primitive_func primitive, Stream& socket, net_buffer& recv_buf, SSL* session, BIO* ssl_bio) : primitive_(primitive) , strand_(0) , recv_buf_(recv_buf) , socket_(socket) , ssl_bio_(ssl_bio) , session_(session) { write_ = boost::bind( &openssl_operation::do_sync_write, this, boost::arg<1>(), boost::arg<2>() ); read_ = boost::bind( &openssl_operation::do_sync_read, this ); handler_ = boost::bind( &openssl_operation::sync_user_handler, this, boost::arg<1>(), boost::arg<2>() ); } // Start operation // In case of asynchronous it returns 0, in sync mode returns success code // or throws an error... int start() { int rc = primitive_( session_ ); bool is_operation_done = (rc > 0); // For connect/accept/shutdown, the operation // is done, when return code is 1 // for write, it is done, when is retcode > 0 // for read, it is done when retcode > 0 int error_code = !is_operation_done ? 
::SSL_get_error( session_, rc ) : 0; int sys_error_code = ERR_get_error(); if (error_code == SSL_ERROR_SSL) return handler_(asio::error_code( sys_error_code, asio::error::get_ssl_category()), rc); bool is_read_needed = (error_code == SSL_ERROR_WANT_READ); bool is_write_needed = (error_code == SSL_ERROR_WANT_WRITE || ::BIO_ctrl_pending( ssl_bio_ )); bool is_shut_down_received = ((::SSL_get_shutdown( session_ ) & SSL_RECEIVED_SHUTDOWN) == SSL_RECEIVED_SHUTDOWN); bool is_shut_down_sent = ((::SSL_get_shutdown( session_ ) & SSL_SENT_SHUTDOWN) == SSL_SENT_SHUTDOWN); if (is_shut_down_sent && is_shut_down_received && is_operation_done && !is_write_needed) // SSL connection is shut down cleanly return handler_(asio::error_code(), 1); if (is_shut_down_received && !is_operation_done) // Shutdown has been requested, while we were reading or writing... // abort our action... return handler_(asio::error::shut_down, 0); if (!is_operation_done && !is_read_needed && !is_write_needed && !is_shut_down_sent) { // The operation has failed... It is not completed and does // not want network communication nor does want to send shutdown out... if (error_code == SSL_ERROR_SYSCALL) { return handler_(asio::error_code( sys_error_code, asio::error::system_category), rc); } else { return handler_(asio::error_code( sys_error_code, asio::error::get_ssl_category()), rc); } } if (!is_operation_done && !is_write_needed) { // We may have left over data that we can pass to SSL immediately if (recv_buf_.get_data_len() > 0) { // Pass the buffered data to SSL int written = ::BIO_write ( ssl_bio_, recv_buf_.get_data_start(), recv_buf_.get_data_len() ); if (written > 0) { recv_buf_.data_removed(written); } else if (written < 0) { if (!BIO_should_retry(ssl_bio_)) { // Some serios error with BIO.... return handler_(asio::error::no_recovery, 0); } } return start(); } else if (is_read_needed || (is_shut_down_sent && !is_shut_down_received)) { return read_(); } } // Continue with operation, flush any SSL data out to network... return write_(is_operation_done, rc); } // Private implementation private: typedef boost::function int_handler_func; typedef boost::function write_func; typedef boost::function read_func; ssl_primitive_func primitive_; user_handler_func user_handler_; asio::io_service::strand* strand_; write_func write_; read_func read_; int_handler_func handler_; net_buffer send_buf_; // buffers for network IO // The recv buffer is owned by the stream, not the operation, since there can // be left over bytes after passing the data up to the application, and these // bytes need to be kept around for the next read operation issued by the // application. net_buffer& recv_buf_; Stream& socket_; BIO* ssl_bio_; SSL* session_; // int sync_user_handler(const asio::error_code& error, int rc) { if (!error) return rc; throw asio::system_error(error); } int async_user_handler(asio::error_code error, int rc) { if (rc < 0) { if (!error) error = asio::error::no_recovery; rc = 0; } user_handler_(error, rc); return 0; } // Writes bytes asynchronously from SSL to NET int do_async_write(bool is_operation_done, int rc) { int len = ::BIO_ctrl_pending( ssl_bio_ ); if ( len ) { // There is something to write into net, do it... len = (int)send_buf_.get_unused_len() > len? len: send_buf_.get_unused_len(); if (len == 0) { // In case our send buffer is full, we have just to wait until // previous send to complete... 
return 0; } // Read outgoing data from bio len = ::BIO_read( ssl_bio_, send_buf_.get_unused_start(), len); if (len > 0) { unsigned char *data_start = send_buf_.get_unused_start(); send_buf_.data_added(len); ASIO_ASSERT(strand_); asio::async_write ( socket_, asio::buffer(data_start, len), strand_->wrap ( boost::bind ( &openssl_operation::async_write_handler, this, is_operation_done, rc, asio::placeholders::error, asio::placeholders::bytes_transferred ) ) ); return 0; } else if (!BIO_should_retry(ssl_bio_)) { // Seems like fatal error // reading from SSL BIO has failed... handler_(asio::error::no_recovery, 0); return 0; } } if (is_operation_done) { // Finish the operation, with success handler_(asio::error_code(), rc); return 0; } // OPeration is not done and writing to net has been made... // start operation again start(); return 0; } void async_write_handler(bool is_operation_done, int rc, const asio::error_code& error, size_t bytes_sent) { if (!error) { // Remove data from send buffer send_buf_.data_removed(bytes_sent); if (is_operation_done) handler_(asio::error_code(), rc); else // Since the operation was not completed, try it again... start(); } else handler_(error, rc); } int do_async_read() { // Wait for new data ASIO_ASSERT(strand_); socket_.async_read_some ( asio::buffer(recv_buf_.get_unused_start(), recv_buf_.get_unused_len()), strand_->wrap ( boost::bind ( &openssl_operation::async_read_handler, this, asio::placeholders::error, asio::placeholders::bytes_transferred ) ) ); return 0; } void async_read_handler(const asio::error_code& error, size_t bytes_recvd) { if (!error) { recv_buf_.data_added(bytes_recvd); // Pass the received data to SSL int written = ::BIO_write ( ssl_bio_, recv_buf_.get_data_start(), recv_buf_.get_data_len() ); if (written > 0) { recv_buf_.data_removed(written); } else if (written < 0) { if (!BIO_should_retry(ssl_bio_)) { // Some serios error with BIO.... handler_(asio::error::no_recovery, 0); return; } } // and try the SSL primitive again start(); } else { // Error in network level... // SSL can't continue either... handler_(error, 0); } } // Syncronous functions... int do_sync_write(bool is_operation_done, int rc) { int len = ::BIO_ctrl_pending( ssl_bio_ ); if ( len ) { // There is something to write into net, do it... len = (int)send_buf_.get_unused_len() > len? len: send_buf_.get_unused_len(); // Read outgoing data from bio len = ::BIO_read( ssl_bio_, send_buf_.get_unused_start(), len); if (len > 0) { size_t sent_len = asio::write( socket_, asio::buffer(send_buf_.get_unused_start(), len) ); send_buf_.data_added(len); send_buf_.data_removed(sent_len); } else if (!BIO_should_retry(ssl_bio_)) { // Seems like fatal error // reading from SSL BIO has failed... throw asio::system_error(asio::error::no_recovery); } } if (is_operation_done) // Finish the operation, with success return rc; // Operation is not finished, start again. return start(); } int do_sync_read() { size_t len = socket_.read_some ( asio::buffer(recv_buf_.get_unused_start(), recv_buf_.get_unused_len()) ); // Write data to ssl recv_buf_.data_added(len); // Pass the received data to SSL int written = ::BIO_write ( ssl_bio_, recv_buf_.get_data_start(), recv_buf_.get_data_len() ); if (written > 0) { recv_buf_.data_removed(written); } else if (written < 0) { if (!BIO_should_retry(ssl_bio_)) { // Some serios error with BIO.... 
throw asio::system_error(asio::error::no_recovery); } } // Try the operation again return start(); } }; // class openssl_operation } // namespace detail } // namespace old } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_OLD_DETAIL_OPENSSL_OPERATION_HPP galera-3-25.3.20/asio/asio/ssl/old/detail/openssl_context_service.hpp0000644000015300001660000002333113042054732025327 0ustar jenkinsjenkins// // ssl/old/detail/openssl_context_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2005 Voipster / Indrek dot Juhani at voipster dot com // Copyright (c) 2005-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_OLD_DETAIL_OPENSSL_CONTEXT_SERVICE_HPP #define ASIO_SSL_OLD_DETAIL_OPENSSL_CONTEXT_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/ssl/context_base.hpp" #include "asio/ssl/detail/openssl_init.hpp" #include "asio/ssl/detail/openssl_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace old { namespace detail { class openssl_context_service : public asio::detail::service_base { public: // The native type of the context. typedef ::SSL_CTX* impl_type; // The type for the password callback function object. typedef boost::function password_callback_type; // Constructor. openssl_context_service(asio::io_service& io_service) : asio::detail::service_base(io_service) { } // Destroy all user-defined handler objects owned by the service. void shutdown_service() { } // Return a null context implementation. static impl_type null() { return 0; } // Create a new context implementation. void create(impl_type& impl, context_base::method m) { switch (m) { #if defined(OPENSSL_NO_SSL2) case context_base::sslv2: case context_base::sslv2_client: case context_base::sslv2_server: asio::detail::throw_error(asio::error::invalid_argument); break; #else // defined(OPENSSL_NO_SSL2) case context_base::sslv2: impl = ::SSL_CTX_new(::SSLv2_method()); break; case context_base::sslv2_client: impl = ::SSL_CTX_new(::SSLv2_client_method()); break; case context_base::sslv2_server: impl = ::SSL_CTX_new(::SSLv2_server_method()); break; #endif // defined(OPENSSL_NO_SSL2) case context_base::sslv3: impl = ::SSL_CTX_new(::SSLv3_method()); break; case context_base::sslv3_client: impl = ::SSL_CTX_new(::SSLv3_client_method()); break; case context_base::sslv3_server: impl = ::SSL_CTX_new(::SSLv3_server_method()); break; case context_base::tlsv1: impl = ::SSL_CTX_new(::TLSv1_method()); break; case context_base::tlsv1_client: impl = ::SSL_CTX_new(::TLSv1_client_method()); break; case context_base::tlsv1_server: impl = ::SSL_CTX_new(::TLSv1_server_method()); break; case context_base::sslv23: impl = ::SSL_CTX_new(::SSLv23_method()); break; case context_base::sslv23_client: impl = ::SSL_CTX_new(::SSLv23_client_method()); break; case context_base::sslv23_server: impl = ::SSL_CTX_new(::SSLv23_server_method()); break; default: impl = ::SSL_CTX_new(0); break; } } // Destroy a context implementation. 
void destroy(impl_type& impl) { if (impl != null()) { if (impl->default_passwd_callback_userdata) { password_callback_type* callback = static_cast( impl->default_passwd_callback_userdata); delete callback; impl->default_passwd_callback_userdata = 0; } ::SSL_CTX_free(impl); impl = null(); } } // Set options on the context. asio::error_code set_options(impl_type& impl, context_base::options o, asio::error_code& ec) { ::SSL_CTX_set_options(impl, o); ec = asio::error_code(); return ec; } // Set peer verification mode. asio::error_code set_verify_mode(impl_type& impl, context_base::verify_mode v, asio::error_code& ec) { ::SSL_CTX_set_verify(impl, v, 0); ec = asio::error_code(); return ec; } // Load a certification authority file for performing verification. asio::error_code load_verify_file(impl_type& impl, const std::string& filename, asio::error_code& ec) { if (::SSL_CTX_load_verify_locations(impl, filename.c_str(), 0) != 1) { ec = asio::error::invalid_argument; return ec; } ec = asio::error_code(); return ec; } // Add a directory containing certification authority files to be used for // performing verification. asio::error_code add_verify_path(impl_type& impl, const std::string& path, asio::error_code& ec) { if (::SSL_CTX_load_verify_locations(impl, 0, path.c_str()) != 1) { ec = asio::error::invalid_argument; return ec; } ec = asio::error_code(); return ec; } // Use a certificate from a file. asio::error_code use_certificate_file(impl_type& impl, const std::string& filename, context_base::file_format format, asio::error_code& ec) { int file_type; switch (format) { case context_base::asn1: file_type = SSL_FILETYPE_ASN1; break; case context_base::pem: file_type = SSL_FILETYPE_PEM; break; default: { ec = asio::error::invalid_argument; return ec; } } if (::SSL_CTX_use_certificate_file(impl, filename.c_str(), file_type) != 1) { ec = asio::error::invalid_argument; return ec; } ec = asio::error_code(); return ec; } // Use a certificate chain from a file. asio::error_code use_certificate_chain_file(impl_type& impl, const std::string& filename, asio::error_code& ec) { if (::SSL_CTX_use_certificate_chain_file(impl, filename.c_str()) != 1) { ec = asio::error::invalid_argument; return ec; } ec = asio::error_code(); return ec; } // Use a private key from a file. asio::error_code use_private_key_file(impl_type& impl, const std::string& filename, context_base::file_format format, asio::error_code& ec) { int file_type; switch (format) { case context_base::asn1: file_type = SSL_FILETYPE_ASN1; break; case context_base::pem: file_type = SSL_FILETYPE_PEM; break; default: { ec = asio::error::invalid_argument; return ec; } } if (::SSL_CTX_use_PrivateKey_file(impl, filename.c_str(), file_type) != 1) { ec = asio::error::invalid_argument; return ec; } ec = asio::error_code(); return ec; } // Use an RSA private key from a file. asio::error_code use_rsa_private_key_file(impl_type& impl, const std::string& filename, context_base::file_format format, asio::error_code& ec) { int file_type; switch (format) { case context_base::asn1: file_type = SSL_FILETYPE_ASN1; break; case context_base::pem: file_type = SSL_FILETYPE_PEM; break; default: { ec = asio::error::invalid_argument; return ec; } } if (::SSL_CTX_use_RSAPrivateKey_file( impl, filename.c_str(), file_type) != 1) { ec = asio::error::invalid_argument; return ec; } ec = asio::error_code(); return ec; } // Use the specified file to obtain the temporary Diffie-Hellman parameters. 
asio::error_code use_tmp_dh_file(impl_type& impl, const std::string& filename, asio::error_code& ec) { ::BIO* bio = ::BIO_new_file(filename.c_str(), "r"); if (!bio) { ec = asio::error::invalid_argument; return ec; } ::DH* dh = ::PEM_read_bio_DHparams(bio, 0, 0, 0); if (!dh) { ::BIO_free(bio); ec = asio::error::invalid_argument; return ec; } ::BIO_free(bio); int result = ::SSL_CTX_set_tmp_dh(impl, dh); ::DH_free(dh); if (result != 1) { ec = asio::error::invalid_argument; return ec; } ec = asio::error_code(); return ec; } static int password_callback(char* buf, int size, int purpose, void* data) { using namespace std; // For strncat and strlen. if (data) { password_callback_type* callback = static_cast(data); std::string passwd = (*callback)(static_cast(size), purpose ? context_base::for_writing : context_base::for_reading); *buf = '\0'; strncat(buf, passwd.c_str(), size); return strlen(buf); } return 0; } // Set the password callback. template asio::error_code set_password_callback(impl_type& impl, Password_Callback callback, asio::error_code& ec) { // Allocate callback function object if not already present. if (impl->default_passwd_callback_userdata) { password_callback_type* callback_function = static_cast( impl->default_passwd_callback_userdata); *callback_function = callback; } else { password_callback_type* callback_function = new password_callback_type(callback); impl->default_passwd_callback_userdata = callback_function; } // Set the password callback. SSL_CTX_set_default_passwd_cb(impl, &openssl_context_service::password_callback); ec = asio::error_code(); return ec; } private: // Ensure openssl is initialised. asio::ssl::detail::openssl_init<> init_; }; } // namespace detail } // namespace old } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_OLD_DETAIL_OPENSSL_CONTEXT_SERVICE_HPP galera-3-25.3.20/asio/asio/ssl/old/detail/openssl_stream_service.hpp0000644000015300001660000003565613042054732025153 0ustar jenkinsjenkins// // ssl/old/detail/stream_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2005 Voipster / Indrek dot Juhani at voipster dot com // Copyright (c) 2005-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_OLD_DETAIL_OPENSSL_STREAM_SERVICE_HPP #define ASIO_SSL_OLD_DETAIL_OPENSSL_STREAM_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include #include #include #include #include #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/ssl/basic_context.hpp" #include "asio/ssl/stream_base.hpp" #include "asio/ssl/old/detail/openssl_operation.hpp" #include "asio/ssl/detail/openssl_types.hpp" #include "asio/strand.hpp" #include "asio/system_error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace old { namespace detail { class openssl_stream_service : public asio::detail::service_base { private: enum { max_buffer_size = INT_MAX }; //Base handler for asyncrhonous operations template class base_handler { public: typedef boost::function< void (const asio::error_code&, size_t)> func_t; base_handler(asio::io_service& io_service) : op_(NULL) , io_service_(io_service) , work_(io_service) {} void do_func(const asio::error_code& error, size_t size) { func_(error, size); } void set_operation(openssl_operation* op) { op_ = op; } void set_func(func_t func) { func_ = func; } ~base_handler() { delete op_; } private: func_t func_; openssl_operation* op_; asio::io_service& io_service_; asio::io_service::work work_; }; // class base_handler // Handler for asynchronous IO (write/read) operations template class io_handler : public base_handler { public: io_handler(Handler handler, asio::io_service& io_service) : base_handler(io_service) , handler_(handler) { this->set_func(boost::bind( &io_handler::handler_impl, this, boost::arg<1>(), boost::arg<2>() )); } private: Handler handler_; void handler_impl(const asio::error_code& error, size_t size) { std::auto_ptr > this_ptr(this); handler_(error, size); } }; // class io_handler // Handler for asyncrhonous handshake (connect, accept) functions template class handshake_handler : public base_handler { public: handshake_handler(Handler handler, asio::io_service& io_service) : base_handler(io_service) , handler_(handler) { this->set_func(boost::bind( &handshake_handler::handler_impl, this, boost::arg<1>(), boost::arg<2>() )); } private: Handler handler_; void handler_impl(const asio::error_code& error, size_t) { std::auto_ptr > this_ptr(this); handler_(error); } }; // class handshake_handler // Handler for asyncrhonous shutdown template class shutdown_handler : public base_handler { public: shutdown_handler(Handler handler, asio::io_service& io_service) : base_handler(io_service), handler_(handler) { this->set_func(boost::bind( &shutdown_handler::handler_impl, this, boost::arg<1>(), boost::arg<2>() )); } private: Handler handler_; void handler_impl(const asio::error_code& error, size_t) { std::auto_ptr > this_ptr(this); handler_(error); } }; // class shutdown_handler public: // The implementation type. typedef struct impl_struct { ::SSL* ssl; ::BIO* ext_bio; net_buffer recv_buf; } * impl_type; // Construct a new stream socket service for the specified io_service. explicit openssl_stream_service(asio::io_service& io_service) : asio::detail::service_base(io_service), strand_(io_service) { } // Destroy all user-defined handler objects owned by the service. void shutdown_service() { } // Return a null stream implementation. 
impl_type null() const { return 0; } // Create a new stream implementation. template void create(impl_type& impl, Stream& /*next_layer*/, basic_context& context) { impl = new impl_struct; impl->ssl = ::SSL_new(context.impl()); ::SSL_set_mode(impl->ssl, SSL_MODE_ENABLE_PARTIAL_WRITE); ::SSL_set_mode(impl->ssl, SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER); ::BIO* int_bio = 0; impl->ext_bio = 0; ::BIO_new_bio_pair(&int_bio, 8192, &impl->ext_bio, 8192); ::SSL_set_bio(impl->ssl, int_bio, int_bio); } // Destroy a stream implementation. template void destroy(impl_type& impl, Stream& /*next_layer*/) { if (impl != 0) { ::BIO_free(impl->ext_bio); ::SSL_free(impl->ssl); delete impl; impl = 0; } } // Perform SSL handshaking. template asio::error_code handshake(impl_type& impl, Stream& next_layer, stream_base::handshake_type type, asio::error_code& ec) { try { openssl_operation op( type == stream_base::client ? &ssl_wrap::SSL_connect: &ssl_wrap::SSL_accept, next_layer, impl->recv_buf, impl->ssl, impl->ext_bio); op.start(); } catch (asio::system_error& e) { ec = e.code(); return ec; } ec = asio::error_code(); return ec; } // Start an asynchronous SSL handshake. template void async_handshake(impl_type& impl, Stream& next_layer, stream_base::handshake_type type, Handler handler) { typedef handshake_handler connect_handler; connect_handler* local_handler = new connect_handler(handler, get_io_service()); openssl_operation* op = new openssl_operation ( type == stream_base::client ? &ssl_wrap::SSL_connect: &ssl_wrap::SSL_accept, next_layer, impl->recv_buf, impl->ssl, impl->ext_bio, boost::bind ( &base_handler::do_func, local_handler, boost::arg<1>(), boost::arg<2>() ), strand_ ); local_handler->set_operation(op); strand_.post(boost::bind(&openssl_operation::start, op)); } // Shut down SSL on the stream. template asio::error_code shutdown(impl_type& impl, Stream& next_layer, asio::error_code& ec) { try { openssl_operation op( &ssl_wrap::SSL_shutdown, next_layer, impl->recv_buf, impl->ssl, impl->ext_bio); op.start(); } catch (asio::system_error& e) { ec = e.code(); return ec; } ec = asio::error_code(); return ec; } // Asynchronously shut down SSL on the stream. template void async_shutdown(impl_type& impl, Stream& next_layer, Handler handler) { typedef shutdown_handler disconnect_handler; disconnect_handler* local_handler = new disconnect_handler(handler, get_io_service()); openssl_operation* op = new openssl_operation ( &ssl_wrap::SSL_shutdown, next_layer, impl->recv_buf, impl->ssl, impl->ext_bio, boost::bind ( &base_handler::do_func, local_handler, boost::arg<1>(), boost::arg<2>() ), strand_ ); local_handler->set_operation(op); strand_.post(boost::bind(&openssl_operation::start, op)); } // Write some data to the stream. 
template std::size_t write_some(impl_type& impl, Stream& next_layer, const Const_Buffers& buffers, asio::error_code& ec) { size_t bytes_transferred = 0; try { asio::const_buffer buffer = asio::detail::buffer_sequence_adapter< asio::const_buffer, Const_Buffers>::first(buffers); std::size_t buffer_size = asio::buffer_size(buffer); if (buffer_size > max_buffer_size) buffer_size = max_buffer_size; else if (buffer_size == 0) { ec = asio::error_code(); return 0; } boost::function send_func = boost::bind(boost::type(), &::SSL_write, boost::arg<1>(), asio::buffer_cast(buffer), static_cast(buffer_size)); openssl_operation op( send_func, next_layer, impl->recv_buf, impl->ssl, impl->ext_bio ); bytes_transferred = static_cast(op.start()); } catch (asio::system_error& e) { ec = e.code(); return 0; } ec = asio::error_code(); return bytes_transferred; } // Start an asynchronous write. template void async_write_some(impl_type& impl, Stream& next_layer, const Const_Buffers& buffers, Handler handler) { typedef io_handler send_handler; asio::const_buffer buffer = asio::detail::buffer_sequence_adapter< asio::const_buffer, Const_Buffers>::first(buffers); std::size_t buffer_size = asio::buffer_size(buffer); if (buffer_size > max_buffer_size) buffer_size = max_buffer_size; else if (buffer_size == 0) { get_io_service().post(asio::detail::bind_handler( handler, asio::error_code(), 0)); return; } send_handler* local_handler = new send_handler(handler, get_io_service()); boost::function send_func = boost::bind(boost::type(), &::SSL_write, boost::arg<1>(), asio::buffer_cast(buffer), static_cast(buffer_size)); openssl_operation* op = new openssl_operation ( send_func, next_layer, impl->recv_buf, impl->ssl, impl->ext_bio, boost::bind ( &base_handler::do_func, local_handler, boost::arg<1>(), boost::arg<2>() ), strand_ ); local_handler->set_operation(op); strand_.post(boost::bind(&openssl_operation::start, op)); } // Read some data from the stream. template std::size_t read_some(impl_type& impl, Stream& next_layer, const Mutable_Buffers& buffers, asio::error_code& ec) { size_t bytes_transferred = 0; try { asio::mutable_buffer buffer = asio::detail::buffer_sequence_adapter< asio::mutable_buffer, Mutable_Buffers>::first(buffers); std::size_t buffer_size = asio::buffer_size(buffer); if (buffer_size > max_buffer_size) buffer_size = max_buffer_size; else if (buffer_size == 0) { ec = asio::error_code(); return 0; } boost::function recv_func = boost::bind(boost::type(), &::SSL_read, boost::arg<1>(), asio::buffer_cast(buffer), static_cast(buffer_size)); openssl_operation op(recv_func, next_layer, impl->recv_buf, impl->ssl, impl->ext_bio ); bytes_transferred = static_cast(op.start()); } catch (asio::system_error& e) { ec = e.code(); return 0; } ec = asio::error_code(); return bytes_transferred; } // Start an asynchronous read. 
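  // Illustrative only: the completion handler delivered to this asynchronous
  // read path must be callable as handler(error_code, bytes_transferred).
  // A hypothetical caller using a C++11 lambda ('sock' and 'buf' are assumed
  // names) could look like:
  //
  //   sock.async_read_some(asio::buffer(buf),
  //       [](const asio::error_code& ec, std::size_t bytes)
  //       {
  //         // Consume 'bytes' bytes of decrypted data or handle 'ec'.
  //       });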
template void async_read_some(impl_type& impl, Stream& next_layer, const Mutable_Buffers& buffers, Handler handler) { typedef io_handler recv_handler; asio::mutable_buffer buffer = asio::detail::buffer_sequence_adapter< asio::mutable_buffer, Mutable_Buffers>::first(buffers); std::size_t buffer_size = asio::buffer_size(buffer); if (buffer_size > max_buffer_size) buffer_size = max_buffer_size; else if (buffer_size == 0) { get_io_service().post(asio::detail::bind_handler( handler, asio::error_code(), 0)); return; } recv_handler* local_handler = new recv_handler(handler, get_io_service()); boost::function recv_func = boost::bind(boost::type(), &::SSL_read, boost::arg<1>(), asio::buffer_cast(buffer), static_cast(buffer_size)); openssl_operation* op = new openssl_operation ( recv_func, next_layer, impl->recv_buf, impl->ssl, impl->ext_bio, boost::bind ( &base_handler::do_func, local_handler, boost::arg<1>(), boost::arg<2>() ), strand_ ); local_handler->set_operation(op); strand_.post(boost::bind(&openssl_operation::start, op)); } // Peek at the incoming data on the stream. template std::size_t peek(impl_type& /*impl*/, Stream& /*next_layer*/, const Mutable_Buffers& /*buffers*/, asio::error_code& ec) { ec = asio::error_code(); return 0; } // Determine the amount of data that may be read without blocking. template std::size_t in_avail(impl_type& /*impl*/, Stream& /*next_layer*/, asio::error_code& ec) { ec = asio::error_code(); return 0; } private: asio::io_service::strand strand_; typedef asio::detail::mutex mutex_type; template struct ssl_wrap { static Mutex ssl_mutex_; static int SSL_accept(SSL *ssl) { typename Mutex::scoped_lock lock(ssl_mutex_); return ::SSL_accept(ssl); } static int SSL_connect(SSL *ssl) { typename Mutex::scoped_lock lock(ssl_mutex_); return ::SSL_connect(ssl); } static int SSL_shutdown(SSL *ssl) { typename Mutex::scoped_lock lock(ssl_mutex_); return ::SSL_shutdown(ssl); } }; }; template Mutex openssl_stream_service::ssl_wrap::ssl_mutex_; } // namespace detail } // namespace old } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_OLD_DETAIL_OPENSSL_STREAM_SERVICE_HPP galera-3-25.3.20/asio/asio/ssl/old/basic_context.hpp0000644000015300001660000003235113042054732021745 0ustar jenkinsjenkins// // ssl/old/basic_context.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2005 Voipster / Indrek dot Juhani at voipster dot com // Copyright (c) 2005-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_OLD_BASIC_CONTEXT_HPP #define ASIO_SSL_OLD_BASIC_CONTEXT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/ssl/context_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace old { /// SSL context. template class basic_context : public context_base, private boost::noncopyable { public: /// The type of the service that will be used to provide context operations. typedef Service service_type; /// The native implementation type of the SSL context. typedef typename service_type::impl_type impl_type; /// Constructor. 
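  /// A construction sketch, for illustration only. It assumes the old
  /// context_service as the Service parameter (as used by the deprecated
  /// asio::ssl::context typedef); the variable names are not part of this
  /// header:
  /// @code
  /// asio::io_service io_service;
  /// asio::ssl::old::basic_context<asio::ssl::old::context_service>
  ///     ctx(io_service, asio::ssl::context_base::sslv23);
  /// @endcode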
basic_context(asio::io_service& io_service, method m) : service_(asio::use_service(io_service)), impl_(service_.null()) { service_.create(impl_, m); } /// Destructor. ~basic_context() { service_.destroy(impl_); } /// Get the underlying implementation in the native type. /** * This function may be used to obtain the underlying implementation of the * context. This is intended to allow access to context functionality that is * not otherwise provided. */ impl_type impl() { return impl_; } /// Set options on the context. /** * This function may be used to configure the SSL options used by the context. * * @param o A bitmask of options. The available option values are defined in * the context_base class. The options are bitwise-ored with any existing * value for the options. * * @throws asio::system_error Thrown on failure. */ void set_options(options o) { asio::error_code ec; service_.set_options(impl_, o, ec); asio::detail::throw_error(ec); } /// Set options on the context. /** * This function may be used to configure the SSL options used by the context. * * @param o A bitmask of options. The available option values are defined in * the context_base class. The options are bitwise-ored with any existing * value for the options. * * @param ec Set to indicate what error occurred, if any. */ asio::error_code set_options(options o, asio::error_code& ec) { return service_.set_options(impl_, o, ec); } /// Set the peer verification mode. /** * This function may be used to configure the peer verification mode used by * the context. * * @param v A bitmask of peer verification modes. The available verify_mode * values are defined in the context_base class. * * @throws asio::system_error Thrown on failure. */ void set_verify_mode(verify_mode v) { asio::error_code ec; service_.set_verify_mode(impl_, v, ec); asio::detail::throw_error(ec); } /// Set the peer verification mode. /** * This function may be used to configure the peer verification mode used by * the context. * * @param v A bitmask of peer verification modes. The available verify_mode * values are defined in the context_base class. * * @param ec Set to indicate what error occurred, if any. */ asio::error_code set_verify_mode(verify_mode v, asio::error_code& ec) { return service_.set_verify_mode(impl_, v, ec); } /// Load a certification authority file for performing verification. /** * This function is used to load one or more trusted certification authorities * from a file. * * @param filename The name of a file containing certification authority * certificates in PEM format. * * @throws asio::system_error Thrown on failure. */ void load_verify_file(const std::string& filename) { asio::error_code ec; service_.load_verify_file(impl_, filename, ec); asio::detail::throw_error(ec); } /// Load a certification authority file for performing verification. /** * This function is used to load the certificates for one or more trusted * certification authorities from a file. * * @param filename The name of a file containing certification authority * certificates in PEM format. * * @param ec Set to indicate what error occurred, if any. */ asio::error_code load_verify_file(const std::string& filename, asio::error_code& ec) { return service_.load_verify_file(impl_, filename, ec); } /// Add a directory containing certificate authority files to be used for /// performing verification. /** * This function is used to specify the name of a directory containing * certification authority certificates. Each file in the directory must * contain a single certificate. 
The files must be named using the subject * name's hash and an extension of ".0". * * @param path The name of a directory containing the certificates. * * @throws asio::system_error Thrown on failure. */ void add_verify_path(const std::string& path) { asio::error_code ec; service_.add_verify_path(impl_, path, ec); asio::detail::throw_error(ec); } /// Add a directory containing certificate authority files to be used for /// performing verification. /** * This function is used to specify the name of a directory containing * certification authority certificates. Each file in the directory must * contain a single certificate. The files must be named using the subject * name's hash and an extension of ".0". * * @param path The name of a directory containing the certificates. * * @param ec Set to indicate what error occurred, if any. */ asio::error_code add_verify_path(const std::string& path, asio::error_code& ec) { return service_.add_verify_path(impl_, path, ec); } /// Use a certificate from a file. /** * This function is used to load a certificate into the context from a file. * * @param filename The name of the file containing the certificate. * * @param format The file format (ASN.1 or PEM). * * @throws asio::system_error Thrown on failure. */ void use_certificate_file(const std::string& filename, file_format format) { asio::error_code ec; service_.use_certificate_file(impl_, filename, format, ec); asio::detail::throw_error(ec); } /// Use a certificate from a file. /** * This function is used to load a certificate into the context from a file. * * @param filename The name of the file containing the certificate. * * @param format The file format (ASN.1 or PEM). * * @param ec Set to indicate what error occurred, if any. */ asio::error_code use_certificate_file(const std::string& filename, file_format format, asio::error_code& ec) { return service_.use_certificate_file(impl_, filename, format, ec); } /// Use a certificate chain from a file. /** * This function is used to load a certificate chain into the context from a * file. * * @param filename The name of the file containing the certificate. The file * must use the PEM format. * * @throws asio::system_error Thrown on failure. */ void use_certificate_chain_file(const std::string& filename) { asio::error_code ec; service_.use_certificate_chain_file(impl_, filename, ec); asio::detail::throw_error(ec); } /// Use a certificate chain from a file. /** * This function is used to load a certificate chain into the context from a * file. * * @param filename The name of the file containing the certificate. The file * must use the PEM format. * * @param ec Set to indicate what error occurred, if any. */ asio::error_code use_certificate_chain_file( const std::string& filename, asio::error_code& ec) { return service_.use_certificate_chain_file(impl_, filename, ec); } /// Use a private key from a file. /** * This function is used to load a private key into the context from a file. * * @param filename The name of the file containing the private key. * * @param format The file format (ASN.1 or PEM). * * @throws asio::system_error Thrown on failure. */ void use_private_key_file(const std::string& filename, file_format format) { asio::error_code ec; service_.use_private_key_file(impl_, filename, format, ec); asio::detail::throw_error(ec); } /// Use a private key from a file. /** * This function is used to load a private key into the context from a file. * * @param filename The name of the file containing the private key. 
* * @param format The file format (ASN.1 or PEM). * * @param ec Set to indicate what error occurred, if any. */ asio::error_code use_private_key_file(const std::string& filename, file_format format, asio::error_code& ec) { return service_.use_private_key_file(impl_, filename, format, ec); } /// Use an RSA private key from a file. /** * This function is used to load an RSA private key into the context from a * file. * * @param filename The name of the file containing the RSA private key. * * @param format The file format (ASN.1 or PEM). * * @throws asio::system_error Thrown on failure. */ void use_rsa_private_key_file(const std::string& filename, file_format format) { asio::error_code ec; service_.use_rsa_private_key_file(impl_, filename, format, ec); asio::detail::throw_error(ec); } /// Use an RSA private key from a file. /** * This function is used to load an RSA private key into the context from a * file. * * @param filename The name of the file containing the RSA private key. * * @param format The file format (ASN.1 or PEM). * * @param ec Set to indicate what error occurred, if any. */ asio::error_code use_rsa_private_key_file( const std::string& filename, file_format format, asio::error_code& ec) { return service_.use_rsa_private_key_file(impl_, filename, format, ec); } /// Use the specified file to obtain the temporary Diffie-Hellman parameters. /** * This function is used to load Diffie-Hellman parameters into the context * from a file. * * @param filename The name of the file containing the Diffie-Hellman * parameters. The file must use the PEM format. * * @throws asio::system_error Thrown on failure. */ void use_tmp_dh_file(const std::string& filename) { asio::error_code ec; service_.use_tmp_dh_file(impl_, filename, ec); asio::detail::throw_error(ec); } /// Use the specified file to obtain the temporary Diffie-Hellman parameters. /** * This function is used to load Diffie-Hellman parameters into the context * from a file. * * @param filename The name of the file containing the Diffie-Hellman * parameters. The file must use the PEM format. * * @param ec Set to indicate what error occurred, if any. */ asio::error_code use_tmp_dh_file(const std::string& filename, asio::error_code& ec) { return service_.use_tmp_dh_file(impl_, filename, ec); } /// Set the password callback. /** * This function is used to specify a callback function to obtain password * information about an encrypted key in PEM format. * * @param callback The function object to be used for obtaining the password. * The function signature of the handler must be: * @code std::string password_callback( * std::size_t max_length, // The maximum size for a password. * password_purpose purpose // Whether password is for reading or writing. * ); @endcode * The return value of the callback is a string containing the password. * * @throws asio::system_error Thrown on failure. */ template void set_password_callback(PasswordCallback callback) { asio::error_code ec; service_.set_password_callback(impl_, callback, ec); asio::detail::throw_error(ec); } /// Set the password callback. /** * This function is used to specify a callback function to obtain password * information about an encrypted key in PEM format. * * @param callback The function object to be used for obtaining the password. * The function signature of the handler must be: * @code std::string password_callback( * std::size_t max_length, // The maximum size for a password. * password_purpose purpose // Whether password is for reading or writing. 
* ); @endcode * The return value of the callback is a string containing the password. * * @param ec Set to indicate what error occurred, if any. */ template asio::error_code set_password_callback(PasswordCallback callback, asio::error_code& ec) { return service_.set_password_callback(impl_, callback, ec); } private: /// The backend service implementation. service_type& service_; /// The underlying native implementation. impl_type impl_; }; } // namespace old } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_OLD_BASIC_CONTEXT_HPP galera-3-25.3.20/asio/asio/ssl/old/stream.hpp0000644000015300001660000004053713042054732020420 0ustar jenkinsjenkins// // ssl/old/stream.hpp // ~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2005 Voipster / Indrek dot Juhani at voipster dot com // Copyright (c) 2005-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_OLD_STREAM_HPP #define ASIO_SSL_OLD_STREAM_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/detail/throw_error.hpp" #include "asio/detail/type_traits.hpp" #include "asio/error.hpp" #include "asio/ssl/basic_context.hpp" #include "asio/ssl/stream_base.hpp" #include "asio/ssl/stream_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace old { /// Provides stream-oriented functionality using SSL. /** * The stream class template provides asynchronous and blocking stream-oriented * functionality using SSL. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. * * @par Example * To use the SSL stream template with an ip::tcp::socket, you would write: * @code * asio::io_service io_service; * asio::ssl::context context(io_service, asio::ssl::context::sslv23); * asio::ssl::stream sock(io_service, context); * @endcode * * @par Concepts: * AsyncReadStream, AsyncWriteStream, Stream, SyncRead_Stream, SyncWriteStream. */ template class stream : public stream_base, private boost::noncopyable { public: /// The type of the next layer. typedef typename remove_reference::type next_layer_type; /// The type of the lowest layer. typedef typename next_layer_type::lowest_layer_type lowest_layer_type; /// The type of the service that will be used to provide stream operations. typedef Service service_type; /// The native implementation type of the stream. typedef typename service_type::impl_type impl_type; /// Construct a stream. /** * This constructor creates a stream and initialises the underlying stream * object. * * @param arg The argument to be passed to initialise the underlying stream. * * @param context The SSL context to be used for the stream. */ template explicit stream(Arg& arg, basic_context& context) : next_layer_(arg), service_(asio::use_service(next_layer_.get_io_service())), impl_(service_.null()) { service_.create(impl_, next_layer_, context); } /// Destructor. ~stream() { service_.destroy(impl_, next_layer_); } /// Get the io_service associated with the object. /** * This function may be used to obtain the io_service object that the stream * uses to dispatch handlers for asynchronous operations. * * @return A reference to the io_service object that stream will use to * dispatch handlers. Ownership is not transferred to the caller. 
*/ asio::io_service& get_io_service() { return next_layer_.get_io_service(); } /// Get a reference to the next layer. /** * This function returns a reference to the next layer in a stack of stream * layers. * * @return A reference to the next layer in the stack of stream layers. * Ownership is not transferred to the caller. */ next_layer_type& next_layer() { return next_layer_; } /// Get a reference to the lowest layer. /** * This function returns a reference to the lowest layer in a stack of * stream layers. * * @return A reference to the lowest layer in the stack of stream layers. * Ownership is not transferred to the caller. */ lowest_layer_type& lowest_layer() { return next_layer_.lowest_layer(); } /// Get a const reference to the lowest layer. /** * This function returns a const reference to the lowest layer in a stack of * stream layers. * * @return A const reference to the lowest layer in the stack of stream * layers. Ownership is not transferred to the caller. */ const lowest_layer_type& lowest_layer() const { return next_layer_.lowest_layer(); } /// Get the underlying implementation in the native type. /** * This function may be used to obtain the underlying implementation of the * context. This is intended to allow access to stream functionality that is * not otherwise provided. */ impl_type impl() { return impl_; } /// Perform SSL handshaking. /** * This function is used to perform SSL handshaking on the stream. The * function call will block until handshaking is complete or an error occurs. * * @param type The type of handshaking to be performed, i.e. as a client or as * a server. * * @throws asio::system_error Thrown on failure. */ void handshake(handshake_type type) { asio::error_code ec; service_.handshake(impl_, next_layer_, type, ec); asio::detail::throw_error(ec); } /// Perform SSL handshaking. /** * This function is used to perform SSL handshaking on the stream. The * function call will block until handshaking is complete or an error occurs. * * @param type The type of handshaking to be performed, i.e. as a client or as * a server. * * @param ec Set to indicate what error occurred, if any. */ asio::error_code handshake(handshake_type type, asio::error_code& ec) { return service_.handshake(impl_, next_layer_, type, ec); } /// Start an asynchronous SSL handshake. /** * This function is used to asynchronously perform an SSL handshake on the * stream. This function call always returns immediately. * * @param type The type of handshaking to be performed, i.e. as a client or as * a server. * * @param handler The handler to be called when the handshake operation * completes. Copies will be made of the handler as required. The equivalent * function signature of the handler must be: * @code void handler( * const asio::error_code& error // Result of operation. * ); @endcode */ template void async_handshake(handshake_type type, HandshakeHandler handler) { service_.async_handshake(impl_, next_layer_, type, handler); } /// Shut down SSL on the stream. /** * This function is used to shut down SSL on the stream. The function call * will block until SSL has been shut down or an error occurs. * * @throws asio::system_error Thrown on failure. */ void shutdown() { asio::error_code ec; service_.shutdown(impl_, next_layer_, ec); asio::detail::throw_error(ec); } /// Shut down SSL on the stream. /** * This function is used to shut down SSL on the stream. The function call * will block until SSL has been shut down or an error occurs. * * @param ec Set to indicate what error occurred, if any. 
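   *
   * A minimal usage sketch, for illustration only (assumes an established
   * stream object named @c sock):
   * @code
   * asio::error_code ec;
   * sock.shutdown(ec);
   * if (ec)
   * {
   *   // React to the shutdown failure.
   * }
   * @endcode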
*/ asio::error_code shutdown(asio::error_code& ec) { return service_.shutdown(impl_, next_layer_, ec); } /// Asynchronously shut down SSL on the stream. /** * This function is used to asynchronously shut down SSL on the stream. This * function call always returns immediately. * * @param handler The handler to be called when the handshake operation * completes. Copies will be made of the handler as required. The equivalent * function signature of the handler must be: * @code void handler( * const asio::error_code& error // Result of operation. * ); @endcode */ template void async_shutdown(ShutdownHandler handler) { service_.async_shutdown(impl_, next_layer_, handler); } /// Write some data to the stream. /** * This function is used to write data on the stream. The function call will * block until one or more bytes of data has been written successfully, or * until an error occurs. * * @param buffers The data to be written. * * @returns The number of bytes written. * * @throws asio::system_error Thrown on failure. * * @note The write_some operation may not transmit all of the data to the * peer. Consider using the @ref write function if you need to ensure that all * data is written before the blocking operation completes. */ template std::size_t write_some(const ConstBufferSequence& buffers) { asio::error_code ec; std::size_t s = service_.write_some(impl_, next_layer_, buffers, ec); asio::detail::throw_error(ec); return s; } /// Write some data to the stream. /** * This function is used to write data on the stream. The function call will * block until one or more bytes of data has been written successfully, or * until an error occurs. * * @param buffers The data to be written to the stream. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes written. Returns 0 if an error occurred. * * @note The write_some operation may not transmit all of the data to the * peer. Consider using the @ref write function if you need to ensure that all * data is written before the blocking operation completes. */ template std::size_t write_some(const ConstBufferSequence& buffers, asio::error_code& ec) { return service_.write_some(impl_, next_layer_, buffers, ec); } /// Start an asynchronous write. /** * This function is used to asynchronously write one or more bytes of data to * the stream. The function call always returns immediately. * * @param buffers The data to be written to the stream. Although the buffers * object may be copied as necessary, ownership of the underlying buffers is * retained by the caller, which must guarantee that they remain valid until * the handler is called. * * @param handler The handler to be called when the write operation completes. * Copies will be made of the handler as required. The equivalent function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes written. * ); @endcode * * @note The async_write_some operation may not transmit all of the data to * the peer. Consider using the @ref async_write function if you need to * ensure that all data is written before the blocking operation completes. */ template void async_write_some(const ConstBufferSequence& buffers, WriteHandler handler) { service_.async_write_some(impl_, next_layer_, buffers, handler); } /// Read some data from the stream. /** * This function is used to read data from the stream. 
The function call will * block until one or more bytes of data has been read successfully, or until * an error occurs. * * @param buffers The buffers into which the data will be read. * * @returns The number of bytes read. * * @throws asio::system_error Thrown on failure. * * @note The read_some operation may not read all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that the * requested amount of data is read before the blocking operation completes. */ template std::size_t read_some(const MutableBufferSequence& buffers) { asio::error_code ec; std::size_t s = service_.read_some(impl_, next_layer_, buffers, ec); asio::detail::throw_error(ec); return s; } /// Read some data from the stream. /** * This function is used to read data from the stream. The function call will * block until one or more bytes of data has been read successfully, or until * an error occurs. * * @param buffers The buffers into which the data will be read. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes read. Returns 0 if an error occurred. * * @note The read_some operation may not read all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that the * requested amount of data is read before the blocking operation completes. */ template std::size_t read_some(const MutableBufferSequence& buffers, asio::error_code& ec) { return service_.read_some(impl_, next_layer_, buffers, ec); } /// Start an asynchronous read. /** * This function is used to asynchronously read one or more bytes of data from * the stream. The function call always returns immediately. * * @param buffers The buffers into which the data will be read. Although the * buffers object may be copied as necessary, ownership of the underlying * buffers is retained by the caller, which must guarantee that they remain * valid until the handler is called. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The equivalent function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes read. * ); @endcode * * @note The async_read_some operation may not read all of the requested * number of bytes. Consider using the @ref async_read function if you need to * ensure that the requested amount of data is read before the asynchronous * operation completes. */ template void async_read_some(const MutableBufferSequence& buffers, ReadHandler handler) { service_.async_read_some(impl_, next_layer_, buffers, handler); } /// Peek at the incoming data on the stream. /** * This function is used to peek at the incoming data on the stream, without * removing it from the input queue. The function call will block until data * has been read successfully or an error occurs. * * @param buffers The buffers into which the data will be read. * * @returns The number of bytes read. * * @throws asio::system_error Thrown on failure. */ template std::size_t peek(const MutableBufferSequence& buffers) { asio::error_code ec; std::size_t s = service_.peek(impl_, next_layer_, buffers, ec); asio::detail::throw_error(ec); return s; } /// Peek at the incoming data on the stream. /** * This function is used to peek at the incoming data on the stream, withoutxi * removing it from the input queue. The function call will block until data * has been read successfully or an error occurs. 
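   *
   * A possible call pattern, for illustration only (@c sock and @c buf are
   * assumed to be an established stream and a mutable buffer):
   * @code
   * asio::error_code ec;
   * std::size_t n = sock.peek(asio::buffer(buf), ec);
   * @endcode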
* * @param buffers The buffers into which the data will be read. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes read. Returns 0 if an error occurred. */ template std::size_t peek(const MutableBufferSequence& buffers, asio::error_code& ec) { return service_.peek(impl_, next_layer_, buffers, ec); } /// Determine the amount of data that may be read without blocking. /** * This function is used to determine the amount of data, in bytes, that may * be read from the stream without blocking. * * @returns The number of bytes of data that can be read without blocking. * * @throws asio::system_error Thrown on failure. */ std::size_t in_avail() { asio::error_code ec; std::size_t s = service_.in_avail(impl_, next_layer_, ec); asio::detail::throw_error(ec); return s; } /// Determine the amount of data that may be read without blocking. /** * This function is used to determine the amount of data, in bytes, that may * be read from the stream without blocking. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes of data that can be read without blocking. */ std::size_t in_avail(asio::error_code& ec) { return service_.in_avail(impl_, next_layer_, ec); } private: /// The next layer. Stream next_layer_; /// The backend service implementation. service_type& service_; /// The underlying native implementation. impl_type impl_; }; } // namespace old } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_OLD_STREAM_HPP galera-3-25.3.20/asio/asio/ssl/old/stream_service.hpp0000644000015300001660000001274013042054732022133 0ustar jenkinsjenkins// // ssl/old/stream_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2005 Voipster / Indrek dot Juhani at voipster dot com // Copyright (c) 2005-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_OLD_STREAM_SERVICE_HPP #define ASIO_SSL_OLD_STREAM_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/io_service.hpp" #include "asio/ssl/basic_context.hpp" #include "asio/ssl/old/detail/openssl_stream_service.hpp" #include "asio/ssl/stream_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace old { /// Default service implementation for an SSL stream. class stream_service #if defined(GENERATING_DOCUMENTATION) : public asio::io_service::service #else : public asio::detail::service_base #endif { private: // The type of the platform-specific implementation. typedef old::detail::openssl_stream_service service_impl_type; public: #if defined(GENERATING_DOCUMENTATION) /// The unique service identifier. static asio::io_service::id id; #endif /// The type of a stream implementation. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined impl_type; #else typedef service_impl_type::impl_type impl_type; #endif /// Construct a new stream service for the specified io_service. explicit stream_service(asio::io_service& io_service) : asio::detail::service_base(io_service), service_impl_(asio::use_service(io_service)) { } /// Return a null stream implementation. impl_type null() const { return service_impl_.null(); } /// Create a new stream implementation. 
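  /// (Illustrative note: applications do not usually call this directly;
  /// constructing an SSL stream object with a context performs the call on
  /// the stream's behalf, e.g. the assumed
  /// @code
  /// asio::ssl::old::stream<asio::ip::tcp::socket> sock(io_service, ctx);
  /// @endcode
  /// where @c io_service and @c ctx are pre-existing objects.)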
template void create(impl_type& impl, Stream& next_layer, basic_context& context) { service_impl_.create(impl, next_layer, context); } /// Destroy a stream implementation. template void destroy(impl_type& impl, Stream& next_layer) { service_impl_.destroy(impl, next_layer); } /// Perform SSL handshaking. template asio::error_code handshake(impl_type& impl, Stream& next_layer, stream_base::handshake_type type, asio::error_code& ec) { return service_impl_.handshake(impl, next_layer, type, ec); } /// Start an asynchronous SSL handshake. template void async_handshake(impl_type& impl, Stream& next_layer, stream_base::handshake_type type, HandshakeHandler handler) { service_impl_.async_handshake(impl, next_layer, type, handler); } /// Shut down SSL on the stream. template asio::error_code shutdown(impl_type& impl, Stream& next_layer, asio::error_code& ec) { return service_impl_.shutdown(impl, next_layer, ec); } /// Asynchronously shut down SSL on the stream. template void async_shutdown(impl_type& impl, Stream& next_layer, ShutdownHandler handler) { service_impl_.async_shutdown(impl, next_layer, handler); } /// Write some data to the stream. template std::size_t write_some(impl_type& impl, Stream& next_layer, const ConstBufferSequence& buffers, asio::error_code& ec) { return service_impl_.write_some(impl, next_layer, buffers, ec); } /// Start an asynchronous write. template void async_write_some(impl_type& impl, Stream& next_layer, const ConstBufferSequence& buffers, WriteHandler handler) { service_impl_.async_write_some(impl, next_layer, buffers, handler); } /// Read some data from the stream. template std::size_t read_some(impl_type& impl, Stream& next_layer, const MutableBufferSequence& buffers, asio::error_code& ec) { return service_impl_.read_some(impl, next_layer, buffers, ec); } /// Start an asynchronous read. template void async_read_some(impl_type& impl, Stream& next_layer, const MutableBufferSequence& buffers, ReadHandler handler) { service_impl_.async_read_some(impl, next_layer, buffers, handler); } /// Peek at the incoming data on the stream. template std::size_t peek(impl_type& impl, Stream& next_layer, const MutableBufferSequence& buffers, asio::error_code& ec) { return service_impl_.peek(impl, next_layer, buffers, ec); } /// Determine the amount of data that may be read without blocking. template std::size_t in_avail(impl_type& impl, Stream& next_layer, asio::error_code& ec) { return service_impl_.in_avail(impl, next_layer, ec); } private: // Destroy all user-defined handler objects owned by the service. void shutdown_service() { } // The service that provides the platform-specific implementation. service_impl_type& service_impl_; }; } // namespace old } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_OLD_STREAM_SERVICE_HPP galera-3-25.3.20/asio/asio/ssl/old/context_service.hpp0000644000015300001660000001165713042054732022332 0ustar jenkinsjenkins// // ssl/old/context_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2005 Voipster / Indrek dot Juhani at voipster dot com // Copyright (c) 2005-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_OLD_CONTEXT_SERVICE_HPP #define ASIO_SSL_OLD_CONTEXT_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/ssl/context_base.hpp" #include "asio/ssl/old/detail/openssl_context_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace old { /// Default service implementation for a context. class context_service #if defined(GENERATING_DOCUMENTATION) : public asio::io_service::service #else : public asio::detail::service_base #endif { private: // The type of the platform-specific implementation. typedef old::detail::openssl_context_service service_impl_type; public: #if defined(GENERATING_DOCUMENTATION) /// The unique service identifier. static asio::io_service::id id; #endif /// The type of the context. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined impl_type; #else typedef service_impl_type::impl_type impl_type; #endif /// Constructor. explicit context_service(asio::io_service& io_service) : asio::detail::service_base(io_service), service_impl_(asio::use_service(io_service)) { } /// Return a null context implementation. impl_type null() const { return service_impl_.null(); } /// Create a new context implementation. void create(impl_type& impl, context_base::method m) { service_impl_.create(impl, m); } /// Destroy a context implementation. void destroy(impl_type& impl) { service_impl_.destroy(impl); } /// Set options on the context. asio::error_code set_options(impl_type& impl, context_base::options o, asio::error_code& ec) { return service_impl_.set_options(impl, o, ec); } /// Set peer verification mode. asio::error_code set_verify_mode(impl_type& impl, context_base::verify_mode v, asio::error_code& ec) { return service_impl_.set_verify_mode(impl, v, ec); } /// Load a certification authority file for performing verification. asio::error_code load_verify_file(impl_type& impl, const std::string& filename, asio::error_code& ec) { return service_impl_.load_verify_file(impl, filename, ec); } /// Add a directory containing certification authority files to be used for /// performing verification. asio::error_code add_verify_path(impl_type& impl, const std::string& path, asio::error_code& ec) { return service_impl_.add_verify_path(impl, path, ec); } /// Use a certificate from a file. asio::error_code use_certificate_file(impl_type& impl, const std::string& filename, context_base::file_format format, asio::error_code& ec) { return service_impl_.use_certificate_file(impl, filename, format, ec); } /// Use a certificate chain from a file. asio::error_code use_certificate_chain_file(impl_type& impl, const std::string& filename, asio::error_code& ec) { return service_impl_.use_certificate_chain_file(impl, filename, ec); } /// Use a private key from a file. asio::error_code use_private_key_file(impl_type& impl, const std::string& filename, context_base::file_format format, asio::error_code& ec) { return service_impl_.use_private_key_file(impl, filename, format, ec); } /// Use an RSA private key from a file. 
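  /// (For illustration only: this is normally reached through the context
  /// wrapper rather than the service itself, e.g. the assumed call
  /// @code
  /// ctx.use_rsa_private_key_file("server_key.pem",
  ///     asio::ssl::context_base::pem);
  /// @endcode
  /// where @c ctx is an existing context object and the file name is
  /// hypothetical.)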
asio::error_code use_rsa_private_key_file(impl_type& impl, const std::string& filename, context_base::file_format format, asio::error_code& ec) { return service_impl_.use_rsa_private_key_file(impl, filename, format, ec); } /// Use the specified file to obtain the temporary Diffie-Hellman parameters. asio::error_code use_tmp_dh_file(impl_type& impl, const std::string& filename, asio::error_code& ec) { return service_impl_.use_tmp_dh_file(impl, filename, ec); } /// Set the password callback. template asio::error_code set_password_callback(impl_type& impl, PasswordCallback callback, asio::error_code& ec) { return service_impl_.set_password_callback(impl, callback, ec); } private: // Destroy all user-defined handler objects owned by the service. void shutdown_service() { } // The service that provides the platform-specific implementation. service_impl_type& service_impl_; }; } // namespace old } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_OLD_CONTEXT_SERVICE_HPP galera-3-25.3.20/asio/asio/ssl/context_service.hpp0000644000015300001660000000166213042054732021547 0ustar jenkinsjenkins// // ssl/context_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_CONTEXT_SERVICE_HPP #define ASIO_SSL_CONTEXT_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_ENABLE_OLD_SSL) # include "asio/ssl/old/context_service.hpp" #endif // defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { #if defined(ASIO_ENABLE_OLD_SSL) using asio::ssl::old::context_service; #endif // defined(ASIO_ENABLE_OLD_SSL) } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_CONTEXT_SERVICE_HPP galera-3-25.3.20/asio/asio/ssl/context_base.hpp0000644000015300001660000001111513042054732021013 0ustar jenkinsjenkins// // ssl/context_base.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_CONTEXT_BASE_HPP #define ASIO_SSL_CONTEXT_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/ssl/detail/openssl_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { /// The context_base class is used as a base for the basic_context class /// template so that we have a common place to define various enums. class context_base { public: /// Different methods supported by a context. enum method { /// Generic SSL version 2. sslv2, /// SSL version 2 client. sslv2_client, /// SSL version 2 server. sslv2_server, /// Generic SSL version 3. sslv3, /// SSL version 3 client. sslv3_client, /// SSL version 3 server. sslv3_server, /// Generic TLS version 1. tlsv1, /// TLS version 1 client. tlsv1_client, /// TLS version 1 server. tlsv1_server, /// Generic SSL/TLS. sslv23, /// SSL/TLS client. sslv23_client, /// SSL/TLS server. sslv23_server, /// Generic TLS version 1.1. tlsv11, /// TLS version 1.1 client. 
tlsv11_client, /// TLS version 1.1 server. tlsv11_server, /// Generic TLS version 1.2. tlsv12, /// TLS version 1.2 client. tlsv12_client, /// TLS version 1.2 server. tlsv12_server }; /// Bitmask type for SSL options. typedef long options; #if defined(GENERATING_DOCUMENTATION) /// Implement various bug workarounds. static const long default_workarounds = implementation_defined; /// Always create a new key when using tmp_dh parameters. static const long single_dh_use = implementation_defined; /// Disable SSL v2. static const long no_sslv2 = implementation_defined; /// Disable SSL v3. static const long no_sslv3 = implementation_defined; /// Disable TLS v1. static const long no_tlsv1 = implementation_defined; /// Disable TLS v1.1. static const long no_tlsv1_1 = implementation_defined; /// Disable TLS v1.2. static const long no_tlsv1_2 = implementation_defined; /// Disable compression. Compression is disabled by default. static const long no_compression = implementation_defined; #else ASIO_STATIC_CONSTANT(long, default_workarounds = SSL_OP_ALL); ASIO_STATIC_CONSTANT(long, single_dh_use = SSL_OP_SINGLE_DH_USE); ASIO_STATIC_CONSTANT(long, no_sslv2 = SSL_OP_NO_SSLv2); ASIO_STATIC_CONSTANT(long, no_sslv3 = SSL_OP_NO_SSLv3); ASIO_STATIC_CONSTANT(long, no_tlsv1 = SSL_OP_NO_TLSv1); # if defined(SSL_OP_NO_TLSv1_1) ASIO_STATIC_CONSTANT(long, no_tlsv1_1 = SSL_OP_NO_TLSv1_1); # else // defined(SSL_OP_NO_TLSv1_1) ASIO_STATIC_CONSTANT(long, no_tlsv1_1 = 0x10000000L); # endif // defined(SSL_OP_NO_TLSv1_1) # if defined(SSL_OP_NO_TLSv1_2) ASIO_STATIC_CONSTANT(long, no_tlsv1_2 = SSL_OP_NO_TLSv1_2); # else // defined(SSL_OP_NO_TLSv1_2) ASIO_STATIC_CONSTANT(long, no_tlsv1_2 = 0x08000000L); # endif // defined(SSL_OP_NO_TLSv1_2) # if defined(SSL_OP_NO_COMPRESSION) ASIO_STATIC_CONSTANT(long, no_compression = SSL_OP_NO_COMPRESSION); # else // defined(SSL_OP_NO_COMPRESSION) ASIO_STATIC_CONSTANT(long, no_compression = 0x20000L); # endif // defined(SSL_OP_NO_COMPRESSION) #endif /// File format types. enum file_format { /// ASN.1 file. asn1, /// PEM file. pem }; #if !defined(GENERATING_DOCUMENTATION) // The following types and constants are preserved for backward compatibility. // New programs should use the equivalents of the same names that are defined // in the asio::ssl namespace. typedef int verify_mode; ASIO_STATIC_CONSTANT(int, verify_none = SSL_VERIFY_NONE); ASIO_STATIC_CONSTANT(int, verify_peer = SSL_VERIFY_PEER); ASIO_STATIC_CONSTANT(int, verify_fail_if_no_peer_cert = SSL_VERIFY_FAIL_IF_NO_PEER_CERT); ASIO_STATIC_CONSTANT(int, verify_client_once = SSL_VERIFY_CLIENT_ONCE); #endif /// Purpose of PEM password. enum password_purpose { /// The password is needed for reading/decryption. for_reading, /// The password is needed for writing/encryption. for_writing }; protected: /// Protected destructor to prevent deletion through this type. ~context_base() { } }; } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_CONTEXT_BASE_HPP galera-3-25.3.20/asio/asio/ssl/stream_base.hpp0000644000015300001660000000216613042054732020630 0ustar jenkinsjenkins// // ssl/stream_base.hpp // ~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_STREAM_BASE_HPP #define ASIO_SSL_STREAM_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { /// The stream_base class is used as a base for the asio::ssl::stream /// class template so that we have a common place to define various enums. class stream_base { public: /// Different handshake types. enum handshake_type { /// Perform handshaking as a client. client, /// Perform handshaking as a server. server }; protected: /// Protected destructor to prevent deletion through this type. ~stream_base() { } }; } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_STREAM_BASE_HPP galera-3-25.3.20/asio/asio/ssl/verify_mode.hpp0000644000015300001660000000321513042054732020647 0ustar jenkinsjenkins// // ssl/verify_mode.hpp // ~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_VERIFY_MODE_HPP #define ASIO_SSL_VERIFY_MODE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/ssl/detail/openssl_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { /// Bitmask type for peer verification. /** * Possible values are: * * @li @ref verify_none * @li @ref verify_peer * @li @ref verify_fail_if_no_peer_cert * @li @ref verify_client_once */ typedef int verify_mode; #if defined(GENERATING_DOCUMENTATION) /// No verification. const int verify_none = implementation_defined; /// Verify the peer. const int verify_peer = implementation_defined; /// Fail verification if the peer has no certificate. Ignored unless /// @ref verify_peer is set. const int verify_fail_if_no_peer_cert = implementation_defined; /// Do not request client certificate on renegotiation. Ignored unless /// @ref verify_peer is set. const int verify_client_once = implementation_defined; #else const int verify_none = SSL_VERIFY_NONE; const int verify_peer = SSL_VERIFY_PEER; const int verify_fail_if_no_peer_cert = SSL_VERIFY_FAIL_IF_NO_PEER_CERT; const int verify_client_once = SSL_VERIFY_CLIENT_ONCE; #endif } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_VERIFY_MODE_HPP galera-3-25.3.20/asio/asio/ssl/error.hpp0000644000015300001660000000260113042054732017466 0ustar jenkinsjenkins// // ssl/error.hpp // ~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_ERROR_HPP #define ASIO_SSL_ERROR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/error_code.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace error { enum ssl_errors { }; extern ASIO_DECL const asio::error_category& get_ssl_category(); static const asio::error_category& ssl_category = asio::error::get_ssl_category(); } // namespace error } // namespace asio #if defined(ASIO_HAS_STD_SYSTEM_ERROR) namespace std { template<> struct is_error_code_enum { static const bool value = true; }; } // namespace std #endif // defined(ASIO_HAS_STD_SYSTEM_ERROR) namespace asio { namespace error { inline asio::error_code make_error_code(ssl_errors e) { return asio::error_code( static_cast(e), get_ssl_category()); } } // namespace error } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/ssl/impl/error.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_SSL_ERROR_HPP galera-3-25.3.20/asio/asio/ssl/rfc2818_verification.hpp0000644000015300001660000000545513042054732022206 0ustar jenkinsjenkins// // ssl/rfc2818_verification.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_RFC2818_VERIFICATION_HPP #define ASIO_SSL_RFC2818_VERIFICATION_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_ENABLE_OLD_SSL) # include # include "asio/ssl/detail/openssl_types.hpp" # include "asio/ssl/verify_context.hpp" #endif // !defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { #if !defined(ASIO_ENABLE_OLD_SSL) /// Verifies a certificate against a hostname according to the rules described /// in RFC 2818. /** * @par Example * The following example shows how to synchronously open a secure connection to * a given host name: * @code * using asio::ip::tcp; * namespace ssl = asio::ssl; * typedef ssl::stream ssl_socket; * * // Create a context that uses the default paths for finding CA certificates. * ssl::context ctx(ssl::context::sslv23); * ctx.set_default_verify_paths(); * * // Open a socket and connect it to the remote host. * asio::io_service io_service; * ssl_socket sock(io_service, ctx); * tcp::resolver resolver(io_service); * tcp::resolver::query query("host.name", "https"); * asio::connect(sock.lowest_layer(), resolver.resolve(query)); * sock.lowest_layer().set_option(tcp::no_delay(true)); * * // Perform SSL handshake and verify the remote host's certificate. * sock.set_verify_mode(ssl::verify_peer); * sock.set_verify_callback(ssl::rfc2818_verification("host.name")); * sock.handshake(ssl_socket::client); * * // ... read and write as normal ... * @endcode */ class rfc2818_verification { public: /// The type of the function object's result. typedef bool result_type; /// Constructor. explicit rfc2818_verification(const std::string& host) : host_(host) { } /// Perform certificate verification. ASIO_DECL bool operator()(bool preverified, verify_context& ctx) const; private: // Helper function to check a host name against a pattern. 
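  // Illustrative expectations under RFC 2818 wildcard rules (hypothetical
  // inputs; the actual matching logic lives in impl/rfc2818_verification.ipp):
  //
  //   match_pattern("*.example.com", 13, "host.example.com"); // true
  //   match_pattern("*.example.com", 13, "example.com");      // false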
ASIO_DECL static bool match_pattern(const char* pattern, std::size_t pattern_length, const char* host); // Helper function to check a host name against an IPv4 address // The host name to be checked. std::string host_; }; #endif // defined(ASIO_ENABLE_OLD_SSL) } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/ssl/impl/rfc2818_verification.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_SSL_RFC2818_VERIFICATION_HPP galera-3-25.3.20/asio/asio/ssl/verify_context.hpp0000644000015300001660000000342113042054732021406 0ustar jenkinsjenkins// // ssl/verify_context.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_VERIFY_CONTEXT_HPP #define ASIO_SSL_VERIFY_CONTEXT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_ENABLE_OLD_SSL) # include "asio/detail/noncopyable.hpp" # include "asio/ssl/detail/openssl_types.hpp" #endif // !defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { #if !defined(ASIO_ENABLE_OLD_SSL) /// A simple wrapper around the X509_STORE_CTX type, used during verification of /// a peer certificate. /** * @note The verify_context does not own the underlying X509_STORE_CTX object. */ class verify_context : private noncopyable { public: /// The native handle type of the verification context. typedef X509_STORE_CTX* native_handle_type; /// Constructor. explicit verify_context(native_handle_type handle) : handle_(handle) { } /// Get the underlying implementation in the native type. /** * This function may be used to obtain the underlying implementation of the * context. This is intended to allow access to context functionality that is * not otherwise provided. */ native_handle_type native_handle() { return handle_; } private: // The underlying native implementation. native_handle_type handle_; }; #endif // defined(ASIO_ENABLE_OLD_SSL) } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_VERIFY_CONTEXT_HPP galera-3-25.3.20/asio/asio/ssl/context.hpp0000644000015300001660000006375113042054732020036 0ustar jenkinsjenkins// // ssl/context.hpp // ~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_CONTEXT_HPP #define ASIO_SSL_CONTEXT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_ENABLE_OLD_SSL) # include "asio/ssl/basic_context.hpp" # include "asio/ssl/context_service.hpp" #else // defined(ASIO_ENABLE_OLD_SSL) # include # include "asio/buffer.hpp" # include "asio/io_service.hpp" # include "asio/ssl/context_base.hpp" # include "asio/ssl/detail/openssl_types.hpp" # include "asio/ssl/detail/openssl_init.hpp" # include "asio/ssl/detail/password_callback.hpp" # include "asio/ssl/detail/verify_callback.hpp" # include "asio/ssl/verify_mode.hpp" #endif // defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { #if defined(ASIO_ENABLE_OLD_SSL) /// Typedef for the typical usage of context. typedef basic_context context; #else // defined(ASIO_ENABLE_OLD_SSL) class context : public context_base, private noncopyable { public: /// The native handle type of the SSL context. typedef SSL_CTX* native_handle_type; /// (Deprecated: Use native_handle_type.) The native type of the SSL context. typedef SSL_CTX* impl_type; /// Constructor. ASIO_DECL explicit context(method m); /// Deprecated constructor taking a reference to an io_service object. ASIO_DECL context(asio::io_service&, method m); #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a context from another. /** * This constructor moves an SSL context from one object to another. * * @param other The other context object from which the move will occur. * * @note Following the move, the following operations only are valid for the * moved-from object: * @li Destruction. * @li As a target for move-assignment. */ ASIO_DECL context(context&& other); /// Move-assign a context from another. /** * This assignment operator moves an SSL context from one object to another. * * @param other The other context object from which the move will occur. * * @note Following the move, the following operations only are valid for the * moved-from object: * @li Destruction. * @li As a target for move-assignment. */ ASIO_DECL context& operator=(context&& other); #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Destructor. ASIO_DECL ~context(); /// Get the underlying implementation in the native type. /** * This function may be used to obtain the underlying implementation of the * context. This is intended to allow access to context functionality that is * not otherwise provided. */ ASIO_DECL native_handle_type native_handle(); /// (Deprecated: Use native_handle().) Get the underlying implementation in /// the native type. /** * This function may be used to obtain the underlying implementation of the * context. This is intended to allow access to context functionality that is * not otherwise provided. */ ASIO_DECL impl_type impl(); /// Clear options on the context. /** * This function may be used to configure the SSL options used by the context. * * @param o A bitmask of options. The available option values are defined in * the context_base class. The specified options, if currently enabled on the * context, are cleared. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_clear_options. */ ASIO_DECL void clear_options(options o); /// Clear options on the context. 
/** * This function may be used to configure the SSL options used by the context. * * @param o A bitmask of options. The available option values are defined in * the context_base class. The specified options, if currently enabled on the * context, are cleared. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_clear_options. */ ASIO_DECL asio::error_code clear_options(options o, asio::error_code& ec); /// Set options on the context. /** * This function may be used to configure the SSL options used by the context. * * @param o A bitmask of options. The available option values are defined in * the context_base class. The options are bitwise-ored with any existing * value for the options. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_set_options. */ ASIO_DECL void set_options(options o); /// Set options on the context. /** * This function may be used to configure the SSL options used by the context. * * @param o A bitmask of options. The available option values are defined in * the context_base class. The options are bitwise-ored with any existing * value for the options. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_set_options. */ ASIO_DECL asio::error_code set_options(options o, asio::error_code& ec); /// Set the peer verification mode. /** * This function may be used to configure the peer verification mode used by * the context. * * @param v A bitmask of peer verification modes. See @ref verify_mode for * available values. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_set_verify. */ ASIO_DECL void set_verify_mode(verify_mode v); /// Set the peer verification mode. /** * This function may be used to configure the peer verification mode used by * the context. * * @param v A bitmask of peer verification modes. See @ref verify_mode for * available values. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_set_verify. */ ASIO_DECL asio::error_code set_verify_mode( verify_mode v, asio::error_code& ec); /// Set the peer verification depth. /** * This function may be used to configure the maximum verification depth * allowed by the context. * * @param depth Maximum depth for the certificate chain verification that * shall be allowed. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_set_verify_depth. */ ASIO_DECL void set_verify_depth(int depth); /// Set the peer verification depth. /** * This function may be used to configure the maximum verification depth * allowed by the context. * * @param depth Maximum depth for the certificate chain verification that * shall be allowed. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_set_verify_depth. */ ASIO_DECL asio::error_code set_verify_depth( int depth, asio::error_code& ec); /// Set the callback used to verify peer certificates. /** * This function is used to specify a callback function that will be called * by the implementation when it needs to verify a peer certificate. * * @param callback The function object to be used for verifying a certificate. * The function signature of the handler must be: * @code bool verify_callback( * bool preverified, // True if the certificate passed pre-verification. * verify_context& ctx // The peer certificate and other context. * ); @endcode * The return value of the callback is true if the certificate has passed * verification, false otherwise. 
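 *
 * A minimal sketch of a conforming callback (my_verify_callback and
 * my_context are illustrative names, assuming <iostream> and the OpenSSL
 * headers are available):
 * @code
 * bool my_verify_callback(bool preverified, asio::ssl::verify_context& ctx)
 * {
 *   // Print the subject name of the certificate currently being verified.
 *   char subject_name[256];
 *   X509* cert = X509_STORE_CTX_get_current_cert(ctx.native_handle());
 *   X509_NAME_oneline(X509_get_subject_name(cert), subject_name, 256);
 *   std::cout << "Verifying " << subject_name << "\n";
 *   return preverified;
 * }
 * // ...
 * my_context.set_verify_callback(my_verify_callback);
 * @endcode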
* * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_set_verify. */ template void set_verify_callback(VerifyCallback callback); /// Set the callback used to verify peer certificates. /** * This function is used to specify a callback function that will be called * by the implementation when it needs to verify a peer certificate. * * @param callback The function object to be used for verifying a certificate. * The function signature of the handler must be: * @code bool verify_callback( * bool preverified, // True if the certificate passed pre-verification. * verify_context& ctx // The peer certificate and other context. * ); @endcode * The return value of the callback is true if the certificate has passed * verification, false otherwise. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_set_verify. */ template asio::error_code set_verify_callback(VerifyCallback callback, asio::error_code& ec); /// Load a certification authority file for performing verification. /** * This function is used to load one or more trusted certification authorities * from a file. * * @param filename The name of a file containing certification authority * certificates in PEM format. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_load_verify_locations. */ ASIO_DECL void load_verify_file(const std::string& filename); /// Load a certification authority file for performing verification. /** * This function is used to load the certificates for one or more trusted * certification authorities from a file. * * @param filename The name of a file containing certification authority * certificates in PEM format. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_load_verify_locations. */ ASIO_DECL asio::error_code load_verify_file( const std::string& filename, asio::error_code& ec); /// Add certification authority for performing verification. /** * This function is used to add one trusted certification authority * from a memory buffer. * * @param ca The buffer containing the certification authority certificate. * The certificate must use the PEM format. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_get_cert_store and @c X509_STORE_add_cert. */ ASIO_DECL void add_certificate_authority(const const_buffer& ca); /// Add certification authority for performing verification. /** * This function is used to add one trusted certification authority * from a memory buffer. * * @param ca The buffer containing the certification authority certificate. * The certificate must use the PEM format. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_get_cert_store and @c X509_STORE_add_cert. */ ASIO_DECL asio::error_code add_certificate_authority( const const_buffer& ca, asio::error_code& ec); /// Configures the context to use the default directories for finding /// certification authority certificates. /** * This function specifies that the context should use the default, * system-dependent directories for locating certification authority * certificates. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_set_default_verify_paths. */ ASIO_DECL void set_default_verify_paths(); /// Configures the context to use the default directories for finding /// certification authority certificates. 
/** * This function specifies that the context should use the default, * system-dependent directories for locating certification authority * certificates. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_set_default_verify_paths. */ ASIO_DECL asio::error_code set_default_verify_paths( asio::error_code& ec); /// Add a directory containing certificate authority files to be used for /// performing verification. /** * This function is used to specify the name of a directory containing * certification authority certificates. Each file in the directory must * contain a single certificate. The files must be named using the subject * name's hash and an extension of ".0". * * @param path The name of a directory containing the certificates. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_load_verify_locations. */ ASIO_DECL void add_verify_path(const std::string& path); /// Add a directory containing certificate authority files to be used for /// performing verification. /** * This function is used to specify the name of a directory containing * certification authority certificates. Each file in the directory must * contain a single certificate. The files must be named using the subject * name's hash and an extension of ".0". * * @param path The name of a directory containing the certificates. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_load_verify_locations. */ ASIO_DECL asio::error_code add_verify_path( const std::string& path, asio::error_code& ec); /// Use a certificate from a memory buffer. /** * This function is used to load a certificate into the context from a buffer. * * @param certificate The buffer containing the certificate. * * @param format The certificate format (ASN.1 or PEM). * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_use_certificate or SSL_CTX_use_certificate_ASN1. */ ASIO_DECL void use_certificate( const const_buffer& certificate, file_format format); /// Use a certificate from a memory buffer. /** * This function is used to load a certificate into the context from a buffer. * * @param certificate The buffer containing the certificate. * * @param format The certificate format (ASN.1 or PEM). * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_use_certificate or SSL_CTX_use_certificate_ASN1. */ ASIO_DECL asio::error_code use_certificate( const const_buffer& certificate, file_format format, asio::error_code& ec); /// Use a certificate from a file. /** * This function is used to load a certificate into the context from a file. * * @param filename The name of the file containing the certificate. * * @param format The file format (ASN.1 or PEM). * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_use_certificate_file. */ ASIO_DECL void use_certificate_file( const std::string& filename, file_format format); /// Use a certificate from a file. /** * This function is used to load a certificate into the context from a file. * * @param filename The name of the file containing the certificate. * * @param format The file format (ASN.1 or PEM). * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_use_certificate_file. */ ASIO_DECL asio::error_code use_certificate_file( const std::string& filename, file_format format, asio::error_code& ec); /// Use a certificate chain from a memory buffer. 
/** * This function is used to load a certificate chain into the context from a * buffer. * * @param chain The buffer containing the certificate chain. The certificate * chain must use the PEM format. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_use_certificate and SSL_CTX_add_extra_chain_cert. */ ASIO_DECL void use_certificate_chain(const const_buffer& chain); /// Use a certificate chain from a memory buffer. /** * This function is used to load a certificate chain into the context from a * buffer. * * @param chain The buffer containing the certificate chain. The certificate * chain must use the PEM format. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_use_certificate and SSL_CTX_add_extra_chain_cert. */ ASIO_DECL asio::error_code use_certificate_chain( const const_buffer& chain, asio::error_code& ec); /// Use a certificate chain from a file. /** * This function is used to load a certificate chain into the context from a * file. * * @param filename The name of the file containing the certificate. The file * must use the PEM format. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_use_certificate_chain_file. */ ASIO_DECL void use_certificate_chain_file(const std::string& filename); /// Use a certificate chain from a file. /** * This function is used to load a certificate chain into the context from a * file. * * @param filename The name of the file containing the certificate. The file * must use the PEM format. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_use_certificate_chain_file. */ ASIO_DECL asio::error_code use_certificate_chain_file( const std::string& filename, asio::error_code& ec); /// Use a private key from a memory buffer. /** * This function is used to load a private key into the context from a buffer. * * @param private_key The buffer containing the private key. * * @param format The private key format (ASN.1 or PEM). * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_use_PrivateKey or SSL_CTX_use_PrivateKey_ASN1. */ ASIO_DECL void use_private_key( const const_buffer& private_key, file_format format); /// Use a private key from a memory buffer. /** * This function is used to load a private key into the context from a buffer. * * @param private_key The buffer containing the private key. * * @param format The private key format (ASN.1 or PEM). * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_use_PrivateKey or SSL_CTX_use_PrivateKey_ASN1. */ ASIO_DECL asio::error_code use_private_key( const const_buffer& private_key, file_format format, asio::error_code& ec); /// Use a private key from a file. /** * This function is used to load a private key into the context from a file. * * @param filename The name of the file containing the private key. * * @param format The file format (ASN.1 or PEM). * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_use_PrivateKey_file. */ ASIO_DECL void use_private_key_file( const std::string& filename, file_format format); /// Use a private key from a file. /** * This function is used to load a private key into the context from a file. * * @param filename The name of the file containing the private key. * * @param format The file format (ASN.1 or PEM). * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_use_PrivateKey_file. 
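 *
 * A minimal usage sketch ("server.pem" and "server.key" are hypothetical
 * file names):
 * @code
 * asio::ssl::context ctx(asio::ssl::context::sslv23);
 * asio::error_code ec;
 * ctx.use_certificate_chain_file("server.pem", ec);
 * ctx.use_private_key_file("server.key", asio::ssl::context::pem, ec);
 * if (ec)
 * {
 *   // Handle the error, e.g. report ec.message().
 * }
 * @endcode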
*/ ASIO_DECL asio::error_code use_private_key_file( const std::string& filename, file_format format, asio::error_code& ec); /// Use an RSA private key from a memory buffer. /** * This function is used to load an RSA private key into the context from a * buffer. * * @param private_key The buffer containing the RSA private key. * * @param format The private key format (ASN.1 or PEM). * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_use_RSAPrivateKey or SSL_CTX_use_RSAPrivateKey_ASN1. */ ASIO_DECL void use_rsa_private_key( const const_buffer& private_key, file_format format); /// Use an RSA private key from a memory buffer. /** * This function is used to load an RSA private key into the context from a * buffer. * * @param private_key The buffer containing the RSA private key. * * @param format The private key format (ASN.1 or PEM). * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_use_RSAPrivateKey or SSL_CTX_use_RSAPrivateKey_ASN1. */ ASIO_DECL asio::error_code use_rsa_private_key( const const_buffer& private_key, file_format format, asio::error_code& ec); /// Use an RSA private key from a file. /** * This function is used to load an RSA private key into the context from a * file. * * @param filename The name of the file containing the RSA private key. * * @param format The file format (ASN.1 or PEM). * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_use_RSAPrivateKey_file. */ ASIO_DECL void use_rsa_private_key_file( const std::string& filename, file_format format); /// Use an RSA private key from a file. /** * This function is used to load an RSA private key into the context from a * file. * * @param filename The name of the file containing the RSA private key. * * @param format The file format (ASN.1 or PEM). * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_use_RSAPrivateKey_file. */ ASIO_DECL asio::error_code use_rsa_private_key_file( const std::string& filename, file_format format, asio::error_code& ec); /// Use the specified memory buffer to obtain the temporary Diffie-Hellman /// parameters. /** * This function is used to load Diffie-Hellman parameters into the context * from a buffer. * * @param dh The memory buffer containing the Diffie-Hellman parameters. The * buffer must use the PEM format. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_set_tmp_dh. */ ASIO_DECL void use_tmp_dh(const const_buffer& dh); /// Use the specified memory buffer to obtain the temporary Diffie-Hellman /// parameters. /** * This function is used to load Diffie-Hellman parameters into the context * from a buffer. * * @param dh The memory buffer containing the Diffie-Hellman parameters. The * buffer must use the PEM format. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_set_tmp_dh. */ ASIO_DECL asio::error_code use_tmp_dh( const const_buffer& dh, asio::error_code& ec); /// Use the specified file to obtain the temporary Diffie-Hellman parameters. /** * This function is used to load Diffie-Hellman parameters into the context * from a file. * * @param filename The name of the file containing the Diffie-Hellman * parameters. The file must use the PEM format. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_set_tmp_dh. */ ASIO_DECL void use_tmp_dh_file(const std::string& filename); /// Use the specified file to obtain the temporary Diffie-Hellman parameters. 
/** * This function is used to load Diffie-Hellman parameters into the context * from a file. * * @param filename The name of the file containing the Diffie-Hellman * parameters. The file must use the PEM format. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_set_tmp_dh. */ ASIO_DECL asio::error_code use_tmp_dh_file( const std::string& filename, asio::error_code& ec); /// Set the password callback. /** * This function is used to specify a callback function to obtain password * information about an encrypted key in PEM format. * * @param callback The function object to be used for obtaining the password. * The function signature of the handler must be: * @code std::string password_callback( * std::size_t max_length, // The maximum size for a password. * password_purpose purpose // Whether password is for reading or writing. * ); @endcode * The return value of the callback is a string containing the password. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_set_default_passwd_cb. */ template void set_password_callback(PasswordCallback callback); /// Set the password callback. /** * This function is used to specify a callback function to obtain password * information about an encrypted key in PEM format. * * @param callback The function object to be used for obtaining the password. * The function signature of the handler must be: * @code std::string password_callback( * std::size_t max_length, // The maximum size for a password. * password_purpose purpose // Whether password is for reading or writing. * ); @endcode * The return value of the callback is a string containing the password. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_set_default_passwd_cb. */ template asio::error_code set_password_callback(PasswordCallback callback, asio::error_code& ec); private: struct bio_cleanup; struct x509_cleanup; struct evp_pkey_cleanup; struct rsa_cleanup; struct dh_cleanup; // Helper function used to set a peer certificate verification callback. ASIO_DECL asio::error_code do_set_verify_callback( detail::verify_callback_base* callback, asio::error_code& ec); // Callback used when the SSL implementation wants to verify a certificate. ASIO_DECL static int verify_callback_function( int preverified, X509_STORE_CTX* ctx); // Helper function used to set a password callback. ASIO_DECL asio::error_code do_set_password_callback( detail::password_callback_base* callback, asio::error_code& ec); // Callback used when the SSL implementation wants a password. ASIO_DECL static int password_callback_function( char* buf, int size, int purpose, void* data); // Helper function to set the temporary Diffie-Hellman parameters from a BIO. ASIO_DECL asio::error_code do_use_tmp_dh( BIO* bio, asio::error_code& ec); // Helper function to make a BIO from a memory buffer. ASIO_DECL BIO* make_buffer_bio(const const_buffer& b); // The underlying native implementation. native_handle_type handle_; // Ensure openssl is initialised. 
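  // A minimal sketch of a password callback matching the signature documented
  // for set_password_callback() above (my_password_cb is an illustrative name):
  //
  //   std::string my_password_cb(std::size_t max_length,
  //       asio::ssl::context_base::password_purpose purpose)
  //   {
  //     return "secret"; // e.g. fetched from secure storage in real code
  //   }
  //
  //   ctx.set_password_callback(my_password_cb);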
asio::ssl::detail::openssl_init<> init_; }; #endif // defined(ASIO_ENABLE_OLD_SSL) } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/ssl/impl/context.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/ssl/impl/context.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_SSL_CONTEXT_HPP galera-3-25.3.20/asio/asio/ssl/impl/0000755000015300001660000000000013042054732016566 5ustar jenkinsjenkinsgalera-3-25.3.20/asio/asio/ssl/impl/context.ipp0000644000015300001660000005524513042054732020777 0ustar jenkinsjenkins// // ssl/impl/context.ipp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2005 Voipster / Indrek dot Juhani at voipster dot com // Copyright (c) 2005-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_IMPL_CONTEXT_IPP #define ASIO_SSL_IMPL_CONTEXT_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_ENABLE_OLD_SSL) # include # include "asio/detail/throw_error.hpp" # include "asio/error.hpp" # include "asio/ssl/context.hpp" # include "asio/ssl/error.hpp" #endif // !defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { #if !defined(ASIO_ENABLE_OLD_SSL) struct context::bio_cleanup { BIO* p; ~bio_cleanup() { if (p) ::BIO_free(p); } }; struct context::x509_cleanup { X509* p; ~x509_cleanup() { if (p) ::X509_free(p); } }; struct context::evp_pkey_cleanup { EVP_PKEY* p; ~evp_pkey_cleanup() { if (p) ::EVP_PKEY_free(p); } }; struct context::rsa_cleanup { RSA* p; ~rsa_cleanup() { if (p) ::RSA_free(p); } }; struct context::dh_cleanup { DH* p; ~dh_cleanup() { if (p) ::DH_free(p); } }; context::context(context::method m) : handle_(0) { ::ERR_clear_error(); switch (m) { #if defined(OPENSSL_NO_SSL2) case context::sslv2: case context::sslv2_client: case context::sslv2_server: asio::detail::throw_error( asio::error::invalid_argument, "context"); break; #else // defined(OPENSSL_NO_SSL2) case context::sslv2: handle_ = ::SSL_CTX_new(::SSLv2_method()); break; case context::sslv2_client: handle_ = ::SSL_CTX_new(::SSLv2_client_method()); break; case context::sslv2_server: handle_ = ::SSL_CTX_new(::SSLv2_server_method()); break; #endif // defined(OPENSSL_NO_SSL2) case context::sslv3: handle_ = ::SSL_CTX_new(::SSLv3_method()); break; case context::sslv3_client: handle_ = ::SSL_CTX_new(::SSLv3_client_method()); break; case context::sslv3_server: handle_ = ::SSL_CTX_new(::SSLv3_server_method()); break; case context::tlsv1: handle_ = ::SSL_CTX_new(::TLSv1_method()); break; case context::tlsv1_client: handle_ = ::SSL_CTX_new(::TLSv1_client_method()); break; case context::tlsv1_server: handle_ = ::SSL_CTX_new(::TLSv1_server_method()); break; case context::sslv23: handle_ = ::SSL_CTX_new(::SSLv23_method()); break; case context::sslv23_client: handle_ = ::SSL_CTX_new(::SSLv23_client_method()); break; case context::sslv23_server: handle_ = ::SSL_CTX_new(::SSLv23_server_method()); break; #if defined(SSL_TXT_TLSV1_1) case context::tlsv11: handle_ = ::SSL_CTX_new(::TLSv1_1_method()); break; case context::tlsv11_client: handle_ = ::SSL_CTX_new(::TLSv1_1_client_method()); break; case context::tlsv11_server: handle_ = ::SSL_CTX_new(::TLSv1_1_server_method()); break; #else // defined(SSL_TXT_TLSV1_1) case context::tlsv11: case context::tlsv11_client: 
case context::tlsv11_server: asio::detail::throw_error( asio::error::invalid_argument, "context"); break; #endif // defined(SSL_TXT_TLSV1_1) #if defined(SSL_TXT_TLSV1_2) case context::tlsv12: handle_ = ::SSL_CTX_new(::TLSv1_2_method()); break; case context::tlsv12_client: handle_ = ::SSL_CTX_new(::TLSv1_2_client_method()); break; case context::tlsv12_server: handle_ = ::SSL_CTX_new(::TLSv1_2_server_method()); break; #else // defined(SSL_TXT_TLSV1_2) case context::tlsv12: case context::tlsv12_client: case context::tlsv12_server: asio::detail::throw_error( asio::error::invalid_argument, "context"); break; #endif // defined(SSL_TXT_TLSV1_2) default: handle_ = ::SSL_CTX_new(0); break; } if (handle_ == 0) { asio::error_code ec( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); asio::detail::throw_error(ec, "context"); } set_options(no_compression); } context::context(asio::io_service&, context::method m) : handle_(0) { context tmp(m); handle_ = tmp.handle_; tmp.handle_ = 0; } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) context::context(context&& other) { handle_ = other.handle_; other.handle_ = 0; } context& context::operator=(context&& other) { context tmp(ASIO_MOVE_CAST(context)(*this)); handle_ = other.handle_; other.handle_ = 0; return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) context::~context() { if (handle_) { if (handle_->default_passwd_callback_userdata) { detail::password_callback_base* callback = static_cast( handle_->default_passwd_callback_userdata); delete callback; handle_->default_passwd_callback_userdata = 0; } if (SSL_CTX_get_app_data(handle_)) { detail::verify_callback_base* callback = static_cast( SSL_CTX_get_app_data(handle_)); delete callback; SSL_CTX_set_app_data(handle_, 0); } ::SSL_CTX_free(handle_); } } context::native_handle_type context::native_handle() { return handle_; } context::impl_type context::impl() { return handle_; } void context::clear_options(context::options o) { asio::error_code ec; clear_options(o, ec); asio::detail::throw_error(ec, "clear_options"); } asio::error_code context::clear_options( context::options o, asio::error_code& ec) { #if (OPENSSL_VERSION_NUMBER >= 0x009080DFL) \ && (OPENSSL_VERSION_NUMBER != 0x00909000L) # if !defined(SSL_OP_NO_COMPRESSION) if ((o & context::no_compression) != 0) { # if (OPENSSL_VERSION_NUMBER >= 0x00908000L) handle_->comp_methods = SSL_COMP_get_compression_methods(); # endif // (OPENSSL_VERSION_NUMBER >= 0x00908000L) o ^= context::no_compression; } # endif // !defined(SSL_OP_NO_COMPRESSION) ::SSL_CTX_clear_options(handle_, o); ec = asio::error_code(); #else // (OPENSSL_VERSION_NUMBER >= 0x009080DFL) // && (OPENSSL_VERSION_NUMBER != 0x00909000L) (void)o; ec = asio::error::operation_not_supported; #endif // (OPENSSL_VERSION_NUMBER >= 0x009080DFL) // && (OPENSSL_VERSION_NUMBER != 0x00909000L) return ec; } void context::set_options(context::options o) { asio::error_code ec; set_options(o, ec); asio::detail::throw_error(ec, "set_options"); } asio::error_code context::set_options( context::options o, asio::error_code& ec) { #if !defined(SSL_OP_NO_COMPRESSION) if ((o & context::no_compression) != 0) { #if (OPENSSL_VERSION_NUMBER >= 0x00908000L) handle_->comp_methods = asio::ssl::detail::openssl_init<>::get_null_compression_methods(); #endif // (OPENSSL_VERSION_NUMBER >= 0x00908000L) o ^= context::no_compression; } #endif // !defined(SSL_OP_NO_COMPRESSION) ::SSL_CTX_set_options(handle_, o); ec = asio::error_code(); return ec; } void 
context::set_verify_mode(verify_mode v) { asio::error_code ec; set_verify_mode(v, ec); asio::detail::throw_error(ec, "set_verify_mode"); } asio::error_code context::set_verify_mode( verify_mode v, asio::error_code& ec) { ::SSL_CTX_set_verify(handle_, v, ::SSL_CTX_get_verify_callback(handle_)); ec = asio::error_code(); return ec; } void context::set_verify_depth(int depth) { asio::error_code ec; set_verify_depth(depth, ec); asio::detail::throw_error(ec, "set_verify_depth"); } asio::error_code context::set_verify_depth( int depth, asio::error_code& ec) { ::SSL_CTX_set_verify_depth(handle_, depth); ec = asio::error_code(); return ec; } void context::load_verify_file(const std::string& filename) { asio::error_code ec; load_verify_file(filename, ec); asio::detail::throw_error(ec, "load_verify_file"); } asio::error_code context::load_verify_file( const std::string& filename, asio::error_code& ec) { ::ERR_clear_error(); if (::SSL_CTX_load_verify_locations(handle_, filename.c_str(), 0) != 1) { ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } ec = asio::error_code(); return ec; } void context::add_certificate_authority(const const_buffer& ca) { asio::error_code ec; add_certificate_authority(ca, ec); asio::detail::throw_error(ec, "add_certificate_authority"); } asio::error_code context::add_certificate_authority( const const_buffer& ca, asio::error_code& ec) { ::ERR_clear_error(); bio_cleanup bio = { make_buffer_bio(ca) }; if (bio.p) { x509_cleanup cert = { ::PEM_read_bio_X509(bio.p, 0, 0, 0) }; if (cert.p) { if (X509_STORE* store = ::SSL_CTX_get_cert_store(handle_)) { if (::X509_STORE_add_cert(store, cert.p) == 1) { ec = asio::error_code(); return ec; } } } } ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } void context::set_default_verify_paths() { asio::error_code ec; set_default_verify_paths(ec); asio::detail::throw_error(ec, "set_default_verify_paths"); } asio::error_code context::set_default_verify_paths( asio::error_code& ec) { ::ERR_clear_error(); if (::SSL_CTX_set_default_verify_paths(handle_) != 1) { ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } ec = asio::error_code(); return ec; } void context::add_verify_path(const std::string& path) { asio::error_code ec; add_verify_path(path, ec); asio::detail::throw_error(ec, "add_verify_path"); } asio::error_code context::add_verify_path( const std::string& path, asio::error_code& ec) { ::ERR_clear_error(); if (::SSL_CTX_load_verify_locations(handle_, 0, path.c_str()) != 1) { ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } ec = asio::error_code(); return ec; } void context::use_certificate( const const_buffer& certificate, file_format format) { asio::error_code ec; use_certificate(certificate, format, ec); asio::detail::throw_error(ec, "use_certificate"); } asio::error_code context::use_certificate( const const_buffer& certificate, file_format format, asio::error_code& ec) { ::ERR_clear_error(); if (format == context_base::asn1) { if (::SSL_CTX_use_certificate_ASN1(handle_, static_cast(buffer_size(certificate)), buffer_cast(certificate)) == 1) { ec = asio::error_code(); return ec; } } else if (format == context_base::pem) { bio_cleanup bio = { make_buffer_bio(certificate) }; if (bio.p) { x509_cleanup cert = { ::PEM_read_bio_X509(bio.p, 0, 0, 0) }; if (cert.p) { if (::SSL_CTX_use_certificate(handle_, cert.p) == 1) { ec = 
asio::error_code(); return ec; } } } } else { ec = asio::error::invalid_argument; return ec; } ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } void context::use_certificate_file( const std::string& filename, file_format format) { asio::error_code ec; use_certificate_file(filename, format, ec); asio::detail::throw_error(ec, "use_certificate_file"); } asio::error_code context::use_certificate_file( const std::string& filename, file_format format, asio::error_code& ec) { int file_type; switch (format) { case context_base::asn1: file_type = SSL_FILETYPE_ASN1; break; case context_base::pem: file_type = SSL_FILETYPE_PEM; break; default: { ec = asio::error::invalid_argument; return ec; } } ::ERR_clear_error(); if (::SSL_CTX_use_certificate_file(handle_, filename.c_str(), file_type) != 1) { ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } ec = asio::error_code(); return ec; } void context::use_certificate_chain(const const_buffer& chain) { asio::error_code ec; use_certificate_chain(chain, ec); asio::detail::throw_error(ec, "use_certificate_chain"); } asio::error_code context::use_certificate_chain( const const_buffer& chain, asio::error_code& ec) { ::ERR_clear_error(); bio_cleanup bio = { make_buffer_bio(chain) }; if (bio.p) { x509_cleanup cert = { ::PEM_read_bio_X509_AUX(bio.p, 0, handle_->default_passwd_callback, handle_->default_passwd_callback_userdata) }; if (!cert.p) { ec = asio::error_code(ERR_R_PEM_LIB, asio::error::get_ssl_category()); return ec; } int result = ::SSL_CTX_use_certificate(handle_, cert.p); if (result == 0 || ::ERR_peek_error() != 0) { ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } if (handle_->extra_certs) { ::sk_X509_pop_free(handle_->extra_certs, X509_free); handle_->extra_certs = 0; } while (X509* cacert = ::PEM_read_bio_X509(bio.p, 0, handle_->default_passwd_callback, handle_->default_passwd_callback_userdata)) { if (!::SSL_CTX_add_extra_chain_cert(handle_, cacert)) { ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } } result = ::ERR_peek_last_error(); if ((ERR_GET_LIB(result) == ERR_LIB_PEM) && (ERR_GET_REASON(result) == PEM_R_NO_START_LINE)) { ::ERR_clear_error(); ec = asio::error_code(); return ec; } } ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } void context::use_certificate_chain_file(const std::string& filename) { asio::error_code ec; use_certificate_chain_file(filename, ec); asio::detail::throw_error(ec, "use_certificate_chain_file"); } asio::error_code context::use_certificate_chain_file( const std::string& filename, asio::error_code& ec) { ::ERR_clear_error(); if (::SSL_CTX_use_certificate_chain_file(handle_, filename.c_str()) != 1) { ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } ec = asio::error_code(); return ec; } void context::use_private_key( const const_buffer& private_key, context::file_format format) { asio::error_code ec; use_private_key(private_key, format, ec); asio::detail::throw_error(ec, "use_private_key"); } asio::error_code context::use_private_key( const const_buffer& private_key, context::file_format format, asio::error_code& ec) { ::ERR_clear_error(); bio_cleanup bio = { make_buffer_bio(private_key) }; if (bio.p) { evp_pkey_cleanup evp_private_key = { 0 }; switch (format) { case context_base::asn1: evp_private_key.p = 
::d2i_PrivateKey_bio(bio.p, 0); break; case context_base::pem: evp_private_key.p = ::PEM_read_bio_PrivateKey( bio.p, 0, handle_->default_passwd_callback, handle_->default_passwd_callback_userdata); break; default: { ec = asio::error::invalid_argument; return ec; } } if (evp_private_key.p) { if (::SSL_CTX_use_PrivateKey(handle_, evp_private_key.p) == 1) { ec = asio::error_code(); return ec; } } } ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } void context::use_private_key_file( const std::string& filename, context::file_format format) { asio::error_code ec; use_private_key_file(filename, format, ec); asio::detail::throw_error(ec, "use_private_key_file"); } void context::use_rsa_private_key( const const_buffer& private_key, context::file_format format) { asio::error_code ec; use_rsa_private_key(private_key, format, ec); asio::detail::throw_error(ec, "use_rsa_private_key"); } asio::error_code context::use_rsa_private_key( const const_buffer& private_key, context::file_format format, asio::error_code& ec) { ::ERR_clear_error(); bio_cleanup bio = { make_buffer_bio(private_key) }; if (bio.p) { rsa_cleanup rsa_private_key = { 0 }; switch (format) { case context_base::asn1: rsa_private_key.p = ::d2i_RSAPrivateKey_bio(bio.p, 0); break; case context_base::pem: rsa_private_key.p = ::PEM_read_bio_RSAPrivateKey( bio.p, 0, handle_->default_passwd_callback, handle_->default_passwd_callback_userdata); break; default: { ec = asio::error::invalid_argument; return ec; } } if (rsa_private_key.p) { if (::SSL_CTX_use_RSAPrivateKey(handle_, rsa_private_key.p) == 1) { ec = asio::error_code(); return ec; } } } ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } asio::error_code context::use_private_key_file( const std::string& filename, context::file_format format, asio::error_code& ec) { int file_type; switch (format) { case context_base::asn1: file_type = SSL_FILETYPE_ASN1; break; case context_base::pem: file_type = SSL_FILETYPE_PEM; break; default: { ec = asio::error::invalid_argument; return ec; } } ::ERR_clear_error(); if (::SSL_CTX_use_PrivateKey_file(handle_, filename.c_str(), file_type) != 1) { ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } ec = asio::error_code(); return ec; } void context::use_rsa_private_key_file( const std::string& filename, context::file_format format) { asio::error_code ec; use_rsa_private_key_file(filename, format, ec); asio::detail::throw_error(ec, "use_rsa_private_key_file"); } asio::error_code context::use_rsa_private_key_file( const std::string& filename, context::file_format format, asio::error_code& ec) { int file_type; switch (format) { case context_base::asn1: file_type = SSL_FILETYPE_ASN1; break; case context_base::pem: file_type = SSL_FILETYPE_PEM; break; default: { ec = asio::error::invalid_argument; return ec; } } ::ERR_clear_error(); if (::SSL_CTX_use_RSAPrivateKey_file( handle_, filename.c_str(), file_type) != 1) { ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } ec = asio::error_code(); return ec; } void context::use_tmp_dh(const const_buffer& dh) { asio::error_code ec; use_tmp_dh(dh, ec); asio::detail::throw_error(ec, "use_tmp_dh"); } asio::error_code context::use_tmp_dh( const const_buffer& dh, asio::error_code& ec) { ::ERR_clear_error(); bio_cleanup bio = { make_buffer_bio(dh) }; if (bio.p) { return do_use_tmp_dh(bio.p, ec); } ec = asio::error_code( 
static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } void context::use_tmp_dh_file(const std::string& filename) { asio::error_code ec; use_tmp_dh_file(filename, ec); asio::detail::throw_error(ec, "use_tmp_dh_file"); } asio::error_code context::use_tmp_dh_file( const std::string& filename, asio::error_code& ec) { ::ERR_clear_error(); bio_cleanup bio = { ::BIO_new_file(filename.c_str(), "r") }; if (bio.p) { return do_use_tmp_dh(bio.p, ec); } ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } asio::error_code context::do_use_tmp_dh( BIO* bio, asio::error_code& ec) { ::ERR_clear_error(); dh_cleanup dh = { ::PEM_read_bio_DHparams(bio, 0, 0, 0) }; if (dh.p) { if (::SSL_CTX_set_tmp_dh(handle_, dh.p) == 1) { ec = asio::error_code(); return ec; } } ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } asio::error_code context::do_set_verify_callback( detail::verify_callback_base* callback, asio::error_code& ec) { if (SSL_CTX_get_app_data(handle_)) { delete static_cast( SSL_CTX_get_app_data(handle_)); } SSL_CTX_set_app_data(handle_, callback); ::SSL_CTX_set_verify(handle_, ::SSL_CTX_get_verify_mode(handle_), &context::verify_callback_function); ec = asio::error_code(); return ec; } int context::verify_callback_function(int preverified, X509_STORE_CTX* ctx) { if (ctx) { if (SSL* ssl = static_cast( ::X509_STORE_CTX_get_ex_data( ctx, ::SSL_get_ex_data_X509_STORE_CTX_idx()))) { if (SSL_CTX* handle = ::SSL_get_SSL_CTX(ssl)) { if (SSL_CTX_get_app_data(handle)) { detail::verify_callback_base* callback = static_cast( SSL_CTX_get_app_data(handle)); verify_context verify_ctx(ctx); return callback->call(preverified != 0, verify_ctx) ? 1 : 0; } } } } return 0; } asio::error_code context::do_set_password_callback( detail::password_callback_base* callback, asio::error_code& ec) { if (handle_->default_passwd_callback_userdata) delete static_cast( handle_->default_passwd_callback_userdata); handle_->default_passwd_callback_userdata = callback; SSL_CTX_set_default_passwd_cb(handle_, &context::password_callback_function); ec = asio::error_code(); return ec; } int context::password_callback_function( char* buf, int size, int purpose, void* data) { using namespace std; // For strncat and strlen. if (data) { detail::password_callback_base* callback = static_cast(data); std::string passwd = callback->call(static_cast(size), purpose ? context_base::for_writing : context_base::for_reading); #if defined(ASIO_HAS_SECURE_RTL) strcpy_s(buf, size, passwd.c_str()); #else // defined(ASIO_HAS_SECURE_RTL) *buf = '\0'; strncat(buf, passwd.c_str(), size); #endif // defined(ASIO_HAS_SECURE_RTL) return static_cast(strlen(buf)); } return 0; } BIO* context::make_buffer_bio(const const_buffer& b) { return ::BIO_new_mem_buf( const_cast(buffer_cast(b)), static_cast(buffer_size(b))); } #endif // !defined(ASIO_ENABLE_OLD_SSL) } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_IMPL_CONTEXT_IPP galera-3-25.3.20/asio/asio/ssl/impl/src.hpp0000644000015300001660000000137013042054732020067 0ustar jenkinsjenkins// // impl/ssl/src.hpp // ~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_IMPL_SRC_HPP #define ASIO_SSL_IMPL_SRC_HPP #define ASIO_SOURCE #include "asio/detail/config.hpp" #if defined(ASIO_HEADER_ONLY) # error Do not compile Asio library source with ASIO_HEADER_ONLY defined #endif #include "asio/ssl/impl/context.ipp" #include "asio/ssl/impl/error.ipp" #include "asio/ssl/detail/impl/engine.ipp" #include "asio/ssl/detail/impl/openssl_init.ipp" #include "asio/ssl/impl/rfc2818_verification.ipp" #endif // ASIO_SSL_IMPL_SRC_HPP galera-3-25.3.20/asio/asio/ssl/impl/error.ipp0000644000015300001660000000227013042054732020432 0ustar jenkinsjenkins// // ssl/impl/error.ipp // ~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_IMPL_ERROR_IPP #define ASIO_SSL_IMPL_ERROR_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/ssl/error.hpp" #include "asio/ssl/detail/openssl_init.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace error { namespace detail { class ssl_category : public asio::error_category { public: const char* name() const ASIO_ERROR_CATEGORY_NOEXCEPT { return "asio.ssl"; } std::string message(int value) const { const char* s = ::ERR_reason_error_string(value); return s ? s : "asio.ssl error"; } }; } // namespace detail const asio::error_category& get_ssl_category() { static detail::ssl_category instance; return instance; } } // namespace error } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_IMPL_ERROR_IPP galera-3-25.3.20/asio/asio/ssl/impl/rfc2818_verification.ipp0000644000015300001660000001131613042054732023141 0ustar jenkinsjenkins// // ssl/impl/rfc2818_verification.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_IMPL_RFC2818_VERIFICATION_IPP #define ASIO_SSL_IMPL_RFC2818_VERIFICATION_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_ENABLE_OLD_SSL) # include # include # include "asio/ip/address.hpp" # include "asio/ssl/rfc2818_verification.hpp" # include "asio/ssl/detail/openssl_types.hpp" #endif // !defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { #if !defined(ASIO_ENABLE_OLD_SSL) bool rfc2818_verification::operator()( bool preverified, verify_context& ctx) const { using namespace std; // For memcmp. // Don't bother looking at certificates that have failed pre-verification. if (!preverified) return false; // We're only interested in checking the certificate at the end of the chain. int depth = X509_STORE_CTX_get_error_depth(ctx.native_handle()); if (depth > 0) return true; // Try converting the host name to an address. If it is an address then we // need to look for an IP address in the certificate rather than a host name. 
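  // For example, a host_ of "192.0.2.10" parses as an address, so the
  // subjectAltName scan below considers only IP entries; a host_ such as
  // "www.example.org" does not parse, so DNS entries (and, failing those,
  // the most specific commonName) are checked instead.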
asio::error_code ec; ip::address address = ip::address::from_string(host_, ec); bool is_address = !ec; X509* cert = X509_STORE_CTX_get_current_cert(ctx.native_handle()); // Go through the alternate names in the certificate looking for matching DNS // or IP address entries. GENERAL_NAMES* gens = static_cast( X509_get_ext_d2i(cert, NID_subject_alt_name, 0, 0)); for (int i = 0; i < sk_GENERAL_NAME_num(gens); ++i) { GENERAL_NAME* gen = sk_GENERAL_NAME_value(gens, i); if (gen->type == GEN_DNS && !is_address) { ASN1_IA5STRING* domain = gen->d.dNSName; if (domain->type == V_ASN1_IA5STRING && domain->data && domain->length) { const char* pattern = reinterpret_cast(domain->data); std::size_t pattern_length = domain->length; if (match_pattern(pattern, pattern_length, host_.c_str())) { GENERAL_NAMES_free(gens); return true; } } } else if (gen->type == GEN_IPADD && is_address) { ASN1_OCTET_STRING* ip_address = gen->d.iPAddress; if (ip_address->type == V_ASN1_OCTET_STRING && ip_address->data) { if (address.is_v4() && ip_address->length == 4) { ip::address_v4::bytes_type bytes = address.to_v4().to_bytes(); if (memcmp(bytes.data(), ip_address->data, 4) == 0) { GENERAL_NAMES_free(gens); return true; } } else if (address.is_v6() && ip_address->length == 16) { ip::address_v6::bytes_type bytes = address.to_v6().to_bytes(); if (memcmp(bytes.data(), ip_address->data, 16) == 0) { GENERAL_NAMES_free(gens); return true; } } } } } GENERAL_NAMES_free(gens); // No match in the alternate names, so try the common names. We should only // use the "most specific" common name, which is the last one in the list. X509_NAME* name = X509_get_subject_name(cert); int i = -1; ASN1_STRING* common_name = 0; while ((i = X509_NAME_get_index_by_NID(name, NID_commonName, i)) >= 0) { X509_NAME_ENTRY* name_entry = X509_NAME_get_entry(name, i); common_name = X509_NAME_ENTRY_get_data(name_entry); } if (common_name && common_name->data && common_name->length) { const char* pattern = reinterpret_cast(common_name->data); std::size_t pattern_length = common_name->length; if (match_pattern(pattern, pattern_length, host_.c_str())) return true; } return false; } bool rfc2818_verification::match_pattern(const char* pattern, std::size_t pattern_length, const char* host) { using namespace std; // For tolower. const char* p = pattern; const char* p_end = p + pattern_length; const char* h = host; while (p != p_end && *h) { if (*p == '*') { ++p; while (*h && *h != '.') if (match_pattern(p, p_end - p, h++)) return true; } else if (tolower(*p) == tolower(*h)) { ++p; ++h; } else { return false; } } return p == p_end && !*h; } #endif // !defined(ASIO_ENABLE_OLD_SSL) } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_IMPL_RFC2818_VERIFICATION_IPP galera-3-25.3.20/asio/asio/ssl/impl/context.hpp0000644000015300001660000000355113042054732020767 0ustar jenkinsjenkins// // ssl/impl/context.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2005 Voipster / Indrek dot Juhani at voipster dot com // Copyright (c) 2005-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_IMPL_CONTEXT_HPP #define ASIO_SSL_IMPL_CONTEXT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_ENABLE_OLD_SSL) # include "asio/detail/throw_error.hpp" #endif // !defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { #if !defined(ASIO_ENABLE_OLD_SSL) template void context::set_verify_callback(VerifyCallback callback) { asio::error_code ec; this->set_verify_callback(callback, ec); asio::detail::throw_error(ec, "set_verify_callback"); } template asio::error_code context::set_verify_callback( VerifyCallback callback, asio::error_code& ec) { return do_set_verify_callback( new detail::verify_callback(callback), ec); } template void context::set_password_callback(PasswordCallback callback) { asio::error_code ec; this->set_password_callback(callback, ec); asio::detail::throw_error(ec, "set_password_callback"); } template asio::error_code context::set_password_callback( PasswordCallback callback, asio::error_code& ec) { return do_set_password_callback( new detail::password_callback(callback), ec); } #endif // !defined(ASIO_ENABLE_OLD_SSL) } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_IMPL_CONTEXT_HPP galera-3-25.3.20/asio/asio/buffered_stream_fwd.hpp0000644000015300001660000000114413042054732021532 0ustar jenkinsjenkins// // buffered_stream_fwd.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BUFFERED_STREAM_FWD_HPP #define ASIO_BUFFERED_STREAM_FWD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) namespace asio { template class buffered_stream; } // namespace asio #endif // ASIO_BUFFERED_STREAM_FWD_HPP galera-3-25.3.20/asio/asio/io_service.hpp0000644000015300001660000006623213042054732017675 0ustar jenkinsjenkins// // io_service.hpp // ~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IO_SERVICE_HPP #define ASIO_IO_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include #include "asio/async_result.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/wrapped_handler.hpp" #include "asio/error_code.hpp" #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) # include "asio/detail/winsock_init.hpp" #elif defined(__sun) || defined(__QNX__) || defined(__hpux) || defined(_AIX) \ || defined(__osf__) # include "asio/detail/signal_init.hpp" #endif #include "asio/detail/push_options.hpp" namespace asio { class io_service; template Service& use_service(io_service& ios); template void add_service(io_service& ios, Service* svc); template bool has_service(io_service& ios); namespace detail { #if defined(ASIO_HAS_IOCP) typedef class win_iocp_io_service io_service_impl; class win_iocp_overlapped_ptr; #else typedef class task_io_service io_service_impl; #endif class service_registry; } // namespace detail /// Provides core I/O functionality. /** * The io_service class provides the core I/O functionality for users of the * asynchronous I/O objects, including: * * @li asio::ip::tcp::socket * @li asio::ip::tcp::acceptor * @li asio::ip::udp::socket * @li asio::deadline_timer. * * The io_service class also includes facilities intended for developers of * custom asynchronous services. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Safe, with the specific exceptions of the reset() and * notify_fork() functions. Calling reset() while there are unfinished run(), * run_one(), poll() or poll_one() calls results in undefined behaviour. The * notify_fork() function should not be called while any io_service function, * or any function on an I/O object that is associated with the io_service, is * being called in another thread. * * @par Concepts: * Dispatcher. * * @par Synchronous and asynchronous operations * * Synchronous operations on I/O objects implicitly run the io_service object * for an individual operation. The io_service functions run(), run_one(), * poll() or poll_one() must be called for the io_service to perform * asynchronous operations on behalf of a C++ program. Notification that an * asynchronous operation has completed is delivered by invocation of the * associated handler. Handlers are invoked only by a thread that is currently * calling any overload of run(), run_one(), poll() or poll_one() for the * io_service. * * @par Effect of exceptions thrown from handlers * * If an exception is thrown from a handler, the exception is allowed to * propagate through the throwing thread's invocation of run(), run_one(), * poll() or poll_one(). No other threads that are calling any of these * functions are affected. It is then the responsibility of the application to * catch the exception. * * After the exception has been caught, the run(), run_one(), poll() or * poll_one() call may be restarted @em without the need for an intervening * call to reset(). This allows the thread to rejoin the io_service object's * thread pool without impacting any other threads in the pool. * * For example: * * @code * asio::io_service io_service; * ... * for (;;) * { * try * { * io_service.run(); * break; // run() exited normally * } * catch (my_exception& e) * { * // Deal with exception as appropriate. 
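 *       // For example, log e.what(); the loop below then calls run() again,
 *       // rejoining the pool without an intervening call to reset().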
* } * } * @endcode * * @par Stopping the io_service from running out of work * * Some applications may need to prevent an io_service object's run() call from * returning when there is no more work to do. For example, the io_service may * be being run in a background thread that is launched prior to the * application's asynchronous operations. The run() call may be kept running by * creating an object of type asio::io_service::work: * * @code asio::io_service io_service; * asio::io_service::work work(io_service); * ... @endcode * * To effect a shutdown, the application will then need to call the io_service * object's stop() member function. This will cause the io_service run() call * to return as soon as possible, abandoning unfinished operations and without * permitting ready handlers to be dispatched. * * Alternatively, if the application requires that all operations and handlers * be allowed to finish normally, the work object may be explicitly destroyed. * * @code asio::io_service io_service; * auto_ptr work( * new asio::io_service::work(io_service)); * ... * work.reset(); // Allow run() to exit. @endcode * * @par The io_service class and I/O services * * Class io_service implements an extensible, type-safe, polymorphic set of I/O * services, indexed by service type. An object of class io_service must be * initialised before I/O objects such as sockets, resolvers and timers can be * used. These I/O objects are distinguished by having constructors that accept * an @c io_service& parameter. * * I/O services exist to manage the logical interface to the operating system on * behalf of the I/O objects. In particular, there are resources that are shared * across a class of I/O objects. For example, timers may be implemented in * terms of a single timer queue. The I/O services manage these shared * resources. * * Access to the services of an io_service is via three function templates, * use_service(), add_service() and has_service(). * * In a call to @c use_service(), the type argument chooses a service, * making available all members of the named type. If @c Service is not present * in an io_service, an object of type @c Service is created and added to the * io_service. A C++ program can check if an io_service implements a * particular service with the function template @c has_service(). * * Service objects may be explicitly added to an io_service using the function * template @c add_service(). If the @c Service is already present, the * service_already_exists exception is thrown. If the owner of the service is * not the same object as the io_service parameter, the invalid_service_owner * exception is thrown. * * Once a service reference is obtained from an io_service object by calling * use_service(), that reference remains usable as long as the owning io_service * object exists. * * All I/O service implementations have io_service::service as a public base * class. Custom I/O services may be implemented by deriving from this class and * then added to an io_service using the facilities described above. */ class io_service : private noncopyable { private: typedef detail::io_service_impl impl_type; #if defined(ASIO_HAS_IOCP) friend class detail::win_iocp_overlapped_ptr; #endif public: class work; friend class work; class id; class service; class strand; /// Constructor. ASIO_DECL io_service(); /// Constructor. /** * Construct with a hint about the required level of concurrency. 
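   *
   * For example, a sketch that hints at four concurrently running threads
   * (the value 4 is an illustrative assumption, not a recommendation):
   * @code asio::io_service io_service(4); @endcode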
* * @param concurrency_hint A suggestion to the implementation on how many * threads it should allow to run simultaneously. */ ASIO_DECL explicit io_service(std::size_t concurrency_hint); /// Destructor. /** * On destruction, the io_service performs the following sequence of * operations: * * @li For each service object @c svc in the io_service set, in reverse order * of the beginning of service object lifetime, performs * @c svc->shutdown_service(). * * @li Uninvoked handler objects that were scheduled for deferred invocation * on the io_service, or any associated strand, are destroyed. * * @li For each service object @c svc in the io_service set, in reverse order * of the beginning of service object lifetime, performs * delete static_cast(svc). * * @note The destruction sequence described above permits programs to * simplify their resource management by using @c shared_ptr<>. Where an * object's lifetime is tied to the lifetime of a connection (or some other * sequence of asynchronous operations), a @c shared_ptr to the object would * be bound into the handlers for all asynchronous operations associated with * it. This works as follows: * * @li When a single connection ends, all associated asynchronous operations * complete. The corresponding handler objects are destroyed, and all * @c shared_ptr references to the objects are destroyed. * * @li To shut down the whole program, the io_service function stop() is * called to terminate any run() calls as soon as possible. The io_service * destructor defined above destroys all handlers, causing all @c shared_ptr * references to all connection objects to be destroyed. */ ASIO_DECL ~io_service(); /// Run the io_service object's event processing loop. /** * The run() function blocks until all work has finished and there are no * more handlers to be dispatched, or until the io_service has been stopped. * * Multiple threads may call the run() function to set up a pool of threads * from which the io_service may execute handlers. All threads that are * waiting in the pool are equivalent and the io_service may choose any one * of them to invoke a handler. * * A normal exit from the run() function implies that the io_service object * is stopped (the stopped() function returns @c true). Subsequent calls to * run(), run_one(), poll() or poll_one() will return immediately unless there * is a prior call to reset(). * * @return The number of handlers that were executed. * * @throws asio::system_error Thrown on failure. * * @note The run() function must not be called from a thread that is currently * calling one of run(), run_one(), poll() or poll_one() on the same * io_service object. * * The poll() function may also be used to dispatch ready handlers, but * without blocking. */ ASIO_DECL std::size_t run(); /// Run the io_service object's event processing loop. /** * The run() function blocks until all work has finished and there are no * more handlers to be dispatched, or until the io_service has been stopped. * * Multiple threads may call the run() function to set up a pool of threads * from which the io_service may execute handlers. All threads that are * waiting in the pool are equivalent and the io_service may choose any one * of them to invoke a handler. * * A normal exit from the run() function implies that the io_service object * is stopped (the stopped() function returns @c true). Subsequent calls to * run(), run_one(), poll() or poll_one() will return immediately unless there * is a prior call to reset(). 
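   *
   * A usage sketch for this overload (illustrative only, not taken from the
   * upstream documentation):
   * @code asio::error_code ec;
   * std::size_t n = io_service.run(ec);
   * if (ec)
   * {
   *   // The loop returned because of an error; n handlers ran before that.
   * } @endcode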
* * @param ec Set to indicate what error occurred, if any. * * @return The number of handlers that were executed. * * @note The run() function must not be called from a thread that is currently * calling one of run(), run_one(), poll() or poll_one() on the same * io_service object. * * The poll() function may also be used to dispatch ready handlers, but * without blocking. */ ASIO_DECL std::size_t run(asio::error_code& ec); /// Run the io_service object's event processing loop to execute at most one /// handler. /** * The run_one() function blocks until one handler has been dispatched, or * until the io_service has been stopped. * * @return The number of handlers that were executed. A zero return value * implies that the io_service object is stopped (the stopped() function * returns @c true). Subsequent calls to run(), run_one(), poll() or * poll_one() will return immediately unless there is a prior call to * reset(). * * @throws asio::system_error Thrown on failure. */ ASIO_DECL std::size_t run_one(); /// Run the io_service object's event processing loop to execute at most one /// handler. /** * The run_one() function blocks until one handler has been dispatched, or * until the io_service has been stopped. * * @return The number of handlers that were executed. A zero return value * implies that the io_service object is stopped (the stopped() function * returns @c true). Subsequent calls to run(), run_one(), poll() or * poll_one() will return immediately unless there is a prior call to * reset(). * * @return The number of handlers that were executed. */ ASIO_DECL std::size_t run_one(asio::error_code& ec); /// Run the io_service object's event processing loop to execute ready /// handlers. /** * The poll() function runs handlers that are ready to run, without blocking, * until the io_service has been stopped or there are no more ready handlers. * * @return The number of handlers that were executed. * * @throws asio::system_error Thrown on failure. */ ASIO_DECL std::size_t poll(); /// Run the io_service object's event processing loop to execute ready /// handlers. /** * The poll() function runs handlers that are ready to run, without blocking, * until the io_service has been stopped or there are no more ready handlers. * * @param ec Set to indicate what error occurred, if any. * * @return The number of handlers that were executed. */ ASIO_DECL std::size_t poll(asio::error_code& ec); /// Run the io_service object's event processing loop to execute one ready /// handler. /** * The poll_one() function runs at most one handler that is ready to run, * without blocking. * * @return The number of handlers that were executed. * * @throws asio::system_error Thrown on failure. */ ASIO_DECL std::size_t poll_one(); /// Run the io_service object's event processing loop to execute one ready /// handler. /** * The poll_one() function runs at most one handler that is ready to run, * without blocking. * * @param ec Set to indicate what error occurred, if any. * * @return The number of handlers that were executed. */ ASIO_DECL std::size_t poll_one(asio::error_code& ec); /// Stop the io_service object's event processing loop. /** * This function does not block, but instead simply signals the io_service to * stop. All invocations of its run() or run_one() member functions should * return as soon as possible. Subsequent calls to run(), run_one(), poll() * or poll_one() will return immediately until reset() is called. */ ASIO_DECL void stop(); /// Determine whether the io_service object has been stopped. 
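  /// A stop-and-restart sketch (illustrative only; the worker thread object is
  /// an assumption, not part of this header):
  /// @code
  /// io_service.stop();          // ask any run() call to return soon
  /// worker.join();              // join the thread that was calling run()
  /// if (io_service.stopped())   // expected to be true once run() has returned
  ///   io_service.reset();       // allow a subsequent run() to execute handlers
  /// io_service.run();
  /// @endcode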
/** * This function is used to determine whether an io_service object has been * stopped, either through an explicit call to stop(), or due to running out * of work. When an io_service object is stopped, calls to run(), run_one(), * poll() or poll_one() will return immediately without invoking any * handlers. * * @return @c true if the io_service object is stopped, otherwise @c false. */ ASIO_DECL bool stopped() const; /// Reset the io_service in preparation for a subsequent run() invocation. /** * This function must be called prior to any second or later set of * invocations of the run(), run_one(), poll() or poll_one() functions when a * previous invocation of these functions returned due to the io_service * being stopped or running out of work. After a call to reset(), the * io_service object's stopped() function will return @c false. * * This function must not be called while there are any unfinished calls to * the run(), run_one(), poll() or poll_one() functions. */ ASIO_DECL void reset(); /// Request the io_service to invoke the given handler. /** * This function is used to ask the io_service to execute the given handler. * * The io_service guarantees that the handler will only be called in a thread * in which the run(), run_one(), poll() or poll_one() member functions is * currently being invoked. The handler may be executed inside this function * if the guarantee can be met. * * @param handler The handler to be called. The io_service will make * a copy of the handler object as required. The function signature of the * handler must be: @code void handler(); @endcode * * @note This function throws an exception only if: * * @li the handler's @c asio_handler_allocate function; or * * @li the handler's copy constructor * * throws an exception. */ template ASIO_INITFN_RESULT_TYPE(CompletionHandler, void ()) dispatch(ASIO_MOVE_ARG(CompletionHandler) handler); /// Request the io_service to invoke the given handler and return immediately. /** * This function is used to ask the io_service to execute the given handler, * but without allowing the io_service to call the handler from inside this * function. * * The io_service guarantees that the handler will only be called in a thread * in which the run(), run_one(), poll() or poll_one() member functions is * currently being invoked. * * @param handler The handler to be called. The io_service will make * a copy of the handler object as required. The function signature of the * handler must be: @code void handler(); @endcode * * @note This function throws an exception only if: * * @li the handler's @c asio_handler_allocate function; or * * @li the handler's copy constructor * * throws an exception. */ template ASIO_INITFN_RESULT_TYPE(CompletionHandler, void ()) post(ASIO_MOVE_ARG(CompletionHandler) handler); /// Create a new handler that automatically dispatches the wrapped handler /// on the io_service. /** * This function is used to create a new handler function object that, when * invoked, will automatically pass the wrapped handler to the io_service * object's dispatch function. * * @param handler The handler to be wrapped. The io_service will make a copy * of the handler object as required. The function signature of the handler * must be: @code void handler(A1 a1, ... An an); @endcode * * @return A function object that, when invoked, passes the wrapped handler to * the io_service object's dispatch function. Given a function object with the * signature: * @code R f(A1 a1, ... 
An an); @endcode * If this function object is passed to the wrap function like so: * @code io_service.wrap(f); @endcode * then the return value is a function object with the signature * @code void g(A1 a1, ... An an); @endcode * that, when invoked, executes code equivalent to: * @code io_service.dispatch(boost::bind(f, a1, ... an)); @endcode */ template #if defined(GENERATING_DOCUMENTATION) unspecified #else detail::wrapped_handler #endif wrap(Handler handler); /// Fork-related event notifications. enum fork_event { /// Notify the io_service that the process is about to fork. fork_prepare, /// Notify the io_service that the process has forked and is the parent. fork_parent, /// Notify the io_service that the process has forked and is the child. fork_child }; /// Notify the io_service of a fork-related event. /** * This function is used to inform the io_service that the process is about * to fork, or has just forked. This allows the io_service, and the services * it contains, to perform any necessary housekeeping to ensure correct * operation following a fork. * * This function must not be called while any other io_service function, or * any function on an I/O object associated with the io_service, is being * called in another thread. It is, however, safe to call this function from * within a completion handler, provided no other thread is accessing the * io_service. * * @param event A fork-related event. * * @throws asio::system_error Thrown on failure. If the notification * fails the io_service object should no longer be used and should be * destroyed. * * @par Example * The following code illustrates how to incorporate the notify_fork() * function: * @code my_io_service.notify_fork(asio::io_service::fork_prepare); * if (fork() == 0) * { * // This is the child process. * my_io_service.notify_fork(asio::io_service::fork_child); * } * else * { * // This is the parent process. * my_io_service.notify_fork(asio::io_service::fork_parent); * } @endcode * * @note For each service object @c svc in the io_service set, performs * svc->fork_service();. When processing the fork_prepare event, * services are visited in reverse order of the beginning of service object * lifetime. Otherwise, services are visited in order of the beginning of * service object lifetime. */ ASIO_DECL void notify_fork(asio::io_service::fork_event event); /// Obtain the service object corresponding to the given type. /** * This function is used to locate a service object that corresponds to * the given service type. If there is no existing implementation of the * service, then the io_service will create a new instance of the service. * * @param ios The io_service object that owns the service. * * @return The service interface implementing the specified service type. * Ownership of the service interface is not transferred to the caller. */ template friend Service& use_service(io_service& ios); /// Add a service object to the io_service. /** * This function is used to add a service to the io_service. * * @param ios The io_service object that owns the service. * * @param svc The service object. On success, ownership of the service object * is transferred to the io_service. When the io_service object is destroyed, * it will destroy the service object by performing: * @code delete static_cast(svc) @endcode * * @throws asio::service_already_exists Thrown if a service of the * given type is already present in the io_service. 
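   *
   * A usage sketch (my_service is a hypothetical service type derived from
   * io_service::service; a matching sketch appears below in the service base
   * class documentation):
   * @code
   * asio::add_service(io_service, new my_service(io_service));
   * my_service& svc = asio::use_service<my_service>(io_service);
   * @endcode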
* * @throws asio::invalid_service_owner Thrown if the service's owning * io_service is not the io_service object specified by the ios parameter. */ template friend void add_service(io_service& ios, Service* svc); /// Determine if an io_service contains a specified service type. /** * This function is used to determine whether the io_service contains a * service object corresponding to the given service type. * * @param ios The io_service object that owns the service. * * @return A boolean indicating whether the io_service contains the service. */ template friend bool has_service(io_service& ios); private: #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) detail::winsock_init<> init_; #elif defined(__sun) || defined(__QNX__) || defined(__hpux) || defined(_AIX) \ || defined(__osf__) detail::signal_init<> init_; #endif // The service registry. asio::detail::service_registry* service_registry_; // The implementation. impl_type& impl_; }; /// Class to inform the io_service when it has work to do. /** * The work class is used to inform the io_service when work starts and * finishes. This ensures that the io_service object's run() function will not * exit while work is underway, and that it does exit when there is no * unfinished work remaining. * * The work class is copy-constructible so that it may be used as a data member * in a handler class. It is not assignable. */ class io_service::work { public: /// Constructor notifies the io_service that work is starting. /** * The constructor is used to inform the io_service that some work has begun. * This ensures that the io_service object's run() function will not exit * while the work is underway. */ explicit work(asio::io_service& io_service); /// Copy constructor notifies the io_service that work is starting. /** * The constructor is used to inform the io_service that some work has begun. * This ensures that the io_service object's run() function will not exit * while the work is underway. */ work(const work& other); /// Destructor notifies the io_service that the work is complete. /** * The destructor is used to inform the io_service that some work has * finished. Once the count of unfinished work reaches zero, the io_service * object's run() function is permitted to exit. */ ~work(); /// Get the io_service associated with the work. asio::io_service& get_io_service(); private: // Prevent assignment. void operator=(const work& other); // The io_service implementation. detail::io_service_impl& io_service_impl_; }; /// Class used to uniquely identify a service. class io_service::id : private noncopyable { public: /// Constructor. id() {} }; /// Base class for all io_service services. class io_service::service : private noncopyable { public: /// Get the io_service object that owns the service. asio::io_service& get_io_service(); protected: /// Constructor. /** * @param owner The io_service object that owns the service. */ ASIO_DECL service(asio::io_service& owner); /// Destructor. ASIO_DECL virtual ~service(); private: /// Destroy all user-defined handler objects owned by the service. virtual void shutdown_service() = 0; /// Handle notification of a fork-related event to perform any necessary /// housekeeping. /** * This function is not a pure virtual so that services only have to * implement it if necessary. The default implementation does nothing. 
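   *
   * A minimal derived-service sketch (illustrative only; my_service and its
   * members are assumptions, not part of asio):
   * @code
   * class my_service : public asio::io_service::service
   * {
   * public:
   *   static asio::io_service::id id;
   *   explicit my_service(asio::io_service& owner)
   *     : asio::io_service::service(owner) {}
   * private:
   *   void shutdown_service() {}                          // required override
   *   void fork_service(asio::io_service::fork_event) {}  // optional override
   * };
   * asio::io_service::id my_service::id;
   * @endcode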
*/ ASIO_DECL virtual void fork_service( asio::io_service::fork_event event); friend class asio::detail::service_registry; struct key { key() : type_info_(0), id_(0) {} const std::type_info* type_info_; const asio::io_service::id* id_; } key_; asio::io_service& owner_; service* next_; }; /// Exception thrown when trying to add a duplicate service to an io_service. class service_already_exists : public std::logic_error { public: ASIO_DECL service_already_exists(); }; /// Exception thrown when trying to add a service object to an io_service where /// the service has a different owner. class invalid_service_owner : public std::logic_error { public: ASIO_DECL invalid_service_owner(); }; namespace detail { // Special derived service id type to keep classes header-file only. template class service_id : public asio::io_service::id { }; // Special service base class to keep classes header-file only. template class service_base : public asio::io_service::service { public: static asio::detail::service_id id; // Constructor. service_base(asio::io_service& io_service) : asio::io_service::service(io_service) { } }; template asio::detail::service_id service_base::id; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/io_service.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/impl/io_service.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_IO_SERVICE_HPP galera-3-25.3.20/asio/asio/basic_socket_streambuf.hpp0000644000015300001660000003721213042054732022243 0ustar jenkinsjenkins// // basic_socket_streambuf.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BASIC_SOCKET_STREAMBUF_HPP #define ASIO_BASIC_SOCKET_STREAMBUF_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_NO_IOSTREAM) #include #include "asio/basic_socket.hpp" #include "asio/deadline_timer_service.hpp" #include "asio/detail/array.hpp" #include "asio/detail/throw_error.hpp" #include "asio/io_service.hpp" #include "asio/stream_socket_service.hpp" #if defined(ASIO_HAS_BOOST_DATE_TIME) # include "asio/deadline_timer.hpp" #else # include "asio/steady_timer.hpp" #endif #if !defined(ASIO_HAS_VARIADIC_TEMPLATES) # include "asio/detail/variadic_templates.hpp" // A macro that should expand to: // template // basic_socket_streambuf* connect( // T1 x1, ..., Tn xn) // { // init_buffers(); // this->basic_socket::close(ec_); // typedef typename Protocol::resolver resolver_type; // typedef typename resolver_type::query resolver_query; // resolver_query query(x1, ..., xn); // resolve_and_connect(query); // return !ec_ ? this : 0; // } // This macro should only persist within this file. # define ASIO_PRIVATE_CONNECT_DEF(n) \ template \ basic_socket_streambuf* connect(ASIO_VARIADIC_PARAMS(n)) \ { \ init_buffers(); \ this->basic_socket::close(ec_); \ typedef typename Protocol::resolver resolver_type; \ typedef typename resolver_type::query resolver_query; \ resolver_query query(ASIO_VARIADIC_ARGS(n)); \ resolve_and_connect(query); \ return !ec_ ? 
this : 0; \ } \ /**/ #endif // !defined(ASIO_HAS_VARIADIC_TEMPLATES) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // A separate base class is used to ensure that the io_service is initialised // prior to the basic_socket_streambuf's basic_socket base class. class socket_streambuf_base { protected: io_service io_service_; }; } // namespace detail /// Iostream streambuf for a socket. template , #if defined(ASIO_HAS_BOOST_DATE_TIME) \ || defined(GENERATING_DOCUMENTATION) typename Time = boost::posix_time::ptime, typename TimeTraits = asio::time_traits