galera-26.4.3/0000775000177500017540000000000013540715002011356 5ustar dbartmygalera-26.4.3/docs/0000775000177500017540000000000013540715002012306 5ustar dbartmygalera-26.4.3/docs/README0000664000177500017540000000010213540715002013157 0ustar dbartmyDocumentation moved to https://github.com/codership/documentation galera-26.4.3/galera/0000775000177500017540000000000013540715002012611 5ustar dbartmygalera-26.4.3/galera/src/0000775000177500017540000000000013540715002013400 5ustar dbartmygalera-26.4.3/galera/src/key_os.hpp0000664000177500017540000002204713540715002015407 0ustar dbartmy// // Copyright (C) 2011-2013 Codership Oy // #ifndef GALERA_KEY_HPP #define GALERA_KEY_HPP #include "wsrep_api.h" #include "gu_hash.h" #include "gu_serialize.hpp" #include "gu_unordered.hpp" #include "gu_throw.hpp" #include "gu_logger.hpp" #include "gu_vlq.hpp" #include #include #include #include #include namespace galera { // helper to cast from any kind of pointer to void template static inline void* void_cast(const C* c) { return const_cast(reinterpret_cast(c)); } class KeyPartOS { public: KeyPartOS(const gu::byte_t* buf, size_t buf_size) : buf_(buf), buf_size_(buf_size) { } const gu::byte_t* buf() const { return buf_; } size_t size() const { return buf_size_; } size_t key_len() const { #ifndef GALERA_KEY_VLQ return buf_[0]; #else size_t ret; (void)gu::uleb128_decode(buf_, buf_size_, 0, ret); return ret; #endif } #ifndef GALERA_KEY_VLQ const gu::byte_t* key() const { return buf_ + 1; } #else const gu::byte_t* key() const { size_t not_used; return buf_ + gu::uleb128_decode(buf_, buf_size_, 0, not_used); } #endif bool operator==(const KeyPartOS& other) const { return (other.buf_size_ == buf_size_ && memcmp(other.buf_, buf_, buf_size_) == 0); } private: const gu::byte_t* buf_; size_t buf_size_; }; inline std::ostream& operator<<(std::ostream& os, const KeyPartOS& kp) { const std::ostream::fmtflags prev_flags(os.flags(std::ostream::hex)); const char prev_fill(os.fill('0')); for (const 
gu::byte_t* i(kp.key()); i != kp.key() + kp.key_len(); ++i) { os << std::setw(2) << static_cast(*i); } os.flags(prev_flags); os.fill(prev_fill); return os; } class KeyOS { public: enum { F_SHARED = 0x1 }; KeyOS(int version) : version_(version), flags_(), keys_() { } KeyOS(int version, const wsrep_buf_t* keys, size_t keys_len, uint8_t flags) : version_(version), flags_ (flags), keys_ () { if (keys_len > 255) { gu_throw_error(EINVAL) << "maximum number of key parts exceeded: " << keys_len; } switch (version) { case 1: case 2: for (size_t i(0); i < keys_len; ++i) { size_t const offset(keys_.size()); size_t key_len(keys[i].len); const gu::byte_t* base(reinterpret_cast( keys[i].ptr)); #ifndef GALERA_KEY_VLQ if (gu_unlikely(key_len > 0xff)) key_len = 0xff; keys_.reserve(offset + 1 + key_len); keys_.insert(keys_.end(), key_len); keys_.insert(keys_.end(), base, base + key_len); #else size_t len_size(gu::uleb128_size(key_len)); keys_.resize(offset + len_size); (void)gu::uleb128_encode( key_len, &keys_[0], keys_.size(), offset); keys_.insert(keys_.end(), base, base + keys[i].key_len); #endif } break; default: gu_throw_fatal << "unsupported key version: " << version_; } } template KeyOS(int version, Ci begin, Ci end, uint8_t flags) : version_(version), flags_(flags), keys_() { for (Ci i(begin); i != end; ++i) { keys_.insert( keys_.end(), i->buf(), i->buf() + i->size()); } } int version() const { return version_; } template C key_parts() const { C ret; size_t i(0); size_t const keys_size(keys_.size()); while (i < keys_size) { #ifndef GALERA_KEY_VLQ size_t key_len(keys_[i] + 1); #else size_t key_len; size_t offset( gu::uleb128_decode(&keys_[0], keys_size, i, key_len)); key_len += offset - i; #endif if (gu_unlikely((i + key_len) > keys_size)) { gu_throw_fatal << "Keys buffer overflow by " << i + key_len - keys_size << " bytes: " << i + key_len << '/' << keys_size; } KeyPartOS kp(&keys_[i], key_len); ret.push_back(kp); i += key_len; } assert(i == keys_size); return ret; } uint8_t 
flags() const { return flags_; } bool operator==(const KeyOS& other) const { return (keys_ == other.keys_); } bool equal_all(const KeyOS& other) const { return (version_ == other.version_ && flags_ == other.flags_ && keys_ == other.keys_); } size_t size() const { return keys_.size() + sizeof(*this); } size_t hash() const { return gu_table_hash(&keys_[0], keys_.size()); } size_t hash_with_flags() const { return hash() ^ gu_table_hash(&flags_, sizeof(flags_)); } size_t serialize(gu::byte_t*, size_t, size_t) const; size_t unserialize(const gu::byte_t*, size_t, size_t); size_t serial_size() const; private: friend std::ostream& operator<<(std::ostream& os, const KeyOS& key); int version_; uint8_t flags_; gu::Buffer keys_; }; inline std::ostream& operator<<(std::ostream& os, const KeyOS& key) { std::ostream::fmtflags flags(os.flags()); switch (key.version_) { case 2: os << std::hex << static_cast(key.flags()) << " "; // Fall through case 1: { std::deque dq(key.key_parts >()); std::copy(dq.begin(), dq.end(), std::ostream_iterator(os, " ")); break; } default: gu_throw_fatal << "unsupported key version: " << key.version_; } os.flags(flags); return os; } inline size_t KeyOS::serialize(gu::byte_t* buf, size_t buflen, size_t offset) const { switch (version_) { #ifndef GALERA_KEY_VLQ case 1: return gu::serialize2(keys_, buf, buflen, offset); case 2: offset = gu::serialize1(flags_, buf, buflen, offset); return gu::serialize2(keys_, buf, buflen, offset); #else case 1: { size_t keys_size(keys_.size()); offset = gu::uleb128_encode(keys_size, buf, buflen, offset); assert (offset + key_size <= buflen); std::copy(&keys_[0], &keys_[0] + keys_size, buf + offset); return (offset + keys_size); } #endif default: log_fatal << "Internal error: unsupported key version: " << version_; abort(); return 0; } } inline size_t KeyOS::unserialize(const gu::byte_t* buf, size_t buflen, size_t offset) { switch (version_) { #ifndef GALERA_KEY_VLQ case 1: return gu::unserialize2(buf, buflen, offset, 
keys_); case 2: offset = gu::unserialize1(buf, buflen, offset, flags_); return gu::unserialize2(buf, buflen, offset, keys_); #else case 1: { size_t len; offset = gu::uleb128_decode(buf, buflen, offset, len); keys_.resize(len); std::copy(buf + offset, buf + offset + len, keys_.begin()); return (offset + len); } #endif default: gu_throw_error(EPROTONOSUPPORT) << "unsupported key version: " << version_; } } inline size_t KeyOS::serial_size() const { switch (version_) { #ifndef GALERA_KEY_VLQ case 1: return gu::serial_size2(keys_); case 2: return (gu::serial_size(flags_) + gu::serial_size2(keys_)); #else case 1: { size_t size(gu::uleb128_size(keys_.size())); return (size + keys_.size()); } #endif default: log_fatal << "Internal error: unsupported key version: " << version_; abort(); return 0; } } } #endif // GALERA_KEY_HPP galera-26.4.3/galera/src/write_set.cpp0000664000177500017540000000661413540715002016120 0ustar dbartmy// // Copyright (C) 2010-2013 Codership Oy // #include "write_set.hpp" #include "gu_serialize.hpp" #include "gu_logger.hpp" size_t galera::WriteSet::serialize(gu::byte_t* buf, size_t buf_len, size_t offset) const { offset = gu::serialize4(keys_, buf, buf_len, offset); offset = gu::serialize4(data_, buf, buf_len, offset); return offset; } size_t galera::WriteSet::unserialize(const gu::byte_t* buf, size_t buf_len, size_t offset) { keys_.clear(); offset = gu::unserialize4(buf, buf_len, offset, keys_); offset = gu::unserialize4(buf, buf_len, offset, data_); return offset; } size_t galera::WriteSet::serial_size() const { return (gu::serial_size4(keys_) + gu::serial_size4(data_)); } std::pair galera::WriteSet::segment(const gu::byte_t* buf, size_t buf_len, size_t offset) { uint32_t data_len; offset = gu::unserialize4(buf, buf_len, offset, data_len); if (gu_unlikely(offset + data_len > buf_len)) { #ifdef NDEBUG gu_throw_error(EMSGSIZE); #else gu_throw_error(EMSGSIZE) << "offset: " << offset << ", data_len: " << data_len << ", buf_len: " << buf_len; #endif 
/* NDEBUG */ } return std::pair(offset, data_len); } size_t galera::WriteSet::keys(const gu::byte_t* buf, size_t buf_len, size_t offset, int version, KeySequence& ks) { std::pair seg(segment(buf, buf_len, offset)); offset = seg.first; const size_t seg_end(seg.first + seg.second); assert(seg_end <= buf_len); while (offset < seg_end) { KeyOS key(version); if ((offset = key.unserialize(buf, buf_len, offset)) == 0) { gu_throw_fatal << "failed to unserialize key"; } ks.push_back(key); } assert(offset == seg_end); return offset; } void galera::WriteSet::append_key(const KeyData& kd) { KeyOS key (kd.proto_ver, kd.parts, kd.parts_num, (kd.shared() ? galera::KeyOS::F_SHARED : 0) ); if (kd.shared()) assert(key.flags() & galera::KeyOS::F_SHARED); else assert(!(key.flags() & galera::KeyOS::F_SHARED)); const size_t hash(key.hash()); std::pair range(key_refs_.equal_range(hash)); for (KeyRefMap::const_iterator i(range.first); i != range.second; ++i) { KeyOS cmp(version_); (void)cmp.unserialize(&keys_[0], keys_.size(), i->second); if (key == cmp && key.flags() == cmp.flags()) return; } size_t key_size(key.serial_size()); size_t offset(keys_.size()); keys_.resize(offset + key_size); (void)key.serialize(&keys_[0], keys_.size(), offset); (void)key_refs_.insert(std::make_pair(hash, offset)); } void galera::WriteSet::get_keys(KeySequence& s) const { size_t offset(0); while (offset < keys_.size()) { KeyOS key(version_); if ((offset = key.unserialize(&keys_[0], keys_.size(), offset)) == 0) { gu_throw_fatal << "failed to unserialize key"; } s.push_back(key); } assert(offset == keys_.size()); } galera-26.4.3/galera/src/galera_exception.hpp0000664000177500017540000000503113540715002017421 0ustar dbartmy// // Copyright (C) 2010-2016 Codership Oy // #ifndef GALERA_EXCEPTION_HPP #define GALERA_EXCEPTION_HPP #include #include #include "wsrep_api.h" #include namespace galera { /*! 
* An exception to handle applier errors and avoid confusing wsrep error codes * with the standard ones */ class ApplyException : public gu::Exception { public: ApplyException (const std::string& msg, void* b1, const void* b2, size_t len) : gu::Exception(msg, -1), data_(b1), const_data_(b2), data_len_(len) { assert(NULL == b1 || NULL == b2); } ApplyException() : gu::Exception("", 0), data_(NULL), const_data_(NULL), data_len_(0) {} ApplyException(const ApplyException& ae) : gu::Exception(ae), data_(ae.data_), const_data_(ae.const_data_), data_len_(ae.data_len_) {} ~ApplyException() throw() {} /* this is just int because we must handle any positive value */ int status() const { return get_errno(); } const void* data() const { return const_data_ ? const_data_ : data_; } size_t data_len() const { return data_len_; } void free() { ::free(data_); data_ = NULL; } ApplyException& operator=(ApplyException ae) { using std::swap; #if 1 swap(static_cast(*this),static_cast(ae)); swap(this->data_, ae.data_); swap(this->const_data_, ae.const_data_); swap(this->data_len_, ae.data_len_); #else swap(*this, ae); #endif return *this; } private: void* data_; const void* const_data_; size_t data_len_; }; static inline const char* wsrep_status_str(wsrep_status_t& status) { switch (status) { case WSREP_OK: return "WSREP_OK"; case WSREP_WARNING: return "WSREP_WARNING"; case WSREP_TRX_MISSING: return "WSREP_TRX_MISSING"; case WSREP_TRX_FAIL: return "WSREP_TRX_FAIL"; case WSREP_BF_ABORT: return "WSREP_BF_ABORT"; case WSREP_CONN_FAIL: return "WSREP_CONN_FAIL"; case WSREP_NODE_FAIL: return "WSREP_NODE_FAIL"; case WSREP_FATAL: return "WSREP_FATAL"; case WSREP_NOT_IMPLEMENTED: return "WSREP_NOT_IMPLEMENTED"; default: return "(unknown code)"; } } /*! 
* And exception to handle replication errors */ class ReplException : public gu::Exception { public: ReplException (const std::string& msg, int err) : gu::Exception (msg, err) {} }; } #endif /* GALERA_EXCEPTION_HPP */ galera-26.4.3/galera/src/wsdb.hpp0000664000177500017540000000752113540715002015055 0ustar dbartmy// // Copyright (C) 2010-2014 Codership Oy // #ifndef GALERA_WSDB_HPP #define GALERA_WSDB_HPP #include "trx_handle.hpp" #include "wsrep_api.h" #include "gu_unordered.hpp" namespace galera { class Wsdb { class Conn { public: Conn(wsrep_conn_id_t conn_id) : conn_id_(conn_id), trx_() { } Conn(const Conn& other) : conn_id_(other.conn_id_), trx_(other.trx_) { } ~Conn() { } void assign_trx(TrxHandleMasterPtr trx) { trx_ = trx; } void reset_trx() { trx_ = TrxHandleMasterPtr(); } TrxHandleMasterPtr get_trx() { return trx_; } private: void operator=(const Conn&); wsrep_conn_id_t conn_id_; TrxHandleMasterPtr trx_; }; class TrxHash { public: size_t operator()(const wsrep_trx_id_t& key) const { return key; } }; typedef gu::UnorderedMap TrxMap; class ConnHash { public: size_t operator()(const wsrep_conn_id_t& key) const { return key; } }; typedef gu::UnorderedMap ConnMap; public: TrxHandleMasterPtr get_trx(const TrxHandleMaster::Params& params, const wsrep_uuid_t& source_id, wsrep_trx_id_t trx_id, bool create =false); TrxHandleMasterPtr new_trx(const TrxHandleMaster::Params& params, const wsrep_uuid_t& source_id, wsrep_trx_id_t trx_id) { return TrxHandleMasterPtr(TrxHandleMaster::New(trx_pool_, params, source_id, -1, trx_id), TrxHandleMasterDeleter()); } void discard_trx(wsrep_trx_id_t trx_id); TrxHandleMasterPtr get_conn_query(const TrxHandleMaster::Params&, const wsrep_uuid_t&, wsrep_conn_id_t conn_id, bool create = false); void discard_conn_query(wsrep_conn_id_t conn_id); Wsdb(); ~Wsdb(); void print(std::ostream& os) const; struct stats { stats(size_t n_trx, size_t n_conn) : n_trx_(n_trx) , n_conn_(n_conn) { } size_t n_trx_; size_t n_conn_; }; stats get_stats() 
const { gu::Lock trx_lock(trx_mutex_); gu::Lock conn_lock(conn_mutex_); stats ret(trx_map_.size(), conn_map_.size()); return ret; } private: // Create new trx handle TrxHandleMasterPtr create_trx(const TrxHandleMaster::Params& params, const wsrep_uuid_t& source_id, wsrep_trx_id_t trx_id); Conn* get_conn(wsrep_conn_id_t conn_id, bool create); static const size_t trx_mem_limit_ = 1 << 20; TrxHandleMaster::Pool trx_pool_; TrxMap trx_map_; gu::Mutex trx_mutex_; ConnMap conn_map_; gu::Mutex conn_mutex_; }; inline std::ostream& operator<<(std::ostream& os, const Wsdb& w) { w.print(os); return os; } } #endif // GALERA_WSDB_HPP galera-26.4.3/galera/src/key_data.cpp0000664000177500017540000000066013540715002015667 0ustar dbartmy// // Copyright (C) 2018 Codership Oy // #include "key_data.hpp" #include void galera::KeyData::print(std::ostream& os) const { os << "proto: " << proto_ver << ", type: " << type << ", copy: " << (copy ? "yes" : "no") << ", parts(" << parts_num << "):"; for (int i = 0; i < parts_num; ++i) { os << "\n\t" << gu::Hexdump(parts[i].ptr, parts[i].len, true); } } galera-26.4.3/galera/src/replicator_smm_params.cpp0000664000177500017540000001572413540715002020500 0ustar dbartmy/* Copyright (C) 2012-2018 Codership Oy */ #include "replicator_smm.hpp" #include "gcs.hpp" #include "galera_common.hpp" #include "gu_uri.hpp" #include "write_set_ng.hpp" #include "gu_throw.hpp" const std::string galera::ReplicatorSMM::Param::base_host = "base_host"; const std::string galera::ReplicatorSMM::Param::base_port = "base_port"; const std::string galera::ReplicatorSMM::Param::base_dir = "base_dir"; static const std::string common_prefix = "repl."; const std::string galera::ReplicatorSMM::Param::commit_order = common_prefix + "commit_order"; const std::string galera::ReplicatorSMM::Param::causal_read_timeout = common_prefix + "causal_read_timeout"; const std::string galera::ReplicatorSMM::Param::proto_max = common_prefix + "proto_max"; const std::string 
galera::ReplicatorSMM::Param::key_format = common_prefix + "key_format"; const std::string galera::ReplicatorSMM::Param::max_write_set_size = common_prefix + "max_ws_size"; int const galera::ReplicatorSMM::MAX_PROTO_VER(10); galera::ReplicatorSMM::Defaults::Defaults() : map_() { map_.insert(Default(Param::base_port, BASE_PORT_DEFAULT)); map_.insert(Default(Param::base_dir, BASE_DIR_DEFAULT)); map_.insert(Default(Param::proto_max, gu::to_string(MAX_PROTO_VER))); map_.insert(Default(Param::key_format, "FLAT8")); map_.insert(Default(Param::commit_order, "3")); map_.insert(Default(Param::causal_read_timeout, "PT30S")); const int max_write_set_size(galera::WriteSetNG::MAX_SIZE); map_.insert(Default(Param::max_write_set_size, gu::to_string(max_write_set_size))); } const galera::ReplicatorSMM::Defaults galera::ReplicatorSMM::defaults; galera::ReplicatorSMM::InitConfig::InitConfig(gu::Config& conf, const char* const node_address, const char* const base_dir) { gu::ssl_register_params(conf); Replicator::register_params(conf); std::map::const_iterator i; for (i = defaults.map_.begin(); i != defaults.map_.end(); ++i) { if (i->second.empty()) conf.add(i->first); else conf.add(i->first, i->second); } // what is would be a better protection? 
int const pv(gu::from_string(conf.get(Param::proto_max))); if (pv > MAX_PROTO_VER) { log_warn << "Can't set '" << Param::proto_max << "' to " << pv << ": maximum supported value is " << MAX_PROTO_VER; conf.add(Param::proto_max, gu::to_string(MAX_PROTO_VER)); } conf.add(COMMON_BASE_HOST_KEY); conf.add(COMMON_BASE_PORT_KEY); if (node_address && strlen(node_address) > 0) { gu::URI na(node_address, false); try { std::string const host = na.get_host(); if (host == "0.0.0.0" || host == "0:0:0:0:0:0:0:0" || host == "::") { gu_throw_error(EINVAL) << "Bad value for 'node_address': '" << host << '\''; } conf.set(BASE_HOST_KEY, host); } catch (gu::NotSet& e) {} try { conf.set(BASE_PORT_KEY, na.get_port()); } catch (gu::NotSet& e) {} } // Now we store directory name to conf. This directory name // could be used by other components, for example by gcomm // to find appropriate location for view state file. if (base_dir) { conf.set(BASE_DIR, base_dir); } else { conf.set(BASE_DIR, BASE_DIR_DEFAULT); } /* register variables and defaults from other modules */ gcache::GCache::register_params(conf); if (gcs_register_params(reinterpret_cast(&conf))) { gu_throw_fatal << "Error initializing GCS parameters"; } Certification::register_params(conf); ist::register_params(conf); } galera::ReplicatorSMM::ParseOptions::ParseOptions(Replicator& repl, gu::Config& conf, const char* const opts) { if (opts) conf.parse(opts); if (conf.get(Replicator::Param::debug_log)) { gu_conf_debug_on(); } else { gu_conf_debug_off(); } #ifdef GU_DBUG_ON if (conf.is_set(galera::Replicator::Param::dbug)) { GU_DBUG_PUSH(conf.get(galera::Replicator::Param::dbug).c_str()); } else { GU_DBUG_POP(); } if (conf.is_set(galera::Replicator::Param::signal)) { gu_debug_sync_signal(conf.get(galera::Replicator::Param::signal)); } #endif /* GU_DBUG_ON */ } /* helper for param_set() below */ void galera::ReplicatorSMM::set_param (const std::string& key, const std::string& value) { if (key == Param::commit_order) { log_error << 
"setting '" << key << "' during runtime not allowed"; gu_throw_error(EPERM) << "setting '" << key << "' during runtime not allowed"; } else if (key == Param::causal_read_timeout) { causal_read_timeout_ = gu::datetime::Period(value); } else if (key == Param::base_host || key == Param::base_port || key == Param::base_dir || key == Param::proto_max) { // nothing to do here, these params take effect only at // provider (re)start } else if (key == Param::key_format) { trx_params_.key_format_ = KeySet::version(value); } else if (key == Param::max_write_set_size) { trx_params_.max_write_set_size_ = gu::from_string(value); } else { log_warn << "parameter '" << key << "' not found"; assert(0); throw gu::NotFound(); } } void galera::ReplicatorSMM::param_set (const std::string& key, const std::string& value) { try { if (config_.get(key) == value) return; } catch (gu::NotSet&) {} bool found(false); // Note: base_host is treated separately here as it cannot have // default value known at compile time. if (defaults.map_.find(key) != defaults.map_.end() || key == Param::base_host) // is my key? 
{ set_param (key, value); found = true; config_.set(key, value); } // this key might be for another module else if (0 != key.find(common_prefix)) { try { cert_.param_set (key, value); found = true; } catch (gu::NotFound&) {} try { gcs_.param_set (key, value); found = true; } catch (gu::NotFound&) {} try { gcache_.param_set (key, value); found = true; } catch (gu::NotFound&) {} } if (!found) throw gu::NotFound(); } std::string galera::ReplicatorSMM::param_get (const std::string& key) const { return config_.get(key); } galera-26.4.3/galera/src/galera_info.hpp0000664000177500017540000000133513540715002016361 0ustar dbartmy// Copyright (C) 2009-2018 Codership Oy #ifndef __GALERA_INFO_H__ #define __GALERA_INFO_H__ #include "gcs.hpp" #include "wsrep_api.h" /* create view info out of configuration message * if my_uuid is defined - use it to determine wsrep_view_info_t::my_idx, * otherwise set my_uuid according to my_idx */ extern wsrep_view_info_t* galera_view_info_create (const gcs_act_cchange& conf, wsrep_cap_t capabilities, int my_idx, wsrep_uuid_t& my_uuid); /* make a copy of view info object */ extern wsrep_view_info_t* galera_view_info_copy (const wsrep_view_info_t* vi); #endif // __GALERA_INFO_H__ galera-26.4.3/galera/src/key_set.cpp0000664000177500017540000003311613540715002015553 0ustar dbartmy// // Copyright (C) 2013-2018 Codership Oy // #include "key_set.hpp" #include "gu_logger.hpp" #include "gu_hexdump.hpp" #include #include // std::transform namespace galera { void KeySet::throw_version(int ver) { gu_throw_error(EINVAL) << "Unsupported KeySet version: " << ver; } static const char* ver_str[KeySet::MAX_VERSION + 1] = { "EMPTY", "FLAT8", "FLAT8A", "FLAT16", "FLAT16A" }; KeySet::Version KeySet::version (const std::string& ver) { std::string tmp(ver); std::transform(tmp.begin(), tmp.end(), tmp.begin(), ::toupper); for (int i(EMPTY); i <= MAX_VERSION; ++i) { if (tmp == ver_str[i]) return version(i); } gu_throw_error(EINVAL) << "Unsupported KeySet version: " << 
ver; throw; } static const char* type_str[4] = { "SH", "RE", "UP", "EX" }; const char* KeySet::type(wsrep_key_type_t t) { assert(size_t(t) < sizeof(type_str) / sizeof(type_str[0])); return type_str[t]; } size_t KeySet::KeyPart::store_annotation (const wsrep_buf_t* const parts, int const part_num, gu::byte_t* buf, int const size, int const alignment) { assert(size >= 0); /* max len representable in one byte */ static size_t const max_part_len(std::numeric_limits::max()); /* max multiple of alignment_ len representable in ann_size_t */ ann_size_t const max_ann_len(std::numeric_limits::max() / alignment * alignment); ann_size_t ann_size; int tmp_size(sizeof(ann_size)); for (int i(0); i <= part_num; ++i) { tmp_size += 1 + std::min(parts[i].len, max_part_len); } assert(tmp_size > 0); /* Make sure that final annotation size is * 1) is a multiple of alignment * 2) is representable with ann_size_t * 3) doesn't exceed dst buffer size */ ann_size = std::min(GU_ALIGN(tmp_size, alignment), max_ann_len); ann_size = std::min(ann_size, size / alignment * alignment); assert (ann_size <= size); assert ((ann_size % alignment) == 0); ann_size_t const pad_size(tmp_size < ann_size ? 
ann_size - tmp_size : 0); if (gu_likely(ann_size > 0)) { ann_size_t const tmp(gu::htog(ann_size)); ann_size_t off(sizeof(tmp)); ::memcpy(buf, &tmp, off); for (int i(0); i <= part_num && off < ann_size; ++i) { size_t const left(ann_size - off - 1); gu::byte_t const part_len (std::min(std::min(parts[i].len, left), max_part_len)); buf[off] = part_len; ++off; const gu::byte_t* const from( static_cast(parts[i].ptr)); std::copy(from, from + part_len, buf + off); off += part_len; } if (pad_size > 0) { ::memset(buf + off, 0, pad_size); off += pad_size; } assert (off == ann_size); } // log_info << "stored annotation of size: " << ann_size; return ann_size; } void KeySet::KeyPart::print_annotation(std::ostream& os, const gu::byte_t* buf) { ann_size_t const ann_size(gu::gtoh( *reinterpret_cast(buf))); size_t const begin(sizeof(ann_size_t)); size_t off(begin); while (off < ann_size) { if (off != begin) os << '/'; gu::byte_t const part_len(buf[off]); ++off; bool const last(ann_size == off + part_len); /* this is an attempt to guess whether we should interpret key part as * a string or numerical value */ bool const alpha(!last || part_len > 8); os << gu::Hexdump (buf + off, part_len, alpha); off += part_len; } } void KeySet::KeyPart::throw_buffer_too_short (size_t expected, size_t got) { gu_throw_error (EINVAL) << "Buffer too short: expected " << expected << ", got " << got; } void KeySet::KeyPart::throw_bad_type_version (wsrep_key_type_t t, int v) { gu_throw_error(EINVAL) << "Internal program error: wsrep key type: " << t << ", writeset version: " << v; } void KeySet::KeyPart::throw_bad_prefix (gu::byte_t p) { gu_throw_error(EPROTO) << "Unsupported key prefix: " << int(p); } void KeySet::KeyPart::throw_match_empty_key (Version my, Version other) { gu_throw_error(EINVAL) << "Attempt to match against an empty key (" << my << ',' << other << ')'; } void KeySet::KeyPart::print (std::ostream& os) const { Version const ver(version()); size_t const size(ver != EMPTY ? 
base_size(ver, data_, 1) : 0); os << '(' << prefix() << ',' << ver_str[ver] << ')' << gu::Hexdump(data_, size); if (annotated(ver)) { os << "="; print_annotation (os, data_ + size); } } /* returns true if left type is stronger than right */ static inline bool key_prefix_is_stronger_than(int const left, int const right) { return left > right; // for now key prefix is numerically ordered } KeySetOut::KeyPart::KeyPart (KeyParts& added, KeySetOut& store, const KeyPart* parent, const KeyData& kd, int const part_num, int const ws_ver, int const alignment) : hash_ (parent->hash_), part_ (0), value_(static_cast(kd.parts[part_num].ptr)), size_ (kd.parts[part_num].len), ver_ (parent->ver_), own_ (false) { assert (ver_); uint32_t const s(gu::htog(size_)); hash_.append (&s, sizeof(s)); hash_.append (value_, size_); KeySet::KeyPart::TmpStore ts; KeySet::KeyPart::HashData hd; hash_.gather(hd.buf); /* only leaf part of the key can be not WSREP_KEY_SHARED */ bool const leaf (part_num + 1 == kd.parts_num); wsrep_key_type_t const type (leaf ? kd.type : WSREP_KEY_SHARED); int const prefix (KeySet::KeyPart::prefix(type, ws_ver)); assert (kd.parts_num > part_num); KeySet::KeyPart kp(ts, hd, kd.parts, ver_, prefix, part_num, alignment); #if 0 /* find() way */ /* the reason to use find() first, instead of going straight to insert() * is that we need to insert the part that was persistently stored in the * key set. At the same time we can't yet store the key part in the key set * before we can be sure that it is not a duplicate. Sort of 2PC. 
*/ KeyParts::iterator found(added.find(kp)); if (added.end() != found) { if (key_prefix_is_stronger_than(prefix, found->prefix())) { /* need to ditch weaker and add stronger version of the key */ added.erase(found); found = added.end(); } else if (leaf || key_prefix_is_stronger_than(found->prefix(), prefix)) { #ifndef NDEBUG if (leaf) log_debug << "KeyPart ctor: full duplicate of " << *found; else log_debug << "Duplicate of stronger: " << *found; #endif throw DUPLICATE(); } } if (added.end() == found) /* no such key yet, store and add */ { kp.store (store); std::pair res(added.insert(kp)); assert (res.second); found = res.first; } part_ = &(*found); #else /* insert() way */ std::pair const inserted(added.insert(kp)); if (inserted.second) { /* The key part was successfully inserted, store it in the key set buffer */ inserted.first->store (store); } else { /* A matching key part instance is already present in the set, check constraints */ if (key_prefix_is_stronger_than(prefix, inserted.first->prefix())) { /* The key part instance present in the set has weaker constraint, store this instance as well and update inserted to point there. (we can't update already stored data - it was checksummed, so we have to store a duplicate with a stronger constraint) */ kp.store (store); inserted.first->update_ptr(kp.ptr()); /* It is a hack, but it should be safe to modify key part already inserted into unordered set, as long as modification does not change hash and equality test results. And we get it to point to a duplicate here.*/ } else if (leaf || key_prefix_is_stronger_than(inserted.first->prefix(), prefix)) { /* we don't throw DUPLICATE for branch parts, just ignore them. DUPLICATE is thrown only when the whole key is a duplicate. 
*/ #ifndef NDEBUG if (leaf) log_debug << "KeyPart ctor: full duplicate of " << *inserted.first; else log_debug << "Duplicate of exclusive: " << *inserted.first; #endif throw DUPLICATE(); } } part_ = &(*inserted.first); #endif /* insert() way */ } void KeySetOut::KeyPart::print (std::ostream& os) const { if (part_) os << *part_; else os << "0x0"; os << '(' << gu::Hexdump(value_, size_, true) << ')'; } #define CHECK_PREVIOUS_KEY 1 size_t KeySetOut::append (const KeyData& kd) { int i(0); // log_info << "Appending key data:" << kd; #ifdef CHECK_PREVIOUS_KEY /* find common ancestor with the previous key */ for (; i < kd.parts_num && size_t(i + 1) < prev_.size() && prev_[i + 1].match(kd.parts[i].ptr, kd.parts[i].len); ++i) { #if 0 log_info << "prev[" << (i+1) << "]\n" << prev_[i+1] << "\nmatches\n" << gu::Hexdump(kd.parts[i].ptr, kd.parts[i].len, true); #endif /* 0 */ } // log_info << "matched " << i << " parts"; int const kd_leaf_prefix(KeySet::KeyPart::prefix(kd.type, ws_ver_)); /* if we have a fully matched key OR common ancestor is stronger, return */ if (i > 0) { assert (size_t(i) < prev_.size()); int const exclusive_prefix (KeySet::KeyPart::prefix(WSREP_KEY_EXCLUSIVE, ws_ver_)); if (key_prefix_is_stronger_than(prev_[i].prefix(), kd_leaf_prefix) || prev_[i].prefix() == exclusive_prefix) { // log_info << "Returning after matching a stronger key:\n"< */ #ifndef GALERA_SERVICE_THD_HPP #define GALERA_SERVICE_THD_HPP #include "galera_gcs.hpp" #include #include // gu::Mutex and gu::Cond namespace galera { class ServiceThd { public: ServiceThd (GcsI& gcs, gcache::GCache& gcache); ~ServiceThd (); /*! flush all ongoing operations (before processing CC) * and install new group UUID */ void flush (const gu::UUID& uuid); /*! reset to initial state before gcs (re)connect */ void reset(); /* !!! * The following methods must be invoked only within a monitor, * so that monitors drain during CC ensures that no outdated * actions are scheduled with the service thread after that. 
* !!! */ /*! schedule seqno to be reported as last committed */ /* report = false is to disable sending duplicate in case of error voting * that is done through a different, blocking channel */ void report_last_committed (gcs_seqno_t seqno, bool const report = true); /*! release write sets up to and including seqno */ void release_seqno (gcs_seqno_t seqno); private: static const uint32_t A_NONE; struct Data { gu::GTID last_committed_; gcs_seqno_t release_seqno_; uint32_t act_; Data() : last_committed_(), release_seqno_ (0), act_ (A_NONE) {} }; gcache::GCache& gcache_; GcsI& gcs_; gu_thread_t thd_; gu::Mutex mtx_; gu::Cond cond_; // service request condition gu::Cond flush_; // flush condition Data data_; static void* thd_func (void*); ServiceThd (const ServiceThd&); ServiceThd& operator= (const ServiceThd&); }; } #endif /* GALERA_SERVICE_THD_HPP */ galera-26.4.3/galera/src/saved_state.hpp0000664000177500017540000000335513540715002016421 0ustar dbartmy// // Copyright (C) 2012-2018 Codership Oy // #ifndef GALERA_SAVED_STATE_HPP #define GALERA_SAVED_STATE_HPP #include "gu_atomic.hpp" #include "gu_mutex.hpp" #include "gu_lock.hpp" #include "wsrep_api.h" #include #include namespace galera { class SavedState { public: SavedState (const std::string& file); ~SavedState (); void get (wsrep_uuid_t& u, wsrep_seqno_t& s, bool& safe_to_bootstrap); void set (const wsrep_uuid_t& u, wsrep_seqno_t s, bool safe_to_bootstrap); void mark_unsafe(); void mark_safe(); void mark_corrupt(); void mark_uncorrupt(const wsrep_uuid_t& u, wsrep_seqno_t s); bool corrupt() const { return corrupt_; } void stats(long& marks, long& locks, long& writes) { marks = total_marks_(); locks = total_locks_; writes = total_writes_; } private: FILE* fs_; const std::string filename_; wsrep_uuid_t uuid_; wsrep_seqno_t seqno_; bool safe_to_bootstrap_; gu::Atomic unsafe_; bool corrupt_; /* this mutex is needed because mark_safe() and mark_corrupt() will be * called outside local monitor, so race is possible */ 
gu::Mutex mtx_; wsrep_uuid_t written_uuid_; ssize_t current_len_; gu::Atomic total_marks_; long total_locks_; long total_writes_; void write_file (const wsrep_uuid_t& u, const wsrep_seqno_t s, bool safe_to_bootstrap); SavedState (const SavedState&); SavedState& operator=(const SavedState&); }; /* class SavedState */ } /* namespace galera */ #endif /* GALERA_SAVED_STATE_HPP */ galera-26.4.3/galera/src/gcs_action_source.cpp0000664000177500017540000001315013540715002017575 0ustar dbartmy// // Copyright (C) 2010-2018 Codership Oy // #include "replicator.hpp" #include "gcs_action_source.hpp" #include "trx_handle.hpp" #include "gu_serialize.hpp" #include "gu_throw.hpp" #include "galera_info.hpp" #include // Exception-safe way to release action pointer when it goes out // of scope class Release { public: Release(struct gcs_action& act, gcache::GCache& gcache) : act_(act), gcache_(gcache) {} ~Release() { switch (act_.type) { case GCS_ACT_WRITESET: case GCS_ACT_CCHANGE: // these are ordered and should be released when no longer needed break; case GCS_ACT_STATE_REQ: gcache_.free(const_cast(act_.buf)); break; default: ::free(const_cast(act_.buf)); break; } } private: struct gcs_action& act_; gcache::GCache& gcache_; }; void galera::GcsActionSource::process_writeset(void* const recv_ctx, const struct gcs_action& act, bool& exit_loop) { assert(act.seqno_g > 0); assert(act.seqno_l != GCS_SEQNO_ILL); TrxHandleSlavePtr tsp(TrxHandleSlave::New(false, trx_pool_), TrxHandleSlaveDeleter()); gu_trace(tsp->unserialize(act)); tsp->set_local(replicator_.source_id() == tsp->source_id()); gu_trace(replicator_.process_trx(recv_ctx, tsp)); exit_loop = tsp->exit_loop(); // this is the end of trx lifespan } void galera::GcsActionSource::resend_writeset(const struct gcs_action& act) { assert(act.seqno_g == -EAGAIN); assert(act.seqno_l == GCS_SEQNO_ILL); ssize_t ret; struct gu_buf const sb = { act.buf, act.size }; GcsI::WriteSetVector v; v[0] = sb; /* grab send monitor to resend asap */ while 
((ret = gcs_.sendv(v, act.size, act.type, false, true)) == -EAGAIN) { usleep(1000); } if (ret > 0) { log_debug << "Local action " << gcs_act_type_to_str(act.type) << " of size " << ret << '/' << act.size << " was resent."; /* release source buffer */ gcache_.free(const_cast(act.buf)); } else { gu_throw_fatal << "Failed to resend action {" << act.buf << ", " << act.size << ", " << gcs_act_type_to_str(act.type) << "}"; } } void galera::GcsActionSource::dispatch(void* const recv_ctx, const struct gcs_action& act, bool& exit_loop) { assert(act.buf != 0); assert(act.seqno_l > 0 || act.seqno_g == -EAGAIN); switch (act.type) { case GCS_ACT_WRITESET: if (act.seqno_g > 0) { process_writeset(recv_ctx, act, exit_loop); } else { resend_writeset(act); } break; case GCS_ACT_COMMIT_CUT: { wsrep_seqno_t seqno; gu::unserialize8(act.buf, act.size, 0, seqno); assert(seqno >= 0); gu_trace(replicator_.process_commit_cut(seqno, act.seqno_l)); break; } case GCS_ACT_CCHANGE: gu_trace(replicator_.process_conf_change(recv_ctx, act)); break; case GCS_ACT_STATE_REQ: gu_trace(replicator_.process_state_req(recv_ctx, act.buf, act.size, act.seqno_l, act.seqno_g)); break; case GCS_ACT_JOIN: { wsrep_seqno_t seq; gu::unserialize8(static_cast(act.buf), act.size, 0, seq); gu_trace(replicator_.process_join(seq, act.seqno_l)); break; } case GCS_ACT_SYNC: gu_trace(replicator_.process_sync(act.seqno_l)); break; case GCS_ACT_VOTE: { int64_t seqno; size_t const off(gu::unserialize8(act.buf, act.size, 0, seqno)); int64_t code; gu::unserialize8(act.buf, act.size, off, code); assert(seqno >= 0); gu_trace(replicator_.process_vote(seqno, act.seqno_l, code)); break; } default: gu_throw_fatal << "unrecognized action type: " << act.type; } } ssize_t galera::GcsActionSource::process(void* recv_ctx, bool& exit_loop) { struct gcs_action act; ssize_t rc(gcs_.recv(act)); /* Potentially we want to do corrupt() check inside commit_monitor_ as well * but by the time inconsistency is detected an arbitrary number of * 
transactions may be already committed, so no reason to try that hard * in a critical section */ bool const skip(replicator_.corrupt() && GCS_ACT_CCHANGE != act.type && GCS_ACT_VOTE != act.type && /* action needs resending */ -EAGAIN != act.seqno_g); if (gu_likely(rc > 0 && !skip)) { Release release(act, gcache_); if (-EAGAIN != act.seqno_g /* replicated action */) { ++received_; received_bytes_ += rc; } try { gu_trace(dispatch(recv_ctx, act, exit_loop)); } catch (gu::Exception& e) { log_error << "Failed to process action " << act << ": " << e.what(); rc = -e.get_errno(); } } else if (rc > 0 && skip) { replicator_.cancel_seqnos(act.seqno_l, act.seqno_g); } else { assert(act.seqno_l < 0); assert(act.seqno_g < 0); } return rc; } galera-26.4.3/galera/src/galera_service_thd.cpp0000664000177500017540000001011213540715002017711 0ustar dbartmy/* * Copyright (C) 2010-2013 Codership Oy * * Using broadcasts instead of signals below to wake flush callers due to * theoretical possibility of more than 2 threads involved. 
*/ #include "galera_service_thd.hpp" const uint32_t galera::ServiceThd::A_NONE = 0; static const uint32_t A_LAST_COMMITTED = 1U << 0; static const uint32_t A_RELEASE_SEQNO = 1U << 1; static const uint32_t A_FLUSH = 1U << 30; static const uint32_t A_EXIT = 1U << 31; void* galera::ServiceThd::thd_func (void* arg) { galera::ServiceThd* st = reinterpret_cast(arg); bool exit = false; while (!exit) { galera::ServiceThd::Data data; { gu::Lock lock(st->mtx_); if (A_NONE == st->data_.act_) lock.wait(st->cond_); data = st->data_; st->data_.act_ = A_NONE; // clear pending actions if (data.act_ & A_FLUSH) { if (A_FLUSH == data.act_) { // no other actions scheduled (all previous are "flushed") log_info << "Service thread queue flushed."; st->flush_.broadcast(); } else { // restore flush flag for the next iteration st->data_.act_ |= A_FLUSH; } } } exit = ((data.act_ & A_EXIT)); if (!exit) { if (data.act_ & A_LAST_COMMITTED) { ssize_t const ret (st->gcs_.set_last_applied(data.last_committed_)); if (gu_unlikely(ret < 0)) { log_warn << "Failed to report last committed " << data.last_committed_ << ", " << ret << " (" << strerror (-ret) << ')'; // @todo: figure out what to do in this case } else { log_debug << "Reported last committed: " << data.last_committed_; } } if (data.act_ & A_RELEASE_SEQNO) { try { st->gcache_.seqno_release(data.release_seqno_); } catch (std::exception& e) { log_warn << "Exception releasing seqno " << data.release_seqno_ << ": " << e.what(); } } } } return 0; } galera::ServiceThd::ServiceThd (GcsI& gcs, gcache::GCache& gcache) : gcache_ (gcache), gcs_ (gcs), thd_ (), mtx_ (), cond_ (), flush_ (), data_ () { gu_thread_create (&thd_, NULL, thd_func, this); } galera::ServiceThd::~ServiceThd () { { gu::Lock lock(mtx_); data_.act_ = A_EXIT; cond_.signal(); flush_.broadcast(); } gu_thread_join(thd_, NULL); } void galera::ServiceThd::flush(const gu::UUID& uuid) { gu::Lock lock(mtx_); if (!(data_.act_ & A_EXIT)) { if (data_.act_ == A_NONE) cond_.signal(); data_.act_ 
|= A_FLUSH; do { lock.wait(flush_); } while (data_.act_ & A_FLUSH); } data_.last_committed_.set(uuid); } void galera::ServiceThd::reset() { gu::Lock lock(mtx_); data_.act_ = A_NONE; data_.last_committed_ = gu::GTID(); } void galera::ServiceThd::report_last_committed(gcs_seqno_t const seqno, bool const report) { gu::Lock lock(mtx_); if (gu_likely(data_.last_committed_.seqno() < seqno)) { data_.last_committed_.set(seqno); if (gu_likely(report)) { if (data_.act_ == A_NONE) cond_.signal(); data_.act_ |= A_LAST_COMMITTED; } } } void galera::ServiceThd::release_seqno(gcs_seqno_t seqno) { gu::Lock lock(mtx_); if (data_.release_seqno_ < seqno) { data_.release_seqno_ = seqno; if (data_.act_ == A_NONE) cond_.signal(); data_.act_ |= A_RELEASE_SEQNO; } } galera-26.4.3/galera/src/replicator_smm.cpp0000664000177500017540000031364613540715002017141 0ustar dbartmy// // Copyright (C) 2010-2018 Codership Oy // #include "galera_common.hpp" #include "replicator_smm.hpp" #include "gcs_action_source.hpp" #include "galera_exception.hpp" #include "galera_info.hpp" #include #include #include #include #define TX_SET_STATE(t_,s_) (t_).set_state(s_, __LINE__) wsrep_cap_t galera::ReplicatorSMM::capabilities(int protocol_version) { static uint64_t const v4_caps(WSREP_CAP_MULTI_MASTER | WSREP_CAP_CERTIFICATION | WSREP_CAP_PARALLEL_APPLYING | WSREP_CAP_TRX_REPLAY | WSREP_CAP_ISOLATION | WSREP_CAP_PAUSE | WSREP_CAP_CAUSAL_READS); static uint64_t const v5_caps(WSREP_CAP_INCREMENTAL_WRITESET | WSREP_CAP_UNORDERED | WSREP_CAP_PREORDERED); static uint64_t const v8_caps(WSREP_CAP_STREAMING); static uint64_t const v9_caps(WSREP_CAP_NBO); if (protocol_version == -1) return 0; assert(protocol_version >= 4); uint64_t caps(v4_caps); if (protocol_version >= 5) caps |= v5_caps; if (protocol_version >= 8) caps |= v8_caps; if (protocol_version >= 9) caps |= v9_caps; return caps; } std::ostream& galera::operator<<(std::ostream& os, ReplicatorSMM::State state) { switch (state) { case ReplicatorSMM::S_DESTROYED: 
return (os << "DESTROYED"); case ReplicatorSMM::S_CLOSED: return (os << "CLOSED"); case ReplicatorSMM::S_CONNECTED: return (os << "CONNECTED"); case ReplicatorSMM::S_JOINING: return (os << "JOINING"); case ReplicatorSMM::S_JOINED: return (os << "JOINED"); case ReplicatorSMM::S_SYNCED: return (os << "SYNCED"); case ReplicatorSMM::S_DONOR: return (os << "DONOR"); } gu_throw_fatal << "invalid state " << static_cast(state); } ////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////// // Public ////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////// galera::ReplicatorSMM::ReplicatorSMM(const struct wsrep_init_args* args) : ist_event_queue_ (), init_lib_ (reinterpret_cast(args->logger_cb)), config_ (), init_config_ (config_, args->node_address, args->data_dir), parse_options_ (*this, config_, args->options), init_ssl_ (config_), str_proto_ver_ (-1), protocol_version_ (-1), proto_max_ (gu::from_string(config_.get(Param::proto_max))), state_ (S_CLOSED), closing_mutex_ (), closing_cond_ (), closing_ (false), sst_state_ (SST_NONE), co_mode_ (CommitOrder::from_string( config_.get(Param::commit_order))), state_file_ (config_.get(BASE_DIR)+'/'+GALERA_STATE_FILE), st_ (state_file_), safe_to_bootstrap_ (true), trx_params_ (config_.get(BASE_DIR), -1, KeySet::version(config_.get(Param::key_format)), TrxHandleMaster::Defaults.record_set_ver_, gu::from_string(config_.get( Param::max_write_set_size))), uuid_ (WSREP_UUID_UNDEFINED), state_uuid_ (WSREP_UUID_UNDEFINED), state_uuid_str_ (), cc_seqno_ (WSREP_SEQNO_UNDEFINED), cc_lowest_trx_seqno_(WSREP_SEQNO_UNDEFINED), pause_seqno_ (WSREP_SEQNO_UNDEFINED), app_ctx_ (args->app_ctx), connected_cb_ (args->connected_cb), view_cb_ (args->view_cb), sst_request_cb_ (args->sst_request_cb), apply_cb_ (args->apply_cb), unordered_cb_ (args->unordered_cb), sst_donate_cb_ 
(args->sst_donate_cb), synced_cb_ (args->synced_cb), sst_donor_ (), sst_uuid_ (WSREP_UUID_UNDEFINED), sst_seqno_ (WSREP_SEQNO_UNDEFINED), sst_mutex_ (), sst_cond_ (), sst_retry_sec_ (1), sst_received_ (false), gcache_ (config_, config_.get(BASE_DIR)), gcs_ (config_, gcache_, proto_max_, args->proto_ver, args->node_name, args->node_incoming), service_thd_ (gcs_, gcache_), slave_pool_ (sizeof(TrxHandleSlave), 1024, "TrxHandleSlave"), as_ (new GcsActionSource(slave_pool_, gcs_, *this, gcache_)), ist_receiver_ (config_, gcache_, slave_pool_,*this,args->node_address), ist_senders_ (gcache_), wsdb_ (), cert_ (config_, &service_thd_), pending_cert_queue_ (), local_monitor_ (), apply_monitor_ (), commit_monitor_ (), causal_read_timeout_(config_.get(Param::causal_read_timeout)), receivers_ (), replicated_ (), replicated_bytes_ (), keys_count_ (), keys_bytes_ (), data_bytes_ (), unrd_bytes_ (), local_commits_ (), local_rollbacks_ (), local_cert_failures_(), local_replays_ (), causal_reads_ (), preordered_id_ (), incoming_list_ (""), incoming_mutex_ (), wsrep_stats_ () { // @todo add guards (and perhaps actions) state_.add_transition(Transition(S_CLOSED, S_DESTROYED)); state_.add_transition(Transition(S_CLOSED, S_CONNECTED)); state_.add_transition(Transition(S_CONNECTED, S_CLOSED)); state_.add_transition(Transition(S_CONNECTED, S_CONNECTED)); state_.add_transition(Transition(S_CONNECTED, S_JOINING)); // the following is possible only when bootstrapping new cluster // (trivial wsrep_cluster_address) state_.add_transition(Transition(S_CONNECTED, S_JOINED)); // the following are possible on PC remerge state_.add_transition(Transition(S_CONNECTED, S_DONOR)); state_.add_transition(Transition(S_CONNECTED, S_SYNCED)); state_.add_transition(Transition(S_JOINING, S_CLOSED)); // the following is possible if one non-prim conf follows another state_.add_transition(Transition(S_JOINING, S_CONNECTED)); state_.add_transition(Transition(S_JOINING, S_JOINED)); 
state_.add_transition(Transition(S_JOINED, S_CLOSED)); state_.add_transition(Transition(S_JOINED, S_CONNECTED)); state_.add_transition(Transition(S_JOINED, S_SYNCED)); // the following is possible if one desync() immediately follows another state_.add_transition(Transition(S_JOINED, S_DONOR)); state_.add_transition(Transition(S_SYNCED, S_CLOSED)); state_.add_transition(Transition(S_SYNCED, S_CONNECTED)); state_.add_transition(Transition(S_SYNCED, S_DONOR)); state_.add_transition(Transition(S_DONOR, S_CLOSED)); state_.add_transition(Transition(S_DONOR, S_CONNECTED)); state_.add_transition(Transition(S_DONOR, S_JOINED)); local_monitor_.set_initial_position(WSREP_UUID_UNDEFINED, 0); wsrep_uuid_t uuid; wsrep_seqno_t seqno; st_.get (uuid, seqno, safe_to_bootstrap_); if (0 != args->state_id && args->state_id->uuid != WSREP_UUID_UNDEFINED && args->state_id->uuid == uuid && seqno == WSREP_SEQNO_UNDEFINED) { /* non-trivial recovery information provided on startup, and db is safe * so use recovered seqno value */ seqno = args->state_id->seqno; } if (seqno >= 0) // non-trivial starting position { assert(uuid != WSREP_UUID_UNDEFINED); cc_seqno_ = seqno; // is it needed here? log_debug << "ReplicatorSMM() initial position: " << uuid << ':' << seqno; set_initial_position(uuid, seqno); cert_.assign_initial_position(gu::GTID(uuid, seqno), trx_params_.version_); gcache_.seqno_reset(gu::GTID(uuid, seqno)); // update gcache position to one supplied by app. } build_stats_vars(wsrep_stats_); } void galera::ReplicatorSMM::start_closing() { assert(closing_mutex_.locked()); assert(state_() >= S_CONNECTED); if (!closing_) { closing_ = true; gcs_.close(); } } void galera::ReplicatorSMM::shift_to_CLOSED() { assert(closing_mutex_.locked()); assert(closing_); state_.shift_to(S_CLOSED); if (state_uuid_ != WSREP_UUID_UNDEFINED) { st_.set (state_uuid_, last_committed(), safe_to_bootstrap_); } /* Cleanup for re-opening. 
*/ uuid_ = WSREP_UUID_UNDEFINED; closing_ = false; if (st_.corrupt()) { /* this is a synchronization hack to make sure all receivers are done * with their work and won't access cert module any more. The usual * monitor drain is not enough here. */ while (receivers_() > 1) usleep(1000); // this should erase the memory of a pre-existing state. set_initial_position(WSREP_UUID_UNDEFINED, WSREP_SEQNO_UNDEFINED); cert_.assign_initial_position(gu::GTID(GU_UUID_NIL, -1), trx_params_.version_); sst_uuid_ = WSREP_UUID_UNDEFINED; sst_seqno_ = WSREP_SEQNO_UNDEFINED; cc_seqno_ = WSREP_SEQNO_UNDEFINED; cc_lowest_trx_seqno_ = WSREP_SEQNO_UNDEFINED; pause_seqno_ = WSREP_SEQNO_UNDEFINED; } closing_cond_.broadcast(); } void galera::ReplicatorSMM::wait_for_CLOSED(gu::Lock& lock) { assert(closing_mutex_.locked()); assert(closing_); while (state_() > S_CLOSED) lock.wait(closing_cond_); assert(!closing_); assert(WSREP_UUID_UNDEFINED == uuid_); } galera::ReplicatorSMM::~ReplicatorSMM() { log_info << "dtor state: " << state_(); gu::Lock lock(closing_mutex_); switch (state_()) { case S_CONNECTED: case S_JOINING: case S_JOINED: case S_SYNCED: case S_DONOR: start_closing(); wait_for_CLOSED(lock); // fall through case S_CLOSED: ist_senders_.cancel(); break; case S_DESTROYED: break; } delete as_; } wsrep_status_t galera::ReplicatorSMM::connect(const std::string& cluster_name, const std::string& cluster_url, const std::string& state_donor, bool const bootstrap) { sst_donor_ = state_donor; service_thd_.reset(); // make sure there was a proper initialization/cleanup assert(WSREP_UUID_UNDEFINED == uuid_); ssize_t err = 0; wsrep_status_t ret(WSREP_OK); wsrep_seqno_t const seqno(last_committed()); wsrep_uuid_t const gcs_uuid(seqno < 0 ? 
WSREP_UUID_UNDEFINED :state_uuid_); gu::GTID const inpos(gcs_uuid, seqno); log_info << "Setting GCS initial position to " << inpos; if ((bootstrap == true || cluster_url == "gcomm://") && safe_to_bootstrap_ == false) { log_error << "It may not be safe to bootstrap the cluster from this node. " << "It was not the last one to leave the cluster and may " << "not contain all the updates. To force cluster bootstrap " << "with this node, edit the grastate.dat file manually and " << "set safe_to_bootstrap to 1 ."; ret = WSREP_NODE_FAIL; } if (ret == WSREP_OK && (err = gcs_.set_initial_position(inpos)) != 0) { log_error << "gcs init failed:" << strerror(-err); ret = WSREP_NODE_FAIL; } if (ret == WSREP_OK && (err = gcs_.connect(cluster_name, cluster_url, bootstrap)) != 0) { log_error << "gcs connect failed: " << strerror(-err); ret = WSREP_NODE_FAIL; } if (ret == WSREP_OK) { state_.shift_to(S_CONNECTED); } return ret; } wsrep_status_t galera::ReplicatorSMM::close() { gu::Lock lock(closing_mutex_); if (state_() > S_CLOSED) { start_closing(); wait_for_CLOSED(lock); } return WSREP_OK; } wsrep_status_t galera::ReplicatorSMM::async_recv(void* recv_ctx) { if (state_() <= S_CLOSED) { log_error <<"async recv cannot start, provider in CLOSED state"; return WSREP_FATAL; } ++receivers_; bool exit_loop(false); wsrep_status_t retval(WSREP_OK); while (WSREP_OK == retval && state_() > S_CLOSED) { ssize_t rc; GU_DBUG_SYNC_EXECUTE("before_async_recv_process_sync", sleep(5);); while (gu_unlikely((rc = as_->process(recv_ctx, exit_loop)) == -ECANCELED)) { recv_IST(recv_ctx); // hack: prevent fast looping until ist controlling thread // resumes gcs prosessing usleep(10000); } if (gu_unlikely(rc <= 0)) { retval = WSREP_CONN_FAIL; } else if (gu_unlikely(exit_loop == true)) { assert(WSREP_OK == retval); if (receivers_.sub_and_fetch(1) > 0) { log_info << "Slave thread exiting on request."; break; } ++receivers_; log_warn << "Refusing exit for the last slave thread."; } } /* exiting loop already did 
proper checks */ if (!exit_loop && receivers_.sub_and_fetch(1) == 0) { gu::Lock lock(closing_mutex_); if (state_() > S_CLOSED && !closing_) { assert(WSREP_CONN_FAIL == retval); /* Last recv thread exiting due to error but replicator is not * closed. We need to at least gracefully leave the cluster.*/ if (WSREP_OK == retval) { log_warn << "Broken shutdown sequence, provider state: " << state_() << ", retval: " << retval; assert (0); } start_closing(); // Generate zero view before exit to notify application gcs_act_cchange const cc; wsrep_uuid_t tmp(uuid_); wsrep_view_info_t* const err_view (galera_view_info_create(cc, capabilities(cc.repl_proto_ver), -1, tmp)); view_cb_(app_ctx_, recv_ctx, err_view, 0, 0); free(err_view); shift_to_CLOSED(); } } log_debug << "Slave thread exit. Return code: " << retval; return retval; } void galera::ReplicatorSMM::apply_trx(void* recv_ctx, TrxHandleSlave& ts) { assert(ts.global_seqno() > 0); assert(!ts.is_committed()); if (!ts.skip_event()) { assert(ts.trx_id() != uint64_t(-1) || ts.is_toi()); assert(ts.certified() /*Repl*/ || ts.preordered() /*IST*/); assert(ts.local() == false || ts.nbo_end() || (ts.flags() & TrxHandle::F_ROLLBACK)); assert(ts.nbo_end() == false || ts.is_dummy()); } ApplyException ae; ApplyOrder ao(ts); CommitOrder co(ts, co_mode_); if (gu_likely(TrxHandle::S_ABORTING != ts.state())) TX_SET_STATE(ts, TrxHandle::S_APPLYING); gu_trace(apply_monitor_.enter(ao)); if (gu_unlikely(ts.nbo_start() == true)) { // Non-blocking operation start, mark state unsafe. 
st_.mark_unsafe(); } wsrep_trx_meta_t meta = { { state_uuid_, ts.global_seqno() }, { ts.source_id(), ts.trx_id(), ts.conn_id() }, ts.depends_seqno() }; if (ts.is_toi()) { log_debug << "Executing TO isolated action: " << ts; st_.mark_unsafe(); } wsrep_bool_t exit_loop(false); try { gu_trace(ts.apply(recv_ctx, apply_cb_, meta, exit_loop)); } catch (ApplyException& e) { assert(0 != e.status()); assert(NULL != e.data() || 0 == e.data_len()); assert(0 != e.data_len() || NULL == e.data()); if (!st_.corrupt()) { assert(0 == e.data_len()); /* non-empty error must be handled in handle_apply_error(), while * still in commit monitor. */ on_inconsistency(); } } /* at this point any other exception is fatal, not catching anything else.*/ if (ts.local() == false) { GU_DBUG_SYNC_WAIT("after_commit_slave_sync"); } wsrep_seqno_t const safe_to_discard(cert_.set_trx_committed(ts)); /* For now need to keep it inside apply monitor to ensure all processing * ends by the time monitors are drained because of potential gcache * cleanup (and loss of the writeset buffer). Perhaps unordered monitor * is needed here. 
*/ ts.unordered(recv_ctx, unordered_cb_); apply_monitor_.leave(ao); if (ts.is_toi()) { log_debug << "Done executing TO isolated action: " << ts.global_seqno(); st_.mark_safe(); } if (gu_likely(ts.local_seqno() != -1)) { // trx with local seqno -1 originates from IST (or other source not gcs) report_last_committed(safe_to_discard); } ts.set_exit_loop(exit_loop); } wsrep_status_t galera::ReplicatorSMM::send(TrxHandleMaster& trx, wsrep_trx_meta_t* meta) { assert(trx.locked()); if (state_() < S_JOINED) return WSREP_TRX_FAIL; // SR rollback const bool rollback(trx.flags() & TrxHandle::F_ROLLBACK); if (rollback) { assert(trx.state() == TrxHandle::S_ABORTING); assert((trx.flags() & TrxHandle::F_BEGIN) == 0); TrxHandleSlavePtr ts(TrxHandleSlave::New(true, slave_pool_), TrxHandleSlaveDeleter()); ts->set_global_seqno(0); trx.add_replicated(ts); } WriteSetNG::GatherVector actv; size_t act_size = trx.gather(actv); ssize_t rcode(0); do { const bool scheduled(!rollback); if (scheduled) { const ssize_t gcs_handle(gcs_.schedule()); if (gu_unlikely(gcs_handle < 0)) { log_debug << "gcs schedule " << strerror(-gcs_handle); rcode = gcs_handle; goto out; } trx.set_gcs_handle(gcs_handle); } trx.finalize(last_committed()); trx.unlock(); // On rollback fragment, we instruct sendv to use gcs_sm_grab() // to avoid the scenario where trx is BF aborted but can't send // ROLLBACK fragment due to flow control, which results in // deadlock. // Otherwise sendv call was scheduled above, and we instruct // the call to use regular gcs_sm_enter() const bool grab(rollback); rcode = gcs_.sendv(actv, act_size, GCS_ACT_WRITESET, scheduled, grab); GU_DBUG_SYNC_WAIT("after_send_sync"); trx.lock(); } // TODO: Break loop after some timeout while (rcode == -EAGAIN && (usleep(1000), true)); trx.set_gcs_handle(-1); out: if (rcode <= 0) { log_debug << "ReplicatorSMM::send failed: " << -rcode; } return (rcode > 0 ? 
WSREP_OK : WSREP_TRX_FAIL); } wsrep_status_t galera::ReplicatorSMM::replicate(TrxHandleMaster& trx, wsrep_trx_meta_t* meta) { assert(trx.locked()); assert(!(trx.flags() & TrxHandle::F_ROLLBACK)); assert(trx.state() == TrxHandle::S_EXECUTING || trx.state() == TrxHandle::S_MUST_ABORT); if (state_() < S_JOINED || trx.state() == TrxHandle::S_MUST_ABORT) { must_abort: if (trx.state() == TrxHandle::S_EXECUTING || trx.state() == TrxHandle::S_REPLICATING) TX_SET_STATE(trx, TrxHandle::S_MUST_ABORT); TX_SET_STATE(trx, TrxHandle::S_ABORTING); if (trx.ts() != 0) { assert(trx.ts()->state() == TrxHandle::S_COMMITTED); trx.reset_ts(); } return (st_.corrupt() ? WSREP_NODE_FAIL : WSREP_CONN_FAIL); } WriteSetNG::GatherVector actv; gcs_action act; act.type = GCS_ACT_WRITESET; #ifndef NDEBUG act.seqno_g = GCS_SEQNO_ILL; #endif act.buf = NULL; act.size = trx.gather(actv); TX_SET_STATE(trx, TrxHandle::S_REPLICATING); ssize_t rcode(-1); do { assert(act.seqno_g == GCS_SEQNO_ILL); const ssize_t gcs_handle(gcs_.schedule()); if (gu_unlikely(gcs_handle < 0)) { log_debug << "gcs schedule " << strerror(-gcs_handle); goto must_abort; } trx.set_gcs_handle(gcs_handle); trx.finalize(last_committed()); trx.unlock(); assert (act.buf == NULL); // just a sanity check rcode = gcs_.replv(actv, act, true); GU_DBUG_SYNC_WAIT("after_replicate_sync") trx.lock(); } while (rcode == -EAGAIN && trx.state() != TrxHandle::S_MUST_ABORT && (usleep(1000), true)); trx.set_gcs_handle(-1); if (rcode < 0) { if (rcode != -EINTR) { log_debug << "gcs_repl() failed with " << strerror(-rcode) << " for trx " << trx; } assert(rcode != -EINTR || trx.state() == TrxHandle::S_MUST_ABORT); assert(act.seqno_l == GCS_SEQNO_ILL && act.seqno_g == GCS_SEQNO_ILL); assert(NULL == act.buf); if (trx.state() != TrxHandle::S_MUST_ABORT) { TX_SET_STATE(trx, TrxHandle::S_MUST_ABORT); } goto must_abort; } assert(act.buf != NULL); assert(act.size == rcode); assert(act.seqno_l > 0); assert(act.seqno_g > 0); TrxHandleSlavePtr 
ts(TrxHandleSlave::New(true, slave_pool_), TrxHandleSlaveDeleter()); gu_trace(ts->unserialize(act)); ts->set_local(true); ts->update_stats(keys_count_, keys_bytes_, data_bytes_, unrd_bytes_); trx.add_replicated(ts); ++replicated_; replicated_bytes_ += rcode; assert(trx.source_id() == ts->source_id()); assert(trx.conn_id() == ts->conn_id()); assert(trx.trx_id() == ts->trx_id()); assert(ts->global_seqno() == act.seqno_g); assert(ts->last_seen_seqno() >= 0); assert(trx.ts() == ts); wsrep_status_t retval(WSREP_TRX_FAIL); // ROLLBACK event shortcut to avoid blocking in monitors or // getting BF aborted inside provider if (gu_unlikely(ts->flags() & TrxHandle::F_ROLLBACK)) { // ROLLBACK fragments should be replicated through ReplicatorSMM::send(), // assert here for debug builds to catch if this is not a case. assert(0); assert(ts->depends_seqno() > 0); // must be set at unserialization ts->cert_bypass(true); ts->mark_certified(); TX_SET_STATE(trx, TrxHandle::S_MUST_ABORT); TX_SET_STATE(trx, TrxHandle::S_ABORTING); // to pass asserts in post_rollback TX_SET_STATE(*ts, TrxHandle::S_ABORTING); pending_cert_queue_.push(ts); cancel_monitors_for_local(*ts); goto out; } if (gu_unlikely(trx.state() == TrxHandle::S_MUST_ABORT)) { retval = cert_for_aborted(ts); if (retval != WSREP_BF_ABORT) { assert(trx.state() == TrxHandle::S_MUST_ABORT); TX_SET_STATE(trx, TrxHandle::S_ABORTING); pending_cert_queue_.push(ts); cancel_monitors_for_local(*ts); assert(ts->is_dummy()); assert(WSREP_OK != retval); } else { // If the transaction was committing, it must replay. 
if (ts->flags() & TrxHandle::F_COMMIT) { TX_SET_STATE(trx, TrxHandle::S_MUST_REPLAY); } else { TX_SET_STATE(*ts, TrxHandle::S_ABORTING); TX_SET_STATE(trx, TrxHandle::S_ABORTING); pending_cert_queue_.push(ts); cancel_monitors_for_local(*ts); retval = WSREP_TRX_FAIL; } } } else { assert(trx.state() == TrxHandle::S_REPLICATING); retval = WSREP_OK; } out: assert(trx.state() != TrxHandle::S_MUST_ABORT); assert(ts->global_seqno() > 0); assert(ts->global_seqno() == act.seqno_g); if (meta != 0) // whatever the retval, we must update GTID in meta { meta->gtid.uuid = state_uuid_; meta->gtid.seqno = ts->global_seqno(); meta->depends_on = ts->depends_seqno(); } return retval; } wsrep_status_t galera::ReplicatorSMM::abort_trx(TrxHandleMaster& trx, wsrep_seqno_t bf_seqno, wsrep_seqno_t* victim_seqno) { assert(trx.local() == true); assert(trx.locked()); const TrxHandleSlavePtr ts(trx.ts()); if (ts) { log_debug << "aborting ts " << *ts; assert(ts->global_seqno() != WSREP_SEQNO_UNDEFINED); if (ts->global_seqno() < bf_seqno && (ts->flags() & TrxHandle::F_COMMIT)) { log_debug << "seqno " << bf_seqno << " trying to abort seqno " << ts->global_seqno(); *victim_seqno = ts->global_seqno(); return WSREP_NOT_ALLOWED; } } else { log_debug << "aborting trx " << trx; } wsrep_status_t retval(WSREP_OK); switch (trx.state()) { case TrxHandle::S_MUST_ABORT: case TrxHandle::S_ABORTING: case TrxHandle::S_MUST_REPLAY: // victim trx was already BF aborted or it failed certification retval = WSREP_NOT_ALLOWED; break; case TrxHandle::S_EXECUTING: TX_SET_STATE(trx, TrxHandle::S_MUST_ABORT); break; case TrxHandle::S_REPLICATING: { // @note: it is important to place set_state() into beginning of // every case, because state must be changed AFTER switch() and // BEFORE entering monitors or taking any other action. 
TX_SET_STATE(trx, TrxHandle::S_MUST_ABORT); int rc; if (trx.gcs_handle() > 0 && ((rc = gcs_.interrupt(trx.gcs_handle()))) != 0) { log_debug << "gcs_interrupt(): handle " << trx.gcs_handle() << " trx id " << trx.trx_id() << ": " << strerror(-rc); } break; } case TrxHandle::S_CERTIFYING: { // trx is waiting in local monitor assert(ts); assert(ts->global_seqno() > 0); log_debug << "aborting ts: " << *ts << "; BF seqno: " << bf_seqno << "; local position: " << local_monitor_.last_left(); TX_SET_STATE(trx, TrxHandle::S_MUST_ABORT); LocalOrder lo(*ts); local_monitor_.interrupt(lo); break; } case TrxHandle::S_APPLYING: { // trx is waiting in apply monitor assert(ts); assert(ts->global_seqno() > 0); log_debug << "aborting ts: " << *ts << "; BF seqno: " << bf_seqno << "; apply window: " << apply_monitor_.last_left() << " - " << apply_monitor_.last_entered(); TX_SET_STATE(trx, TrxHandle::S_MUST_ABORT); ApplyOrder ao(*ts); apply_monitor_.interrupt(ao); break; } case TrxHandle::S_COMMITTING: { // Trx is waiting in commit monitor assert(ts); assert(ts->global_seqno() > 0); log_debug << "aborting ts: " << *ts << "; BF seqno: " << bf_seqno << "; commit position: " << last_committed(); if (co_mode_ != CommitOrder::BYPASS) { CommitOrder co(*ts, co_mode_); bool const interrupted(commit_monitor_.interrupt(co)); if (interrupted || !(ts->flags() & TrxHandle::F_COMMIT)) { TX_SET_STATE(trx, TrxHandle::S_MUST_ABORT); } else { retval = WSREP_NOT_ALLOWED; } } break; } case TrxHandle::S_COMMITTED: assert(ts); assert(ts->global_seqno() > 0); if (ts->global_seqno() < bf_seqno && (ts->flags() & TrxHandle::F_COMMIT)) { retval = WSREP_NOT_ALLOWED; } else { retval = WSREP_OK; } break; case TrxHandle::S_ROLLING_BACK: log_error << "Attempt to enter commit monitor while holding " "locks in rollback by " << trx; // fallthrough default: log_warn << "invalid state " << trx.state() << " in abort_trx for trx" << trx; assert(0); } if (retval == WSREP_OK || retval == WSREP_NOT_ALLOWED) { *victim_seqno = (ts 
!= 0 ? ts->global_seqno() : WSREP_SEQNO_UNDEFINED); } return retval; } wsrep_status_t galera::ReplicatorSMM::certify(TrxHandleMaster& trx, wsrep_trx_meta_t* meta) { assert(trx.state() == TrxHandle::S_REPLICATING); TrxHandleSlavePtr ts(trx.ts()); assert(ts->state() == TrxHandle::S_REPLICATING); // Rollback should complete with post_rollback assert((ts->flags() & TrxHandle::F_ROLLBACK) == 0); assert(ts->local_seqno() > 0); assert(ts->global_seqno() > 0); assert(ts->last_seen_seqno() >= 0); assert(ts->depends_seqno() >= -1); if (meta != 0) { assert(meta->gtid.uuid == state_uuid_); assert(meta->gtid.seqno == ts->global_seqno()); assert(meta->depends_on == ts->depends_seqno()); } // State should not be checked here: If trx has been replicated, // it has to be certified and potentially applied. #528 // if (state_() < S_JOINED) return WSREP_TRX_FAIL; wsrep_status_t retval(cert_and_catch(&trx, ts)); assert((ts->flags() & TrxHandle::F_ROLLBACK) == 0 || trx.state() == TrxHandle::S_ABORTING); if (gu_unlikely(retval != WSREP_OK)) { switch(retval) { case WSREP_BF_ABORT: assert(ts->depends_seqno() >= 0); assert(trx.state() == TrxHandle::S_MUST_REPLAY || !(ts->flags() & TrxHandle::F_COMMIT)); assert(ts->state() == TrxHandle::S_REPLICATING || ts->state() == TrxHandle::S_CERTIFYING || ts->state() == TrxHandle::S_ABORTING); // apply monitor will be entered in due course during replay break; case WSREP_TRX_FAIL: /* committing fragment fails certification or non-committing BF'ed */ assert(ts->depends_seqno() < 0 || (ts->flags() & TrxHandle::F_COMMIT) == 0); assert(ts->state() == TrxHandle::S_ABORTING); // trx will rollback, must enter apply monitor without blocking apply_monitor_enter_immediately(*ts.get()); break; default: assert(0); } return retval; } assert(ts->global_seqno() > last_committed()); assert(ts->depends_seqno() >= 0); TX_SET_STATE(trx, TrxHandle::S_APPLYING); ApplyOrder ao(*ts); bool interrupted(false); try { trx.unlock(); 
// ---- tail of the enclosing local certification routine ----
// The function header is in a previous chunk of this archive; visible here are
// the apply-monitor entry after certification and the BF-abort handling.
// NOTE(review): template argument lists appear stripped by the archive
// extraction throughout this file (e.g. "static_cast(-1)"); restore them from
// upstream sources before compiling.
GU_DBUG_SYNC_WAIT("before_certify_apply_monitor_enter");
gu_trace(apply_monitor_.enter(ao));
GU_DBUG_SYNC_WAIT("after_certify_apply_monitor_enter");
trx.lock();
assert(trx.state() == TrxHandle::S_APPLYING ||
       trx.state() == TrxHandle::S_MUST_ABORT);
}
catch (gu::Exception& e)
{
    trx.lock();
    // EINTR means the monitor wait was interrupted (e.g. by a BF abort);
    // anything else is unexpected and is rethrown.
    if (e.get_errno() == EINTR) { interrupted = true; }
    else throw;
}

if (gu_unlikely(interrupted || trx.state() == TrxHandle::S_MUST_ABORT))
{
    assert(trx.state() == TrxHandle::S_MUST_ABORT);

    if (ts->flags() & TrxHandle::F_COMMIT)
    {
        // Committing fragment: the transaction must go to replay.
        TX_SET_STATE(trx, TrxHandle::S_MUST_REPLAY);
        if (!interrupted) TX_SET_STATE(*ts, TrxHandle::S_APPLYING);
        else              assert(ts->state() == TrxHandle::S_CERTIFYING);
    }
    else
    {
        // Non-committing fragment: roll back. If the monitor entry itself
        // was interrupted, the apply monitor must still be entered to keep
        // the global ordering intact.
        TX_SET_STATE(*ts, TrxHandle::S_ABORTING);
        TX_SET_STATE(trx, TrxHandle::S_ABORTING);
        if (interrupted == true)
        {
            assert(!apply_monitor_.entered(ao));
            apply_monitor_enter_immediately(*ts);
        }
        else
        {
            assert(apply_monitor_.entered(ao));
        }
    }

    retval = WSREP_BF_ABORT;
}
else
{
    assert(apply_monitor_.entered(ao));
    TX_SET_STATE(*ts, TrxHandle::S_APPLYING);
}

assert(trx.state() != TrxHandle::S_MUST_ABORT);

assert((retval == WSREP_OK && (trx.state() == TrxHandle::S_APPLYING ||
                               trx.state() == TrxHandle::S_EXECUTING)) ||
       (retval == WSREP_BF_ABORT && (
           trx.state() == TrxHandle::S_MUST_REPLAY ||
           trx.state() == TrxHandle::S_ABORTING)) );

if (meta) meta->depends_on = ts->depends_seqno();

return retval;
}

// Replays a BF-aborted local transaction: re-certifies it if needed,
// re-enters the apply monitor and re-applies the writeset through apply_cb_.
// Returns WSREP_OK on successful replay, WSREP_NODE_FAIL on replay failure
// (node is marked inconsistent) or the certification failure status.
wsrep_status_t galera::ReplicatorSMM::replay_trx(TrxHandleMaster& trx,
                                                 TrxHandleLock&   lock,
                                                 void* const      trx_ctx)
{
    TrxHandleSlavePtr tsp(trx.ts());
    assert(tsp);
    TrxHandleSlave& ts(*tsp);
    assert(ts.global_seqno() > last_committed());

    log_debug << "replay trx: " << trx << " ts: " << ts;

    if (trx.state() == TrxHandle::S_MUST_ABORT)
    {
        /* Aborted after certify() returned (meaning apply monitor entered) */
#ifndef NDEBUG
        ApplyOrder ao(ts);
        assert(apply_monitor_.entered(ao));
#endif
        TX_SET_STATE(trx, TrxHandle::S_MUST_REPLAY);
    }

    assert(trx.state() == TrxHandle::S_MUST_REPLAY);
    assert(trx.trx_id() != static_cast(-1));

    wsrep_status_t retval(WSREP_OK);

    // Note: We set submit NULL trx pointer below to avoid
    // interrupting replaying in any monitor during replay.

    // The switch falls through the states a replaying trx has to pass:
    // re-certify -> enter apply monitor -> apply.
    switch (ts.state())
    {
    case TrxHandle::S_REPLICATING:
        retval = cert_and_catch(&trx, tsp);
        if (retval != WSREP_OK)
        {
            assert(retval == WSREP_TRX_FAIL);
            assert(ts.state() == TrxHandle::S_ABORTING);
            apply_monitor_enter_immediately(ts);
            break;
        }
        assert(ts.state() == TrxHandle::S_CERTIFYING);
        // fall through
    case TrxHandle::S_CERTIFYING:
    {
        assert(ts.state() == TrxHandle::S_CERTIFYING);

        // safety measure to make sure that all preceding trxs finish before
        // replaying
        wsrep_seqno_t const ds(ts.depends_seqno());
        ts.set_depends_seqno(ts.global_seqno() - 1);

        ApplyOrder ao(ts);
        assert(apply_monitor_.entered(ao) == false);
        gu_trace(apply_monitor_.enter(ao));

        // restore dependency info
        ts.set_depends_seqno(WSREP_SEQNO_UNDEFINED);
        ts.set_depends_seqno(ds);

        TX_SET_STATE(ts, TrxHandle::S_APPLYING);
    }
    // fall through
    case TrxHandle::S_APPLYING:
        //
        // Commit monitor will be entered from commit_order_enter_remote.
        //
        // fall through
    case TrxHandle::S_COMMITTING:
        ++local_replays_;

        TX_SET_STATE(trx, TrxHandle::S_REPLAYING);
        try
        {
            // Only committing transactions should be replayed
            assert(ts.flags() & TrxHandle::F_COMMIT);

            wsrep_trx_meta_t meta = {{ state_uuid_, ts.global_seqno() },
                                     { ts.source_id(), ts.trx_id(),
                                       ts.conn_id() },
                                     ts.depends_seqno()};

            /* failure to replay own trx is certainly a sign of inconsistency,
             * not trying to catch anything here */
            assert(trx.owned());
            bool unused(false);
            // trx lock released for the duration of the (blocking) apply
            lock.unlock();
            gu_trace(ts.apply(trx_ctx, apply_cb_, meta, unused));
            lock.lock();
            assert(false == unused);
            log_debug << "replayed " << ts.global_seqno();
            assert(ts.state() == TrxHandle::S_COMMITTED);
            assert(trx.state() == TrxHandle::S_COMMITTED);
        }
        catch (gu::Exception& e)
        {
            on_inconsistency();
            return WSREP_NODE_FAIL;
        }

        // apply, commit monitors are released in post commit
        return WSREP_OK;
    default:
        assert(0);
        gu_throw_fatal << "Invalid state in replay for trx " << trx;
    }

    log_debug << "replaying failed for trx " << trx;
    assert(trx.state() == TrxHandle::S_ABORTING);

    return retval;
}

// Dumps up to buf_len bytes from buf into os: printable/space characters
// verbatim, others as backslash-escaped octal; stops at the first NUL byte.
// Stream formatting state (flags, fill) is saved and restored.
static void dump_buf(std::ostream& os, const void* const buf,
                     size_t const buf_len)
{
    std::ios_base::fmtflags const saved_flags(os.flags());
    char const saved_fill (os.fill('0'));

    os << std::oct;

    const char* const str(static_cast(buf));
    for (size_t i(0); i < buf_len; ++i)
    {
        char const c(str[i]);

        if ('\0' == c) break;

        try
        {
            if (isprint(c) || isspace(c))
            {
                os.put(c);
            }
            else
            {
                os << '\\' << std::setw(2) << int(c);
            }
        }
        catch (std::ios_base::failure& f)
        {
            log_warn << "Failed to dump " << i << "th byte: " << f.what();
            break;
        }
    }

    os.flags(saved_flags);
    os.fill (saved_fill);
}

// Resolves a BF abort that interrupted commit ordering: committing
// transactions are sent to replay (WSREP_BF_ABORT), non-committing
// fragments are rolled back (WSREP_TRX_FAIL).
wsrep_status_t
galera::ReplicatorSMM::handle_commit_interrupt(TrxHandleMaster& trx,
                                               const TrxHandleSlave& ts)
{
    assert(trx.state() == TrxHandle::S_MUST_ABORT);

    if (ts.flags() & TrxHandle::F_COMMIT)
    {
        TX_SET_STATE(trx, TrxHandle::S_MUST_REPLAY);
        return WSREP_BF_ABORT;
    }
    else
    {
        TX_SET_STATE(trx, TrxHandle::S_ABORTING);
        return WSREP_TRX_FAIL;
    }
}
// Enters the commit monitor for a local transaction and moves it to
// S_COMMITTING (or S_ROLLING_BACK when it was already aborting). Returns
// WSREP_OK, or the result of handle_commit_interrupt() if monitor entry was
// interrupted by a BF abort.
wsrep_status_t
galera::ReplicatorSMM::commit_order_enter_local(TrxHandleMaster& trx)
{
    assert(trx.local());
    assert(trx.ts() && trx.ts()->global_seqno() > 0);
    assert(trx.locked());

    assert(trx.state() == TrxHandle::S_APPLYING ||
           trx.state() == TrxHandle::S_ABORTING ||
           trx.state() == TrxHandle::S_REPLAYING);

    TrxHandle::State const next_state
        (trx.state() == TrxHandle::S_ABORTING ?
         TrxHandle::S_ROLLING_BACK : TrxHandle::S_COMMITTING);

    TX_SET_STATE(trx, next_state);

    if (gu_likely(co_mode_ != CommitOrder::BYPASS))
    {
        TrxHandleSlavePtr tsp(trx.ts());
        TrxHandleSlave& ts(*tsp);

#ifndef NDEBUG
        {
            ApplyOrder ao(ts);
            assert(apply_monitor_.entered(ao));
        }
#endif

        CommitOrder co(ts, co_mode_);
        if (ts.state() < TrxHandle::S_COMMITTING)
        {
            assert(!commit_monitor_.entered(co));
        }
        else
        {
            /* was BF'ed after having entered commit monitor */
            assert(commit_monitor_.entered(co));
            return WSREP_OK;
        }

        try
        {
            // trx lock is released for the (potentially blocking) monitor
            // entry and re-taken afterwards
            trx.unlock();
            GU_DBUG_SYNC_WAIT("before_local_commit_monitor_enter");
            gu_trace(commit_monitor_.enter(co));
            assert(commit_monitor_.entered(co));
            trx.lock();

            TX_SET_STATE(ts, ts.state() == TrxHandle::S_APPLYING ?
                         TrxHandle::S_COMMITTING : TrxHandle::S_ROLLING_BACK);

            /* non-committing fragments may be interrupted after having entered
             * commit_monitor_ */
            if (0 == (ts.flags() & TrxHandle::F_COMMIT) &&
                trx.state() == TrxHandle::S_MUST_ABORT)
                return handle_commit_interrupt(trx, ts);

            assert(trx.state() == TrxHandle::S_COMMITTING ||
                   trx.state() == TrxHandle::S_ROLLING_BACK);
        }
        catch (gu::Exception& e)
        {
            assert(!commit_monitor_.entered(co));
            assert(next_state != TrxHandle::S_ROLLING_BACK);
            trx.lock();
            // EINTR: monitor entry interrupted by BF abort
            if (e.get_errno() == EINTR)
            {
                return handle_commit_interrupt(trx, ts);
            }
            else throw;
        }

        assert(ts.global_seqno() > last_committed());
    }

    assert(trx.locked());

    assert(trx.state() == TrxHandle::S_COMMITTING ||
           trx.state() == TrxHandle::S_ROLLING_BACK);

    return WSREP_OK;
}

// Enters the commit monitor for an applier-side (or replaying) transaction
// and advances its state accordingly. Always returns WSREP_OK.
wsrep_status_t
galera::ReplicatorSMM::commit_order_enter_remote(TrxHandleSlave& trx)
{
    assert(trx.global_seqno() > 0);
    assert(trx.state() == TrxHandle::S_APPLYING ||
           trx.state() == TrxHandle::S_ABORTING);

#ifndef NDEBUG
    // NOTE(review): the assert above rules out S_REPLAYING, so this debug
    // branch appears unreachable in debug builds; presumably kept for
    // documentation purposes — confirm against upstream.
    if (trx.state() == TrxHandle::S_REPLAYING)
    {
        assert(trx.local());
        assert((trx.flags() & TrxHandle::F_ROLLBACK) == 0);

        ApplyOrder ao(trx);
        assert(apply_monitor_.entered(ao));
    }
#endif /* NDEBUG */

    CommitOrder co(trx, co_mode_);

    assert(!commit_monitor_.entered(co));

    if (gu_likely(co_mode_ != CommitOrder::BYPASS))
    {
        gu_trace(commit_monitor_.enter(co));
    }

    TX_SET_STATE(trx, trx.state() == TrxHandle::S_ABORTING ?
TrxHandle::S_ROLLING_BACK : TrxHandle::S_COMMITTING);

    assert(trx.state() == TrxHandle::S_COMMITTING ||
           trx.state() == TrxHandle::S_ROLLING_BACK);

    return WSREP_OK;
}

// Votes on the outcome of a failed writeset application and either throws
// ApplyException (when the group deems this node inconsistent) or marks the
// action's seqno as skipped in GCache (when the failure vote is accepted).
void galera::ReplicatorSMM::process_apply_error(TrxHandleSlave& trx,
                                                const wsrep_buf_t& error)
{
    gu::GTID const gtid(state_uuid_, trx.global_seqno());
    int res;

    if (trx.local_seqno() != -1 || trx.nbo_end())
    {
        /* this must be done IN ORDER to avoid multiple elections, hence
         * anything else but LOCAL_OOOC and NO_OOOC is potentially broken */
        res = gcs_.vote(gtid, -1, error.ptr, error.len);
    }
    else res = 2;

    if (res != 0)
    {
        std::ostringstream os;

        switch (res)
        {
        case 2:
            os << "Failed on preordered " << gtid << ": inconsistency.";
            break;
        case 1:
            os << "Inconsistent by consensus on " << gtid;
            break;
        default:
            os << "Could not reach consensus on " << gtid
               << " (rcode: " << res << "), assuming inconsistency.";
        }

        galera::ApplyException ae(os.str(), NULL, error.ptr, error.len);
        GU_TRACE(ae);
        throw ae;
    }
    else
    {
        /* mark action as invalid (skip seqno) and return normally */
        gcache_.seqno_skip(trx.action().first, trx.global_seqno(),
                           GCS_ACT_WRITESET);
    }
}

// Logs an apply error and, unless the node is already marked corrupt,
// submits it to group voting via process_apply_error(). Returns WSREP_OK
// when the error was resolved by consensus, WSREP_NODE_FAIL on
// inconsistency; unexpected exceptions abort the process.
wsrep_status_t
galera::ReplicatorSMM::handle_apply_error(TrxHandleSlave& ts,
                                          const wsrep_buf_t& error,
                                          const std::string& custom_msg)
{
    assert(error.len > 0);

    std::ostringstream os;
    os << custom_msg << ts.global_seqno() << ", error: ";
    dump_buf(os, error.ptr, error.len);
    log_debug << "handle_apply_error(): " << os.str();

    try
    {
        if (!st_.corrupt())
            gu_trace(process_apply_error(ts, error));

        return WSREP_OK;
    }
    catch (ApplyException& e)
    {
        log_error << "Inconsistency detected: " << e.what();
        on_inconsistency();
    }
    catch (gu::Exception& e)
    {
        log_error << "Unexpected exception: " << e.what();
        assert(0);
        abort();
    }
    catch (...)
    {
        log_error << "Unknown exception";
        assert(0);
        abort();
    }

    return WSREP_NODE_FAIL;
}

// Leaves the commit monitor and sets the terminal slave state; a non-empty
// error buffer downgrades the outcome to rollback and triggers error voting.
wsrep_status_t
galera::ReplicatorSMM::commit_order_leave(TrxHandleSlave& trx,
                                          const wsrep_buf_t* const error)
{
    if (trx.state() == TrxHandle::S_MUST_ABORT &&
        (trx.flags() & TrxHandle::F_COMMIT))
    {
        assert(0);
        // This is possible in case of ALG: BF applier BF aborts
        // trx that has already grabbed commit monitor and is committing.
        // However, this should be acceptable assuming that commit
        // operation does not reserve any more resources and is able
        // to release already reserved resources.
        log_debug << "trx was BF aborted during commit: " << trx;
        // manipulate state to avoid crash
        TX_SET_STATE(trx, TrxHandle::S_MUST_REPLAY);
        TX_SET_STATE(trx, TrxHandle::S_REPLAYING);
    }

    assert(trx.state() == TrxHandle::S_COMMITTING ||
           trx.state() == TrxHandle::S_REPLAYING ||
           trx.state() == TrxHandle::S_ABORTING ||
           trx.state() == TrxHandle::S_ROLLING_BACK);

#ifndef NDEBUG
    {
        CommitOrder co(trx, co_mode_);
        assert(co_mode_ != CommitOrder::BYPASS ||
               commit_monitor_.entered(co));
    }
#endif

    TrxHandle::State end_state(trx.state() == TrxHandle::S_ROLLING_BACK ?
TrxHandle::S_ROLLED_BACK : TrxHandle::S_COMMITTED);

    wsrep_status_t retval(WSREP_OK);

    if (gu_unlikely(error != NULL && error->ptr != NULL))
    {
        // apply failed: final state becomes ROLLED_BACK and the error is
        // put up for a consistency vote
        end_state = TrxHandle::S_ROLLED_BACK;
        retval = handle_apply_error(trx, *error, "Failed to apply writeset ");
    }

    if (gu_likely(co_mode_ != CommitOrder::BYPASS))
    {
        CommitOrder co(trx, co_mode_);
        commit_monitor_.leave(co);
    }

    TX_SET_STATE(trx, end_state);

    /* master state will be set upon release */

    return retval;
}

// Releases the apply monitor after a committed local transaction, updates
// certification bookkeeping and reports the safe-to-discard seqno.
// Streaming fragments (no F_COMMIT flag) return to S_EXECUTING.
wsrep_status_t galera::ReplicatorSMM::release_commit(TrxHandleMaster& trx)
{
    TrxHandleSlavePtr tsp(trx.ts());
    assert(tsp);
    TrxHandleSlave& ts(*tsp);

#ifndef NDEBUG
    {
        CommitOrder co(ts, co_mode_);
        assert(co_mode_ == CommitOrder::BYPASS ||
               commit_monitor_.entered(co) == false);
    }
#endif

    log_debug << "release_commit() for trx: " << trx << " ts: " << ts;

    assert((ts.flags() & TrxHandle::F_ROLLBACK) == 0);
    assert(ts.local_seqno() > 0 && ts.global_seqno() > 0);
    assert(ts.state() == TrxHandle::S_COMMITTED);
    // Streaming transaction may enter here in aborting state if the
    // BF abort happens during fragment commit ordering. Otherwise
    // should always be committed.
    assert(trx.state() == TrxHandle::S_COMMITTED ||
           (trx.state() == TrxHandle::S_ABORTING &&
            (ts.flags() & TrxHandle::F_COMMIT) == 0));
    assert(!ts.is_committed());

    wsrep_seqno_t const safe_to_discard(cert_.set_trx_committed(ts));

    ApplyOrder ao(ts);
    apply_monitor_.leave(ao);

    if ((ts.flags() & TrxHandle::F_COMMIT) == 0 &&
        trx.state() == TrxHandle::S_COMMITTED)
    {
        // continue streaming
        TX_SET_STATE(trx, TrxHandle::S_EXECUTING);
    }

    trx.reset_ts();

    ++local_commits_;

    report_last_committed(safe_to_discard);

    return WSREP_OK;
}

// Finalizes a rolled-back local transaction: transitions it to
// S_ROLLED_BACK and, when a slave handle with a valid seqno exists, passes
// through the commit and apply monitors to preserve global ordering.
wsrep_status_t galera::ReplicatorSMM::release_rollback(TrxHandleMaster& trx)
{
    assert(trx.locked());

    if (trx.state() == TrxHandle::S_MUST_ABORT) // BF abort before replication
        TX_SET_STATE(trx, TrxHandle::S_ABORTING);

    if (trx.state() == TrxHandle::S_ABORTING ||
        trx.state() == TrxHandle::S_EXECUTING)
        TX_SET_STATE(trx, TrxHandle::S_ROLLED_BACK);

    assert(trx.state() == TrxHandle::S_ROLLED_BACK);

    TrxHandleSlavePtr tsp(trx.ts());
    if (tsp)
    {
        TrxHandleSlave& ts(*tsp);
        log_debug << "release_rollback() trx: " << trx << ", ts: " << ts;
        if (ts.global_seqno() > 0)
        {
            // ts.depends_seqno() may be invalid here
            ApplyOrder ao(ts.global_seqno(), 0, ts.local());
            assert(apply_monitor_.entered(ao));

            if (ts.state() < TrxHandle::S_COMMITTED)
            {
                CommitOrder co(ts, co_mode_);
                if (ts.state() < TrxHandle::S_COMMITTING)
                {
                    assert(!commit_monitor_.entered(co));
                    commit_monitor_.enter(co);
                }
                assert(commit_monitor_.entered(co));
                commit_monitor_.leave(co);
            }
            //committed
            else
            {
                assert(trx.state() == TrxHandle::S_ROLLED_BACK);
                assert(ts.state() == TrxHandle::S_ROLLED_BACK ||
                       ts.state() == TrxHandle::S_COMMITTED);
                assert(ts.global_seqno() <= commit_monitor_.last_left());
            }

            assert(commit_monitor_.last_left() >= ts.global_seqno());

            /* Queued transactions will be set committed in the queue */
            wsrep_seqno_t const safe_to_discard
                (ts.queued() ?
WSREP_SEQNO_UNDEFINED : cert_.set_trx_committed(ts));
            apply_monitor_.leave(ao);
            report_last_committed(safe_to_discard);
        }
        else
        {
            assert(0); // remove this if()
        }
    }
    else
    {
        log_debug << "release_rollback() trx: " << trx << ", ts: nil";
    }

    // Trx was either rolled back by user or via certification failure,
    // last committed report not needed since cert index state didn't change.
    // report_last_committed();

    trx.reset_ts();

    ++local_rollbacks_;

    return WSREP_OK;
}

// Causal-read barrier: waits until the node has applied at least up to
// 'upto' (or the current group causal position when upto == 0). tout is the
// timeout in seconds (-1 selects causal_read_timeout_); on success the last
// committed GTID is optionally stored in 'gtid'.
wsrep_status_t galera::ReplicatorSMM::sync_wait(wsrep_gtid_t* upto,
                                                int           tout,
                                                wsrep_gtid_t* gtid)
{
    gu::GTID wait_gtid;
    gu::datetime::Date wait_until(gu::datetime::Date::calendar()
                                  + ((tout == -1) ?
                                     gu::datetime::Period(causal_read_timeout_) :
                                     gu::datetime::Period(tout * gu::datetime::Sec)));

    if (upto == 0)
    {
        try
        {
            // ask GCS for the current causal position
            gcs_.caused(wait_gtid, wait_until);
        }
        catch (gu::Exception& e)
        {
            log_warn << "gcs_caused() returned " << -e.get_errno()
                     << " (" << strerror(e.get_errno()) << ")";
            return WSREP_TRX_FAIL;
        }
    }
    else
    {
        wait_gtid.set(upto->uuid, upto->seqno);
    }

    try
    {
        // @note: Using timed wait for monitor is currently a hack
        // to avoid deadlock resulting from race between monitor wait
        // and drain during configuration change. Instead of this,
        // monitor should have proper mechanism to interrupt waiters
        // at monitor drain and disallowing further waits until
        // configuration change related operations (SST etc) have been
        // finished.
        // Note: Since wsrep API 26 application may request release of
        // commit monitor before the commit actually happens (commit
        // may have been ordered/queued on application side for later
        // processing). Therefore we now rely on apply_monitor on sync
        // wait. This is sufficient since apply_monitor is always released
        // only after the whole transaction is over.
        apply_monitor_.wait(wait_gtid, wait_until);

        if (gtid != 0)
        {
            (void)last_committed_id(gtid);
        }
        ++causal_reads_;
        return WSREP_OK;
    }
    catch (gu::NotFound& e)
    {
        log_debug << "monitor wait failed for sync_wait: UUID mismatch";
        return WSREP_TRX_MISSING;
    }
    catch (gu::Exception& e)
    {
        log_debug << "monitor wait failed for sync_wait: " << e.what();
        return WSREP_TRX_FAIL;
    }
}

// Returns the GTID of the last transaction whose apply has fully finished.
wsrep_status_t galera::ReplicatorSMM::last_committed_id(wsrep_gtid_t* gtid) const
{
    // Note that we need to use apply monitor to determine last committed
    // here. Due to group commit implementation, the commit monitor may
    // be released before the commit has finished and the changes
    // made by the transaction have become visible. Therefore we rely
    // on apply monitor since it remains grabbed until the whole
    // commit is over.
    apply_monitor_.last_left_gtid(*gtid);
    return WSREP_OK;
}

// Replicates the end event of a non-blocking operation (NBO) and waits for
// it to be delivered back; on success enters the apply/commit monitors for
// the end event and advances both handles towards S_COMMITTING.
// NOTE(review): template arguments appear stripped by extraction here
// (e.g. "gu::shared_ptr::type"); restore from upstream before compiling.
wsrep_status_t galera::ReplicatorSMM::wait_nbo_end(TrxHandleMaster* trx,
                                                   wsrep_trx_meta_t* meta)
{
    gu::shared_ptr::type nbo_ctx(cert_.nbo_ctx(meta->gtid.seqno));

    // Send end message
    trx->set_state(TrxHandle::S_REPLICATING);

    WriteSetNG::GatherVector actv;

    size_t const actv_size(
        trx->write_set_out().gather(trx->source_id(),
                                    trx->conn_id(),
                                    trx->trx_id(),
                                    actv));
resend:
    wsrep_seqno_t lc(last_committed());
    if (lc == WSREP_SEQNO_UNDEFINED)
    {
        // Provider has been closed
        return WSREP_NODE_FAIL;
    }
    trx->finalize(lc);

    trx->unlock();
    int err(gcs_.sendv(actv, actv_size, GCS_ACT_WRITESET, false, false));
    trx->lock();

    if (err == -EAGAIN || err == -ENOTCONN || err == -EINTR)
    {
        // Send was either interrupted due to states exchange (EAGAIN),
        // due to non-prim (ENOTCONN) or due to timeout in send monitor
        // (EINTR).
        return WSREP_CONN_FAIL;
    }
    else if (err < 0)
    {
        log_error << "Failed to send NBO-end: " << err << ": "
                  << ::strerror(-err);
        return WSREP_NODE_FAIL;
    }

    TrxHandleSlavePtr end_ts;
    while ((end_ts = nbo_ctx->wait_ts()) == 0)
    {
        if (closing_ || state_() == S_CLOSED)
        {
            log_error << "Closing during nonblocking operation. "
                "Node will be left in inconsistent state and must be "
                "re-initialized either by full SST or from backup.";
            return WSREP_FATAL;
        }

        if (nbo_ctx->aborted())
        {
            log_debug << "NBO wait aborted, retrying send";
            // Wait was aborted by view change, resend message
            goto resend;
        }
    }

    assert(end_ts->ends_nbo() != WSREP_SEQNO_UNDEFINED);

    trx->add_replicated(end_ts);

    meta->gtid.uuid  = state_uuid_;
    meta->gtid.seqno = end_ts->global_seqno();
    meta->depends_on = end_ts->depends_seqno();

    ApplyOrder ao(*end_ts);
    apply_monitor_.enter(ao);

    CommitOrder co(*end_ts, co_mode_);
    if (co_mode_ != CommitOrder::BYPASS)
    {
        commit_monitor_.enter(co);
    }

    end_ts->set_state(TrxHandle::S_APPLYING);
    end_ts->set_state(TrxHandle::S_COMMITTING);

    trx->set_state(TrxHandle::S_CERTIFYING);
    trx->set_state(TrxHandle::S_APPLYING);
    trx->set_state(TrxHandle::S_COMMITTING);

    // Unref
    cert_.erase_nbo_ctx(end_ts->ends_nbo());

    return WSREP_OK;
}

// Begins a total-order-isolation (TOI) section for a local transaction:
// certifies the writeset and enters the apply and commit monitors so the
// action executes in total order. NBO-end requests delegate to
// wait_nbo_end().
wsrep_status_t galera::ReplicatorSMM::to_isolation_begin(TrxHandleMaster& trx,
                                                         wsrep_trx_meta_t* meta)
{
    assert(trx.locked());
    if (trx.nbo_end())
    {
        return wait_nbo_end(&trx, meta);
    }

    TrxHandleSlavePtr ts_ptr(trx.ts());
    TrxHandleSlave& ts(*ts_ptr);

    if (meta != 0)
    {
        assert(meta->gtid.seqno > 0);
        assert(meta->gtid.seqno == ts.global_seqno());
        assert(meta->depends_on == ts.depends_seqno());
    }

    assert(trx.state() == TrxHandle::S_REPLICATING);
    assert(trx.trx_id() == static_cast(-1));
    assert(ts.local_seqno() > -1 && ts.global_seqno() > -1);
    assert(ts.global_seqno() > last_committed());

    CommitOrder co(ts, co_mode_);
    wsrep_status_t const retval(cert_and_catch(&trx, ts_ptr));
#if 0
    if (ts.nbo_start() == true || ts.nbo_end() == true)
    {
        log_info << "\n MASTER processing NBO_"
                 << (ts.nbo_start() ? "START(" : "END(")
                 << ts.global_seqno() << ")"
                 << (WSREP_OK == retval ?
", must apply" : ", skip")
                 << ", ends NBO: " << ts.ends_nbo();
    }
#endif

    switch (retval)
    {
    case WSREP_OK:
    {
        TX_SET_STATE(trx, TrxHandle::S_APPLYING);
        TX_SET_STATE(ts, TrxHandle::S_APPLYING);

        ApplyOrder ao(ts);
        gu_trace(apply_monitor_.enter(ao));

        TX_SET_STATE(trx, TrxHandle::S_COMMITTING);
        TX_SET_STATE(ts, TrxHandle::S_COMMITTING);
        break;
    }
    case WSREP_TRX_FAIL:
        // certification failed: still enter the apply monitor so the
        // failed action keeps its slot in the global order
        assert(ts.state() == TrxHandle::S_ABORTING);
        apply_monitor_enter_immediately(ts);
        break;
    default:
        assert(0);
        gu_throw_fatal << "unrecognized retval " << retval
                       << " for to isolation certification for " << ts;
        break;
    }

    if (co_mode_ != CommitOrder::BYPASS)
        try
        {
            commit_monitor_.enter(co);

            if (ts.state() == TrxHandle::S_COMMITTING)
            {
                log_debug << "Executing TO isolated action: " << ts;
                st_.mark_unsafe();
            }
            else
            {
                log_debug << "Grabbed TO for failed isolated action: " << ts;
                assert(trx.state() == TrxHandle::S_ABORTING);
            }
        }
        catch (...)
        {
            gu_throw_fatal << "unable to enter commit monitor: " << ts;
        }

    return retval;
}

// Ends a TOI section: optionally votes on an execution error, releases the
// commit and apply monitors and moves the handles to their terminal states.
wsrep_status_t
galera::ReplicatorSMM::to_isolation_end(TrxHandleMaster& trx,
                                        const wsrep_buf_t* const err)
{
    TrxHandleSlavePtr ts_ptr(trx.ts());
    TrxHandleSlave& ts(*ts_ptr);

    log_debug << "Done executing TO isolated action: " << ts;

    assert(trx.state() == TrxHandle::S_COMMITTING ||
           trx.state() == TrxHandle::S_ABORTING);
    assert(ts.state() == TrxHandle::S_COMMITTING ||
           ts.state() == TrxHandle::S_ABORTING);

    wsrep_status_t ret(WSREP_OK);
    if (NULL != err && NULL != err->ptr)
    {
        ret = handle_apply_error(ts, *err, "Failed to execute TOI action ");
    }

    CommitOrder co(ts, co_mode_);
    if (co_mode_ != CommitOrder::BYPASS) commit_monitor_.leave(co);

    wsrep_seqno_t const safe_to_discard(cert_.set_trx_committed(ts));

    ApplyOrder ao(ts);
    apply_monitor_.leave(ao);

    if (ts.state() == TrxHandle::S_COMMITTING)
    {
        assert(trx.state() == TrxHandle::S_COMMITTING);
        TX_SET_STATE(trx, TrxHandle::S_COMMITTED);
        TX_SET_STATE(ts, TrxHandle::S_COMMITTED);

        if (trx.nbo_start() == false) st_.mark_safe();
    }
    else
    {
        assert(trx.state() == TrxHandle::S_ABORTING);
        assert(ts.state() == TrxHandle::S_ABORTING);
        TX_SET_STATE(trx, TrxHandle::S_ROLLED_BACK);
        TX_SET_STATE(ts, TrxHandle::S_ROLLING_BACK);
        TX_SET_STATE(ts, TrxHandle::S_ROLLED_BACK);
    }

    report_last_committed(safe_to_discard);

    return ret;
}

namespace galera
{
    // Lazily creates (and caches in handle.opaque) the WriteSetOut used to
    // accumulate preordered events for the given preordered handle.
    // NOTE(review): template arguments appear stripped by extraction here
    // (e.g. "static_cast(handle.opaque)"); restore from upstream.
    static WriteSetOut*
    writeset_from_handle (wsrep_po_handle_t& handle,
                          const TrxHandleMaster::Params& trx_params)
    {
        WriteSetOut* ret = static_cast(handle.opaque);

        if (NULL == ret)
        {
            try
            {
                ret = new WriteSetOut(
                    // gu::String<256>(trx_params.working_dir_) << '/' << &handle,
                    trx_params.working_dir_, wsrep_trx_id_t(&handle),
                    /* key format is not essential since we're not adding keys */
                    KeySet::version(trx_params.key_format_), NULL, 0, 0,
                    trx_params.record_set_ver_,
                    WriteSetNG::MAX_VERSION, DataSet::MAX_VERSION,
                    DataSet::MAX_VERSION, trx_params.max_write_set_size_);

                handle.opaque = ret;
            }
            catch (std::bad_alloc& ba)
            {
                gu_throw_error(ENOMEM) << "Could not create WriteSetOut";
            }
        }

        return ret;
    }
} /* namespace galera */

// Appends data buffers to the preordered writeset associated with 'handle'.
wsrep_status_t
galera::ReplicatorSMM::preordered_collect(wsrep_po_handle_t& handle,
                                          const struct wsrep_buf* const data,
                                          size_t const count,
                                          bool const copy)
{
    WriteSetOut* const ws(writeset_from_handle(handle, trx_params_));

    for (size_t i(0); i < count; ++i)
    {
        ws->append_data(data[i].ptr, data[i].len, copy);
    }

    return WSREP_OK;
}

// Finalizes a preordered writeset and, when commit == true, replicates it
// to the group (retrying while the send returns -EAGAIN). The cached
// WriteSetOut is destroyed in either case.
wsrep_status_t
galera::ReplicatorSMM::preordered_commit(wsrep_po_handle_t& handle,
                                         const wsrep_uuid_t& source,
                                         uint64_t const flags,
                                         int const pa_range,
                                         bool const commit)
{
    WriteSetOut* const ws(writeset_from_handle(handle, trx_params_));

    if (gu_likely(true == commit))
    {
        assert(source != WSREP_UUID_UNDEFINED);

        ws->set_flags (WriteSetNG::wsrep_flags_to_ws_flags(flags) |
                       WriteSetNG::F_PREORDERED);

        /* by looking at trx_id we should be able to detect gaps / lost events
         * (however resending is not implemented yet). Something like
         *
         * wsrep_trx_id_t const trx_id(cert_.append_preordered(source, ws));
         *
         * begs to be here.
         */
        wsrep_trx_id_t const trx_id(preordered_id_.add_and_fetch(1));

        WriteSetNG::GatherVector actv;

        size_t const actv_size(ws->gather(source, 0, trx_id, actv));

        ws->finalize_preordered(pa_range); // also adds checksum

        int rcode;
        do
        {
            rcode = gcs_.sendv(actv, actv_size, GCS_ACT_WRITESET, false, false);
        }
        while (rcode == -EAGAIN && (usleep(1000), true));

        if (rcode < 0)
            gu_throw_error(-rcode)
                << "Replication of preordered writeset failed.";
    }

    delete ws; // cleanup regardless of commit flag

    handle.opaque = NULL;

    return WSREP_OK;
}

// Donor-side SST completion notification: reports join status to GCS.
// rcode == 0 signals success (state_id carries the transferred position),
// negative rcode signals failure.
wsrep_status_t
galera::ReplicatorSMM::sst_sent(const wsrep_gtid_t& state_id, int rcode)
{
    assert (rcode <= 0);
    assert (rcode == 0 || state_id.seqno == WSREP_SEQNO_UNDEFINED);
    assert (rcode != 0 || state_id.seqno >= 0);

    if (state_() != S_DONOR)
    {
        log_error << "sst sent called when not SST donor, state " << state_();
        return WSREP_CONN_FAIL;
    }

    if (state_id.uuid != state_uuid_ && rcode >= 0)
    {
        // state we have sent no longer corresponds to the current group state
        // mark an error
        rcode = -EREMCHG;
    }

    try
    {
        if (rcode == 0)
            gcs_.join(gu::GTID(state_id.uuid, state_id.seqno), rcode);
        else
            /* stamp error message with the current state */
            gcs_.join(gu::GTID(state_uuid_, commit_monitor_.last_left()), rcode);

        return WSREP_OK;
    }
    catch (gu::Exception& e)
    {
        log_error << "failed to recover from DONOR state: " << e.what();
        return WSREP_CONN_FAIL;
    }
}

// Processes a replicated writeset on the applier path: certifies it and
// applies it (or signals the local NBO waiter for NBO-end events).
void galera::ReplicatorSMM::process_trx(void* recv_ctx,
                                        const TrxHandleSlavePtr& ts_ptr)
{
    assert(recv_ctx != 0);
    assert(ts_ptr != 0);

    TrxHandleSlave& ts(*ts_ptr);

    assert(ts.local_seqno() > 0);
    assert(ts.global_seqno() > 0);
    assert(ts.last_seen_seqno() >= 0);
    assert(ts.depends_seqno() == -1 || ts.version() >= 4);
    assert(ts.state() == TrxHandle::S_REPLICATING);

    wsrep_status_t const retval(cert_and_catch(0, ts_ptr));
#if 0
    if (ts.nbo_start() == true || ts.nbo_end() == true)
    {
        log_info << "\n SLAVE processing NBO_"
                 << (ts.nbo_start() ? "START(" : "END(")
                 << ts.global_seqno() << ")"
                 << (WSREP_OK == retval ?
", must apply" : ", skip")
                 << ", ends NBO: " << ts.ends_nbo();
    }
#endif

    switch (retval)
    {
    case WSREP_TRX_FAIL:
        assert(ts.state() == TrxHandle::S_ABORTING);
        /* fall through to apply_trx() */
    case WSREP_OK:
        try
        {
            if (ts.nbo_end() == true)
            {
                // NBO-end events are for internal operation only, not to be
                // consumed by application. If the NBO end happens with
                // different seqno than the current event's global seqno,
                // release monitors. In other case monitors will be grabbed
                // by local NBO handler threads.
                if (ts.ends_nbo() == WSREP_SEQNO_UNDEFINED)
                {
                    assert(WSREP_OK != retval);
                    assert(ts.state() == TrxHandle::S_ABORTING);
                }
                else
                {
                    assert(WSREP_OK == retval);
                    assert(ts.ends_nbo() > 0);
                    // Signal NBO waiter here after leaving local ordering
                    // critical section.
                    // NOTE(review): template arguments appear stripped by
                    // extraction ("gu::shared_ptr::type"); restore upstream.
                    gu::shared_ptr::type nbo_ctx(
                        cert_.nbo_ctx(ts.ends_nbo()));
                    assert(nbo_ctx != 0);
                    nbo_ctx->set_ts(ts_ptr);
                    break;
                }
            }

            gu_trace(apply_trx(recv_ctx, ts));
        }
        catch (std::exception& e)
        {
            log_fatal << "Failed to apply trx: " << ts;
            log_fatal << e.what();
            log_fatal << "Node consistency compromized, leaving cluster...";
            mark_corrupt_and_close();
            assert(0); // this is an unexpected exception
            // keep processing events from the queue until provider is closed
        }
        break;
    case WSREP_TRX_MISSING: // must be skipped due to SST
        assert(ts.state() == TrxHandle::S_ABORTING);
        break;
    default:
        // this should not happen for remote actions
        gu_throw_error(EINVAL)
            << "unrecognized retval for remote trx certification: "
            << retval << " trx: " << ts;
    }
}

// Handles a commit cut received from GCS: purges the certification index up
// to 'seq' under the local order monitor.
void galera::ReplicatorSMM::process_commit_cut(wsrep_seqno_t const seq,
                                               wsrep_seqno_t const seqno_l)
{
    assert(seq > 0);
    assert(seqno_l > 0);

    LocalOrder lo(seqno_l);

    gu_trace(local_monitor_.enter(lo));

    if (seq >= cc_seqno_) /* Refs #782. workaround for
                           * assert(seqno >= seqno_released_) in gcache. */
        cert_.purge_trxs_upto(seq, true);

    local_monitor_.leave(lo);
    log_debug << "Got commit cut from GCS: " << seq;
}

/* NB: the only use for this method is in cancel_seqnos() below */
void galera::ReplicatorSMM::cancel_seqno(wsrep_seqno_t const seqno)
{
    assert(seqno > 0);

    // self-cancel both ordering monitors for this seqno
    ApplyOrder ao(seqno, seqno - 1);
    apply_monitor_.self_cancel(ao);

    if (co_mode_ != CommitOrder::BYPASS)
    {
        CommitOrder co(seqno, co_mode_);
        commit_monitor_.self_cancel(co);
    }
}

/* NB: the only use for this method is to dismiss the slave queue
 * in corrupt state */
void galera::ReplicatorSMM::cancel_seqnos(wsrep_seqno_t const seqno_l,
                                          wsrep_seqno_t const seqno_g)
{
    if (seqno_l > 0)
    {
        LocalOrder lo(seqno_l);
        local_monitor_.self_cancel(lo);
    }

    if (seqno_g > 0) cancel_seqno(seqno_g);
}

// Waits until both ordering monitors have drained everything up to 'upto'.
void galera::ReplicatorSMM::drain_monitors(wsrep_seqno_t const upto)
{
    apply_monitor_.drain(upto);
    if (co_mode_ != CommitOrder::BYPASS) commit_monitor_.drain(upto);
}

// Participates in a consistency vote for seqno_g: on a vote request (code >
// 0) casts this node's success vote; on a negative group vote marks the
// node inconsistent.
void galera::ReplicatorSMM::process_vote(wsrep_seqno_t const seqno_g,
                                         wsrep_seqno_t const seqno_l,
                                         int64_t const code)
{
    assert(seqno_g > 0);
    assert(seqno_l > 0);

    std::ostringstream msg;

    LocalOrder lo(seqno_l);
    gu_trace(local_monitor_.enter(lo));

    gu::GTID const gtid(state_uuid_, seqno_g);

    if (code > 0) /* vote request */
    {
        assert(GCS_VOTE_REQUEST == code);
        log_info << "Got vote request for seqno " << gtid; //remove
        /* make sure WS was either successfully applied or already voted */
        if (last_committed() < seqno_g) drain_monitors(seqno_g);
        if (st_.corrupt()) goto out;

        int const ret(gcs_.vote(gtid, 0, NULL, 0));

        switch (ret)
        {
        case 0:         /* majority agrees */
            log_info << "Vote 0 (success) on " << gtid
                     << " is consistent with group. Continue.";
            goto out;
        case -EALREADY: /* already voted */
            log_info << gtid << " already voted on. Continue.";
            goto out;
        case 1:         /* majority disagrees */
            msg << "Vote 0 (success) on " << gtid
                << " is inconsistent with group. Leaving cluster.";
            goto fail;
        default:        /* general error */
            assert(ret < 0);
            msg << "Failed to vote on request for " << gtid << ": "
                << -ret << " (" << ::strerror(-ret) << "). "
                "Assuming inconsistency";
            goto fail;
        }
    }
    else if (code < 0)
    {
        msg << "Got negative vote on successfully applied " << gtid;
    fail:
        log_error << msg.str();
        on_inconsistency();
    }
    else
    {
        /* seems we are in majority */
    }
out:
    local_monitor_.leave(lo);
}

// Sets the initial position of the ordering monitors (and the state UUID).
void galera::ReplicatorSMM::set_initial_position(const wsrep_uuid_t& uuid,
                                                 wsrep_seqno_t const seqno)
{
    update_state_uuid(uuid);
    apply_monitor_.set_initial_position(uuid, seqno);

    if (co_mode_ != CommitOrder::BYPASS)
        commit_monitor_.set_initial_position(uuid, seqno);
}

// Maps the replicator protocol version to the writeset (trx) protocol
// version, record set version and state transfer protocol version.
// Aborts the process on an unsupported version.
void galera::ReplicatorSMM::establish_protocol_versions (int proto_ver)
{
    trx_params_.record_set_ver_ = gu::RecordSet::VER1;

    switch (proto_ver)
    {
    case 1:
        trx_params_.version_ = 1;
        str_proto_ver_ = 0;
        break;
    case 2:
        trx_params_.version_ = 1;
        str_proto_ver_ = 1;
        break;
    case 3:
    case 4:
        trx_params_.version_ = 2;
        str_proto_ver_ = 1;
        break;
    case 5:
        trx_params_.version_ = 3;
        str_proto_ver_ = 1;
        break;
    case 6:
        trx_params_.version_ = 3;
        str_proto_ver_ = 2; // gcs intelligent donor selection.
        // include handling dangling comma in donor string.
        break;
    case 7:
        // Protocol upgrade to handle IST SSL backwards compatibility,
        // no effect to TRX or STR protocols.
        trx_params_.version_ = 3;
        str_proto_ver_ = 2;
        break;
    case 8:
        // Protocol upgrade to enforce 8-byte alignment in writesets and CCs
        trx_params_.version_ = 3;
        trx_params_.record_set_ver_ = gu::RecordSet::VER2;
        str_proto_ver_ = 2;
        break;
    case 9:
        // Protocol upgrade to enable support for semi-shared key type.
trx_params_.version_ = 4;
        trx_params_.record_set_ver_ = gu::RecordSet::VER2;
        str_proto_ver_ = 2;
        break;
    case 10:
        // Protocol upgrade to enable support for:
        trx_params_.version_ = 5; // PA range preset in the writeset,
                                  // WSREP_KEY_UPDATE support (API v26)
        trx_params_.record_set_ver_ = gu::RecordSet::VER2;
        str_proto_ver_ = 3;       // CC events in IST.
        break;
    default:
        log_fatal << "Configuration change resulted in an unsupported protocol "
            "version: " << proto_ver << ". Can't continue.";
        abort();
    };

    protocol_version_ = proto_ver;
    log_info << "REPL Protocols: " << protocol_version_ << " ("
             << trx_params_.version_ << ", " << str_proto_ver_ << ")";
}

// Records certification/GCache seqno boundaries for a configuration change;
// these are needed later when this node serves IST as a donor.
void galera::ReplicatorSMM::record_cc_seqnos(wsrep_seqno_t cc_seqno,
                                             const char* source)
{
    cc_seqno_ = cc_seqno;
    cc_lowest_trx_seqno_ = cert_.lowest_trx_seqno();

    // NOTE(review): "indnex" typo below is in the runtime log string and is
    // preserved here on purpose; fix it upstream if desired.
    log_info << "Lowest cert indnex boundary for CC from " << source << ": "
             << cc_lowest_trx_seqno_;;
    log_info << "Min available from gcache for CC from " << source << ": "
             << gcache_.seqno_min();
    // Lowest TRX must not have been released from gcache at this
    // point.
    assert(cc_lowest_trx_seqno_ >= gcache_.seqno_min());
}

// Rebuilds the comma-separated incoming address list from the view,
// protected by incoming_mutex_.
void galera::ReplicatorSMM::update_incoming_list(const wsrep_view_info_t& view)
{
    static char const separator(',');

    ssize_t new_size(0);

    if (view.memb_num > 0)
    {
        new_size += view.memb_num - 1; // separators

        for (int i = 0; i < view.memb_num; ++i)
        {
            new_size += strlen(view.members[i].incoming);
        }
    }

    gu::Lock lock(incoming_mutex_);

    incoming_list_.clear();
    incoming_list_.resize(new_size);

    if (new_size <= 0) return;

    incoming_list_ = view.members[0].incoming;

    for (int i = 1; i < view.memb_num; ++i)
    {
        incoming_list_ += separator;
        incoming_list_ += view.members[i].incoming;
    }
}

// Maps a GCS node state to the corresponding replicator state machine state.
static galera::Replicator::State state2repl(gcs_node_state const my_state,
                                            int const my_idx)
{
    switch (my_state)
    {
    case GCS_NODE_STATE_NON_PRIM:
    case GCS_NODE_STATE_PRIM:
        return galera::Replicator::S_CONNECTED;
    case GCS_NODE_STATE_JOINER:
        return galera::Replicator::S_JOINING;
    case GCS_NODE_STATE_JOINED:
        return galera::Replicator::S_JOINED;
    case GCS_NODE_STATE_SYNCED:
        return galera::Replicator::S_SYNCED;
    case GCS_NODE_STATE_DONOR:
        return galera::Replicator::S_DONOR;
    case GCS_NODE_STATE_MAX:
        assert(0);
    }

    gu_throw_fatal << "unhandled gcs state: " << my_state;
    GU_DEBUG_NORETURN;
}

// Delivers the view to the application's view callback; a callback failure
// is unrecoverable and throws fatally.
void galera::ReplicatorSMM::submit_view_info(void* recv_ctx,
                                             const wsrep_view_info_t* view_info)
{
    wsrep_cb_status_t const rcode
        (view_cb_(app_ctx_, recv_ctx, view_info, 0, 0));

    if (WSREP_CB_SUCCESS != rcode)
    {
        gu_throw_fatal << "View callback failed. "
            "This is unrecoverable, restart required.";
    }
}

// Processes a group configuration change (CC), delivered either in order
// from the group (local) or replayed from IST. The definition continues in
// the following chunk of this archive.
// NOTE(review): const_cast/static_cast template/type arguments appear
// stripped by extraction in this routine; restore from upstream.
void galera::ReplicatorSMM::process_conf_change(void* recv_ctx,
                                                const struct gcs_action& cc)
{
    static int const ORDERED_CC = 10; /* repl protocol version which orders CC */

    assert(cc.seqno_l > -1);

    gcs_act_cchange const conf(cc.buf, cc.size);

    bool const from_IST(0 == cc.seqno_l);
    bool const ordered(conf.repl_proto_ver >= ORDERED_CC);

    log_info << "####### processing CC " << conf.seqno
             << (from_IST ? ", from IST" : ", local")
             << (ordered ?
", ordered" : ", unordered");

    LocalOrder lo(cc.seqno_l);
    if (!from_IST)
    {
        gu_trace(local_monitor_.enter(lo));
        gu_trace(process_pending_queue(cc.seqno_g));
    }

    // wait for all preceding actions to settle before changing configuration
    wsrep_seqno_t const upto(cert_.position());
    if (upto >= last_committed())
    {
        log_debug << "Drain monitors from " << last_committed()
                  << " upto " << upto;
        gu_trace(drain_monitors(upto));
    }
    else
    {
        /* this may happen when processing self-leave CC after connection
         * closure due to inconsistency. */
        assert(st_.corrupt());
    }

    int const prev_protocol_version(protocol_version_);

    if (conf.conf_id >= 0) // Primary configuration
    {
        assert(!from_IST || conf.repl_proto_ver >= ORDERED_CC);
        establish_protocol_versions (conf.repl_proto_ver);
    }

    // if CC comes from IST uuid_ must be already defined
    assert(!from_IST || WSREP_UUID_UNDEFINED != uuid_);
    // we must have either my_idx (passed in seqno_g) or uuid_ defined
    assert(cc.seqno_g >= 0 || WSREP_UUID_UNDEFINED != uuid_);

    wsrep_uuid_t new_uuid(uuid_);
    wsrep_view_info_t* const view_info
        (galera_view_info_create(conf,
                                 capabilities(conf.repl_proto_ver),
                                 (!from_IST ? cc.seqno_g : -1),
                                 new_uuid));

    if (view_info->status == WSREP_VIEW_PRIMARY)
    {
        safe_to_bootstrap_ = (view_info->memb_num == 1);
    }

    int const my_idx(view_info->my_idx);
    gcs_node_state_t const my_state
        (my_idx >= 0 ? conf.memb[my_idx].state_ : GCS_NODE_STATE_NON_PRIM);

    assert(my_state >= GCS_NODE_STATE_NON_PRIM);
    assert(my_state < GCS_NODE_STATE_MAX);

    wsrep_seqno_t const group_seqno(view_info->state_id.seqno);
    const wsrep_uuid_t& group_uuid (view_info->state_id.uuid);
    assert(group_seqno == conf.seqno);

    if (!from_IST)
    {
        bool first_view(false);
        if (WSREP_UUID_UNDEFINED == uuid_)
        {
            uuid_ = new_uuid;
            first_view = true;
        }
        else
        {
            if (view_info->memb_num > 0 && view_info->my_idx < 0)
                // something went wrong, member must be present in own view
            {
                std::ostringstream msg;

                msg << "Node UUID " << uuid_ << " is absent from the view:\n";

                for (int m(0); m < view_info->memb_num; ++m)
                {
                    msg << '\t' << view_info->members[m].id << '\n';
                }

                msg << "most likely due to unexpected node identity change. "
                    "Aborting.";

                log_fatal << msg.str();
                abort();
            }
        }

        log_info << "####### My UUID: " << uuid_;

        // First view from the group or group uuid has changed,
        // call connected callback to notify application.
        if ((first_view || state_uuid_ != group_uuid) && connected_cb_)
        {
            wsrep_cb_status_t cret(connected_cb_(app_ctx_, view_info));
            if (cret != WSREP_CB_SUCCESS)
            {
                log_fatal << "Application returned error " << cret
                          << " from connect callback, aborting";
                abort();
            }
        }

        if (conf.seqno != WSREP_SEQNO_UNDEFINED && conf.seqno <= sst_seqno_)
        {
            assert(!from_IST);
            log_info << "####### skipping CC " << conf.seqno
                     << (from_IST ? ", from IST" : ", local");
            // applied already in SST/IST, skip
            gu_trace(local_monitor_.leave(lo));
            resume_recv();
            gcache_.free(const_cast(cc.buf));
            ::free(view_info);
            return;
        }
    } // !from_IST

    update_incoming_list(*view_info);

    bool const st_required
        (state_transfer_required(*view_info, my_state == GCS_NODE_STATE_PRIM));

    void* app_req(0);
    size_t app_req_len(0);
#ifndef NDEBUG
    bool app_waits_sst(false);
#endif

    if (st_required)
    {
        log_info << "State transfer required: "
                 << "\n\tGroup state: " << group_uuid << ":" << group_seqno
                 << "\n\tLocal state: " << state_uuid_ << ":" << last_committed();

        assert(!from_IST);

        if (S_CONNECTED != state_()) state_.shift_to(S_CONNECTED);

        // ask the application to produce an SST request payload
        wsrep_cb_status_t const rcode(sst_request_cb_(app_ctx_,
                                                      &app_req, &app_req_len));

        if (WSREP_CB_SUCCESS != rcode)
        {
            assert(app_req_len <= 0);
            log_fatal << "SST request callback failed. This is unrecoverable, "
                      << "restart required.";
            abort();
        }
        else if (0 == app_req_len && state_uuid_ != group_uuid)
        {
            log_fatal << "Local state UUID " << state_uuid_
                      << " is different from group state UUID " << group_uuid
                      << ", and SST request is null: restart required.";
            abort();
        }
#ifndef NDEBUG
        app_waits_sst = (app_req_len > 0) &&
            (app_req_len != (strlen(WSREP_STATE_TRANSFER_NONE) + 1) ||
             memcmp(app_req, WSREP_STATE_TRANSFER_NONE, app_req_len));
#endif
    }
    else
    {
        log_info << "####### ST not required";
    }

    Replicator::State const next_state(state2repl(my_state, my_idx));

    if (conf.conf_id >= 0) // Primary configuration
    {
        // if protocol version >= ORDERED_CC, first CC already carries seqno 1,
        // so it can't be less than 1. For older protocols it can be 0.
        assert(group_seqno >= (protocol_version_ >= ORDERED_CC));

        //
        // Starting from protocol_version_ 8 joiner's cert index is rebuilt
        // from IST.
        //
        // The reasons to reset cert index:
        // - Protocol version lower than ORDERED_CC (ALL)
        // - Protocol upgrade (ALL)
        // - State transfer will take a place (JOINER)
        //
        bool index_reset(protocol_version_ < ORDERED_CC ||
                         prev_protocol_version != protocol_version_ ||
                         // this last condition is a bit too strict. In fact
                         // checking for app_waits_sst would be enough, but in
                         // that case we'd have to skip cert index rebuilding
                         // when there is none.
                         // This would complicate the logic with little to no
                         // benefits...
                         st_required);

        if (index_reset)
        {
            gu::GTID position;

            if (protocol_version_ < ORDERED_CC)
            {
                position.set(group_uuid, group_seqno);
            }
            else
            {
                position = gu::GTID();
            }

            /* 2 reasons for this here:
             * 1 - compatibility with protocols < ORDERED_CC
             * 2 - preparing cert index for preloading by setting seqno to 0 */
            log_info << "Cert index reset to " << position << " (proto: "
                     << protocol_version_ << "), state transfer needed: "
                     << (st_required ? "yes" : "no");

            /* flushes service thd, must be called before gcache_.seqno_reset()*/
            cert_.assign_initial_position(position, trx_params_.version_);
        }
        else
        {
            log_info << "Skipping cert index reset";
        }

        // This event can be processed 2 times:
        // 1) out-of-order when state transfer is required
        // 2) in-order (either when no state transfer or IST)
        // When doing it out of order, the event buffer is simply discarded
        if (st_required)
        {
            assert(!from_IST); // make sure we are never here from IST

            gu_trace(gcache_.free(const_cast(cc.buf)));

            // GCache::seqno_reset() happens here
            request_state_transfer (recv_ctx,
                                    group_uuid, group_seqno, app_req,
                                    app_req_len);
        }
        else if (conf.seqno > cert_.position())
        {
            assert(!app_waits_sst);

            /* since CC does not pass certification, need to adjust cert
             * position explicitly (when processed in order) */
            /* flushes service thd, must be called before gcache_.seqno_reset()*/
            cert_.adjust_position(*view_info,
                                  gu::GTID(group_uuid, group_seqno),
                                  trx_params_.version_);

            // Note: Monitor release/cancel happens after view event has been
            // processed.
            log_info << "####### Setting monitor position to "
                     << group_seqno;
            set_initial_position(group_uuid, group_seqno - 1);

            if (!from_IST)
            {
                /* CCs from IST already have seqno assigned and cert. position
                 * adjusted */
                if (protocol_version_ >= ORDERED_CC)
                {
                    gu_trace(gcache_.seqno_assign(cc.buf, conf.seqno,
                                                  GCS_ACT_CCHANGE, false));
                }
                else /* before protocol ver 10 conf changes are not ordered */
                {
                    gu_trace(gcache_.free(const_cast(cc.buf)));
                }

                if (state_() == S_CONNECTED || state_() == S_DONOR)
                {
                    switch (next_state)
                    {
                    case S_JOINING:
                        state_.shift_to(S_JOINING);
                        break;
                    case S_DONOR:
                        if (state_() == S_CONNECTED)
                        {
                            state_.shift_to(S_DONOR);
                        }
                        break;
                    case S_JOINED:
                        state_.shift_to(S_JOINED);
                        break;
                    case S_SYNCED:
                        state_.shift_to(S_SYNCED);
                        if (synced_cb_(app_ctx_) != WSREP_CB_SUCCESS)
                        {
                            log_fatal << "Synced callback failed. This is "
                                      << "unrecoverable, restart required.";
                            abort();
                        }
                        break;
                    default:
                        log_debug << "next_state " << next_state;
                        break;
                    }
                }
            }

            st_.set(state_uuid_, WSREP_SEQNO_UNDEFINED, safe_to_bootstrap_);
        }
        else
        {
            assert(!from_IST);
        }

        // record CC related state seqnos, needed for IST on DONOR
        record_cc_seqnos(group_seqno, "group");

        // GCache must contain some actions, at least this CC
        assert(gcache_.seqno_min() > 0 || conf.repl_proto_ver < ORDERED_CC);

        if (!from_IST && state_() == S_JOINING && sst_state_ != SST_NONE)
        {
            /* There are two reasons we can be here:
             * 1) we just got state transfer in request_state_transfer() above;
             * 2) we failed here previously (probably due to partition).
*/ try { gcs_.join(gu::GTID(state_uuid_, sst_seqno_), 0); sst_state_ = SST_NONE; } catch (gu::Exception& e) { log_error << "Failed to JOIN the cluster after SST"; } } } else { // Non-primary configuration assert(conf.seqno == WSREP_SEQNO_UNDEFINED); assert(!from_IST); // reset sst_seqno_ every time we disconnct from PC sst_seqno_ = WSREP_SEQNO_UNDEFINED; gcache_.free(const_cast(cc.buf)); gu::Lock lock(closing_mutex_); if (S_CONNECTED != next_state) { log_fatal << "Internal error: unexpected next state for " << "non-prim: " << next_state << ". Current state: " << state_() <<". Restart required."; abort(); } if (state_() > S_CONNECTED) { assert(S_CONNECTED == next_state); state_.shift_to(S_CONNECTED); } } free(app_req); assert(!from_IST || conf.seqno > 0); assert(!st_required || conf.seqno > 0); if (!from_IST /* A separate view from IST will be passed to ISTEventQueue */ && (!st_required /* in-order processing */ || conf.seqno < 0 /* non-primary configuration */)) { try { submit_view_info(recv_ctx, view_info); } catch (gu::Exception& e) { log_fatal << e.what(); abort(); } } free(view_info); // Cancel monitors after view event has been processed by the // application. Otherwise last_committed_id() will return incorrect // value if called from view callback. // IST will release monitors after its view is processed if (ordered && !from_IST && !st_required && group_seqno > 0) cancel_seqno(group_seqno); if (!from_IST) { double foo, bar; size_t index_size; cert_.stats_get(foo, bar, index_size); local_monitor_.leave(lo); resume_recv(); } if (conf.conf_id < 0 && conf.memb.size() == 0) { assert(!from_IST); log_debug << "Received SELF-LEAVE. 
Connection closed."; assert(cc.seqno_l > 0); gu::Lock lock(closing_mutex_); shift_to_CLOSED(); } } void galera::ReplicatorSMM::process_join(wsrep_seqno_t seqno_j, wsrep_seqno_t seqno_l) { LocalOrder lo(seqno_l); gu_trace(local_monitor_.enter(lo)); wsrep_seqno_t const upto(cert_.position()); drain_monitors(upto); if (seqno_j < 0 && S_JOINING == state_()) { // #595, @todo: find a way to re-request state transfer log_fatal << "Failed to receive state transfer: " << seqno_j << " (" << strerror (-seqno_j) << "), need to restart."; abort(); } else { state_.shift_to(S_JOINED); } local_monitor_.leave(lo); } void galera::ReplicatorSMM::process_sync(wsrep_seqno_t seqno_l) { LocalOrder lo(seqno_l); gu_trace(local_monitor_.enter(lo)); wsrep_seqno_t const upto(cert_.position()); drain_monitors(upto); state_.shift_to(S_SYNCED); if (synced_cb_(app_ctx_) != WSREP_CB_SUCCESS) { log_fatal << "Synced callback failed. This is unrecoverable, " << "restart required."; abort(); } local_monitor_.leave(lo); } wsrep_seqno_t galera::ReplicatorSMM::pause() { // Grab local seqno for local_monitor_ wsrep_seqno_t const local_seqno( static_cast(gcs_.local_sequence())); LocalOrder lo(local_seqno); local_monitor_.enter(lo); // Local monitor should take care that concurrent // pause requests are enqueued assert(pause_seqno_ == WSREP_SEQNO_UNDEFINED); pause_seqno_ = local_seqno; // Get drain seqno from cert index wsrep_seqno_t const upto(cert_.position()); drain_monitors(upto); assert (apply_monitor_.last_left() >= upto); if (co_mode_ != CommitOrder::BYPASS) { assert (commit_monitor_.last_left() >= upto); assert (commit_monitor_.last_left() == apply_monitor_.last_left()); } wsrep_seqno_t const ret(last_committed()); st_.set(state_uuid_, ret, safe_to_bootstrap_); log_info << "Provider paused at " << state_uuid_ << ':' << ret << " (" << pause_seqno_ << ")"; return ret; } void galera::ReplicatorSMM::resume() { if (pause_seqno_ == WSREP_SEQNO_UNDEFINED) { log_warn << "tried to resume unpaused provider"; 
return; } st_.set(state_uuid_, WSREP_SEQNO_UNDEFINED, safe_to_bootstrap_); log_info << "resuming provider at " << pause_seqno_; LocalOrder lo(pause_seqno_); pause_seqno_ = WSREP_SEQNO_UNDEFINED; local_monitor_.leave(lo); log_info << "Provider resumed."; } void galera::ReplicatorSMM::desync() { wsrep_seqno_t seqno_l; ssize_t const ret(gcs_.desync(seqno_l)); if (seqno_l > 0) { LocalOrder lo(seqno_l); // need to process it regardless of ret value if (ret == 0) { /* #706 - the check below must be state request-specific. We are not holding any locks here and must be able to wait like any other action. However practice may prove different, leaving it here as a reminder. if (local_monitor_.would_block(seqno_l)) { gu_throw_error (-EDEADLK) << "Ran out of resources waiting to " << "desync the node. " << "The node must be restarted."; } */ local_monitor_.enter(lo); if (state_() != S_DONOR) state_.shift_to(S_DONOR); local_monitor_.leave(lo); } else { local_monitor_.self_cancel(lo); } } if (ret) { gu_throw_error (-ret) << "Node desync failed."; } } void galera::ReplicatorSMM::resync() { gcs_.join(gu::GTID(state_uuid_, commit_monitor_.last_left()), 0); } ////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////// //// Private ////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////// /* process pending queue events scheduled before seqno */ void galera::ReplicatorSMM::process_pending_queue(wsrep_seqno_t seqno) { // pending_cert_queue_ contains all writesets that: // a) were BF aborted before being certified // b) are not going to be replayed even though // cert_for_aborted() returned TEST_OK for them // // Before certifying the current seqno, check if // pending_cert_queue contains any smaller seqno. // This avoids the certification index to diverge // across nodes. 
TrxHandleSlavePtr queued_ts; while ((queued_ts = pending_cert_queue_.must_cert_next(seqno)) != NULL) { log_debug << "must cert next " << seqno << " aborted ts " << *queued_ts; Certification::TestResult const result(cert_.append_trx(queued_ts)); log_debug << "trx in pending cert queue certified, result: " << result; assert(!queued_ts->cert_bypass() || Certification::TestResult::TEST_OK == result); bool const skip(Certification::TestResult::TEST_FAILED == result && !(queued_ts->cert_bypass()/* expl. ROLLBACK */)); /* at this point we are still assigning seqno to buffer in order */ gcache_.seqno_assign(queued_ts->action().first, queued_ts->global_seqno(), GCS_ACT_WRITESET, skip); cert_.set_trx_committed(*queued_ts); } } /* don't use this directly, use cert_and_catch() instead */ inline wsrep_status_t galera::ReplicatorSMM::cert(TrxHandleMaster* trx, const TrxHandleSlavePtr& ts) { assert(trx == 0 || (trx->state() == TrxHandle::S_REPLICATING || trx->state() == TrxHandle::S_MUST_REPLAY)); assert(ts->state() == TrxHandle::S_REPLICATING); assert(ts->local_seqno() != WSREP_SEQNO_UNDEFINED); assert(ts->global_seqno() != WSREP_SEQNO_UNDEFINED); assert(ts->last_seen_seqno() >= 0); assert(ts->last_seen_seqno() < ts->global_seqno()); LocalOrder lo(*ts); bool interrupted(false); bool in_replay(trx != 0 && trx->state() == TrxHandle::S_MUST_REPLAY); try { if (trx != 0) { if (in_replay == false) TX_SET_STATE(*trx, TrxHandle::S_CERTIFYING); trx->unlock(); } if (in_replay == false || local_monitor_.entered(lo) == false) { gu_trace(local_monitor_.enter(lo)); } if (trx != 0) trx->lock(); assert(trx == 0 || (trx->state() == TrxHandle::S_CERTIFYING || trx->state() == TrxHandle::S_MUST_ABORT || trx->state() == TrxHandle::S_MUST_REPLAY)); TX_SET_STATE(*ts, TrxHandle::S_CERTIFYING); } catch (gu::Exception& e) { if (trx != 0) trx->lock(); if (e.get_errno() == EINTR) { interrupted = true; } else throw; } wsrep_status_t retval(WSREP_OK); bool const applicable(ts->global_seqno() > 
last_committed()); assert(!ts->local() || applicable); // applicable can't be false for locals if (gu_unlikely (interrupted)) { assert(trx != 0); retval = cert_for_aborted(ts); if (WSREP_TRX_FAIL != retval) { assert(ts->state() == TrxHandle::S_REPLICATING || ts->state() == TrxHandle::S_CERTIFYING); assert(WSREP_BF_ABORT == retval); assert(trx != 0); // If the transaction was committing, it must replay. if (ts->flags() & TrxHandle::F_COMMIT) { TX_SET_STATE(*trx, TrxHandle::S_MUST_REPLAY); return retval; } // if not - we need to rollback, so pretend that certification // failed, but still update cert index to match slaves else { pending_cert_queue_.push(ts); TX_SET_STATE(*ts, TrxHandle::S_ABORTING); retval = WSREP_TRX_FAIL; } } else { assert(WSREP_TRX_FAIL == retval); assert(WSREP_SEQNO_UNDEFINED == ts->depends_seqno()); pending_cert_queue_.push(ts); } assert(WSREP_TRX_FAIL == retval); assert(TrxHandle::S_ABORTING == ts->state()); TX_SET_STATE(*trx, TrxHandle::S_ABORTING); local_monitor_.self_cancel(lo); } else { assert(ts->state() == TrxHandle::S_CERTIFYING); gu_trace(process_pending_queue(ts->global_seqno())); switch (cert_.append_trx(ts)) { case Certification::TEST_OK: // NBO_END should certify positively only if it ends NBO assert(ts->ends_nbo() > 0 || !ts->nbo_end()); if (gu_likely(applicable)) { if (trx != 0 && trx->state() == TrxHandle::S_MUST_ABORT) { if (ts->flags() & TrxHandle::F_COMMIT) { TX_SET_STATE(*trx, TrxHandle::S_MUST_REPLAY); // apply monitor will be entered during replay } else { // Abort the transaction if non-committing // fragment was BF aborted during certification. 
TX_SET_STATE(*trx, TrxHandle::S_ABORTING); TX_SET_STATE(*ts, TrxHandle::S_ABORTING); apply_monitor_enter_immediately(*ts); } retval = WSREP_BF_ABORT; } else { retval = WSREP_OK; } assert(ts->depends_seqno() >= 0); } else { // this can happen after SST position has been submitted // but not all actions preceding SST initial position // have been processed if (trx != 0) TX_SET_STATE(*trx, TrxHandle::S_ABORTING); TX_SET_STATE(*ts, TrxHandle::S_ABORTING); retval = WSREP_TRX_MISSING; } break; case Certification::TEST_FAILED: if (ts->nbo_end()) assert(ts->ends_nbo() == WSREP_SEQNO_UNDEFINED); assert(ts->state() == TrxHandle::S_ABORTING ); // This check is not valid anymore. NBO may reserve resource // access for longer period, which must cause certification // to fail for all operations until the operation is over. // if (gu_unlikely(trx->is_toi() && applicable)) //small sanity check // { // may happen on configuration change // log_warn << "Certification failed for TO isolated action: " //<< *trx; // assert(0); // } local_cert_failures_ += ts->local(); if (trx != 0) TX_SET_STATE(*trx, TrxHandle::S_ABORTING); retval = applicable ? WSREP_TRX_FAIL : WSREP_TRX_MISSING; break; } // at this point we are about to leave local_monitor_. Make sure // trx checksum was alright before that. ts->verify_checksum(); // we must do seqno assignment 'in order' for std::map reasons, // so keeping it inside the monitor. NBO end should never be skipped. 
bool const skip(ts->depends_seqno() < 0 && !ts->nbo_end()); gcache_.seqno_assign (ts->action().first, ts->global_seqno(), GCS_ACT_WRITESET, skip); if (gu_unlikely(WSREP_TRX_MISSING == retval)) { assert(!applicable); /* this trx will never go through application chain */ report_last_committed(cert_.set_trx_committed(*ts)); } local_monitor_.leave(lo); } assert(WSREP_OK == retval || WSREP_TRX_FAIL == retval || WSREP_TRX_MISSING == retval || WSREP_BF_ABORT == retval); if (gu_unlikely(WSREP_TRX_FAIL == retval)) { assert(ts->state() == TrxHandle::S_ABORTING); } else { assert(WSREP_OK != retval || ts->depends_seqno() >= 0); if (WSREP_OK != retval && ts->local()) { log_debug << "#############" << "Skipped cancel_monitors(): retval: " << retval << ", trx: " << trx << ", ts: " << *ts; } } #if 0 uint16_t const sid(*reinterpret_cast(&ts->source_id())); log_info << "######## certified g: " << ts->global_seqno() << ", s: " << ts->last_seen_seqno() << ", d: " << ts->depends_seqno() << ", sid: " << sid << ", retval: " << (retval == WSREP_OK); #endif return retval; } /* pretty much any exception in cert() is fatal as it blocks local_monitor_ */ wsrep_status_t galera::ReplicatorSMM::cert_and_catch( TrxHandleMaster* trx, const TrxHandleSlavePtr& ts) { try { return cert(trx, ts); } catch (std::exception& e) { log_fatal << "Certification exception: " << e.what(); } catch (...) 
{ log_fatal << "Unknown certification exception"; } assert(0); abort(); } /* This must be called BEFORE local_monitor_.self_cancel() due to * gcache_.seqno_assign() */ wsrep_status_t galera::ReplicatorSMM::cert_for_aborted( const TrxHandleSlavePtr& ts) { // trx was BF aborted either while it was replicating or // while it was waiting for local monitor assert(ts->state() == TrxHandle::S_REPLICATING || ts->state() == TrxHandle::S_CERTIFYING); Certification::TestResult const res(cert_.test(ts, false)); switch (res) { case Certification::TEST_OK: return WSREP_BF_ABORT; case Certification::TEST_FAILED: // Next step will be monitors release. Make sure that ws was not // corrupted and cert failure is real before proceeding with that. //gcf788 - this must be moved to cert(), the caller method assert(ts->is_dummy()); ts->verify_checksum(); assert(!ts->nbo_end()); // should never be skipped in seqno_assign() return WSREP_TRX_FAIL; default: log_fatal << "Unexpected return value from Certification::test(): " << res; abort(); } } void galera::ReplicatorSMM::update_state_uuid (const wsrep_uuid_t& uuid) { if (state_uuid_ != uuid) { *(const_cast(&state_uuid_)) = uuid; std::ostringstream os; os << state_uuid_; // Copy only non-nil terminated part of the source string // and terminate the string explicitly to silence a warning // generated by Wstringop-truncation char* str(const_cast(state_uuid_str_)); strncpy(str, os.str().c_str(), sizeof(state_uuid_str_) - 1); str[sizeof(state_uuid_str_) - 1] = '\0'; } st_.set(uuid, WSREP_SEQNO_UNDEFINED, safe_to_bootstrap_); } void galera::ReplicatorSMM::abort() { log_info << "ReplicatorSMM::abort()"; gcs_.close(); gu_abort(); } galera-26.4.3/galera/src/trx_handle.cpp0000664000177500017540000004101413540715002016234 0ustar dbartmy// // Copyright (C) 2010-2018 Codership Oy // #include "trx_handle.hpp" #include "galera_exception.hpp" #include #include const galera::TrxHandleMaster::Params galera::TrxHandleMaster::Defaults(".", -1, 
KeySet::MAX_VERSION, gu::RecordSet::VER2, false); void galera::TrxHandle::print_state(std::ostream& os, TrxHandle::State s) { switch (s) { case TrxHandle::S_EXECUTING: os << "EXECUTING"; return; case TrxHandle::S_MUST_ABORT: os << "MUST_ABORT"; return; case TrxHandle::S_ABORTING: os << "ABORTING"; return; case TrxHandle::S_REPLICATING: os << "REPLICATING"; return; case TrxHandle::S_CERTIFYING: os << "CERTIFYING"; return; case TrxHandle::S_MUST_REPLAY: os << "MUST_REPLAY"; return; case TrxHandle::S_REPLAYING: os << "REPLAYING"; return; case TrxHandle::S_APPLYING: os << "APPLYING"; return; case TrxHandle::S_COMMITTING: os << "COMMITTING"; return; case TrxHandle::S_ROLLING_BACK: os << "ROLLING_BACK"; return; case TrxHandle::S_COMMITTED: os << "COMMITTED"; return; case TrxHandle::S_ROLLED_BACK: os << "ROLLED_BACK"; return; // don't use default to make compiler warn if something is missed } os << ""; assert(0); } std::ostream& galera::operator<<(std::ostream& os, TrxHandle::State const s) { galera::TrxHandle::print_state(os, s); return os; } void galera::TrxHandle::print_set_state(State state) const { log_info << "Trx: " << this << " shifting to " << state; } void galera::TrxHandle::print_state_history(std::ostream& os) const { const std::vector& hist(state_.history()); for (size_t i(0); i < hist.size(); ++i) { os << hist[i].first << ':' << hist[i].second << "->"; } const TrxHandle::Fsm::StateEntry current_state(state_.get_state_entry()); os << current_state.first << ':' << current_state.second; } inline void galera::TrxHandle::print(std::ostream& os) const { os << "source: " << source_id() << " version: " << version() << " local: " << local() << " flags: " << flags() << " conn_id: " << int64_t(conn_id()) << " trx_id: " << int64_t(trx_id()) // for readability << " tstamp: " << timestamp() << "; state: "; print_state_history(os); } std::ostream& galera::operator<<(std::ostream& os, const TrxHandle& th) { th.print(os); return os; } void 
galera::TrxHandleSlave::print(std::ostream& os) const { TrxHandle::print(os); os << " seqnos (l: " << local_seqno_ << ", g: " << global_seqno_ << ", s: " << last_seen_seqno_ << ", d: " << depends_seqno_ << ")"; if (!skip_event()) { os << " WS pa_range: " << write_set().pa_range(); if (write_set().annotated()) { os << "\nAnnotation:\n"; write_set().write_annotation(os); os << std::endl; } } else { os << " skip event"; } os << "; state history: "; print_state_history(os); } std::ostream& galera::operator<<(std::ostream& os, const TrxHandleSlave& th) { th.print(os); return os; } galera::TrxHandleMaster::Fsm::TransMap galera::TrxHandleMaster::trans_map_; galera::TrxHandleSlave::Fsm::TransMap galera::TrxHandleSlave::trans_map_; namespace galera { // // About transaction states: // // The TrxHandleMaster stats are used to track the state of the // transaction, while TrxHandleSlave states are used to track // which critical sections have been accessed during write set // applying. As a convention, TrxHandleMaster states are changed // before entering the critical section, TrxHandleSlave states // after critical section has been succesfully entered. // // TrxHandleMaster states during normal execution: // // EXECUTING - Transaction handle has been created by appending key // or write set data // REPLICATING - Transaction write set has been send to group // communication layer for ordering // CERTIFYING - Transaction write set has been received from group // communication layer, has entered local monitor and // is certifying // APPLYING - Transaction has entered applying critical section // COMMITTING - Transaction has entered committing critical section // COMMITTED - Transaction has released commit time critical section // ROLLED_BACK - Application performed a voluntary rollback // // Note that streaming replication rollback happens by replicating // special rollback writeset which will go through regular write set // critical sections. 
// // Note/Fixme: CERTIFYING, APPLYING and COMMITTING states seem to be // redundant as these states can be tracked via // associated TrxHandleSlave states. // // // TrxHandleMaster states after effective BF abort: // // MUST_ABORT - Transaction enters this state after a successful BF abort. // BF abort is allowed if: // * Transaction does not have associated TrxHandleSlave // * Transaction has associated TrxHandleSlave but it does // not have commit flag set // * Transaction has associated TrxHandleSlave, commit flag // is set and the TrxHandleSlave global sequence number is // higher than BF aborter global sequence number // // 1) If the certification after BF abort results in a failure: // ABORTING - BF abort was effective and certification // resulted in a failure // ROLLING_BACK - Commit order critical section has been grabbed for // rollback // ROLLED_BACK - Commit order critical section has been released after // successful rollback // // 2) The case where BF abort happens after successful certification or // if out-of-order certification results in a success: // MUST_REPLAY - The transaction must roll back and replay in applier // context. // * If the BF abort happened before certification, // certification must be performed in applier context // and the transaction replay must be aborted if // the certification fails. // * TrxHandleSlave state can be used to determine // which critical sections must be entered before the // replay. For example, if the TrxHandleSlave state is // REPLICATING, write set must be certified under local // monitor and both apply and commit monitors must be // entered before applying. On the other hand, if // TrxHandleSlave state is APPLYING, only commit monitor // must be grabbed before replay. 
// // TrxHandleMaster states after replication failure: // // ABORTING - Replication resulted in a failure // ROLLING_BACK - Error has been returned to application // ROLLED_BACK - Application has finished rollback // // // TrxHandleMaster states after certification failure: // // ABORTING - Certification resulted in a failure // ROLLING_BACK - Commit order critical section has been grabbed for // rollback // ROLLED_BACK - Commit order critical section has been released // after successful rollback // // // // TrxHandleSlave: // REPLICATING - this is the first state for TrxHandleSlave after it // has been received from group // CERTIFYING - local monitor has been entered successfully // APPLYING - apply monitor has been entered successfully // COMMITTING - commit monitor has been entered successfully // ABORTING - certification has failed // ROLLING_BACK - certification has failed and commit monitor has been // entered // COMMITTED - commit has been finished, commit order critical section // has been released // ROLLED_BACK - transaction has rolled back, commit order critical section // has been released // // // State machine diagrams can be found below. 
template<> TransMapBuilder::TransMapBuilder() : trans_map_(TrxHandleMaster::trans_map_) { // // 0 COMMITTED <-| // | ^ | // | SR | | // | |------------------------------------------------------| | // v v | | // EXECUTING -> REPLICATING -> CERTIFYING -> APPLYING -> COMMITTING | // |^ | | | | | | // || |------------------------------------------------------- | // || | BF Abort ----------------| | // || v | Cert Fail | // ||MUST_ABORT ----------------------------------------- | // || | | | | // || Pre Repl | v | REPLAYING // || | MUST_CERT_AND_REPLAY --------------| ^ // || SR Rollback v | ----------| Cert OK | // | --------- ABORTING <------- | v | // | | Cert Fail | MUST_REPLAY_AM | // | v | | | // | ROLLING_BACK | v | // | | |-> MUST_REPLAY_CM | // | v | | | // ----------> ROLLED_BACK | v | // |-> MUST_REPLAY | // | | // ------------ // // Executing add(TrxHandle::S_EXECUTING, TrxHandle::S_REPLICATING); add(TrxHandle::S_EXECUTING, TrxHandle::S_ROLLED_BACK); add(TrxHandle::S_EXECUTING, TrxHandle::S_MUST_ABORT); // Replicating add(TrxHandle::S_REPLICATING, TrxHandle::S_CERTIFYING); add(TrxHandle::S_REPLICATING, TrxHandle::S_MUST_ABORT); // Certifying add(TrxHandle::S_CERTIFYING, TrxHandle::S_APPLYING); add(TrxHandle::S_CERTIFYING, TrxHandle::S_ABORTING); add(TrxHandle::S_CERTIFYING, TrxHandle::S_MUST_ABORT); // Applying add(TrxHandle::S_APPLYING, TrxHandle::S_COMMITTING); add(TrxHandle::S_APPLYING, TrxHandle::S_MUST_ABORT); // Committing add(TrxHandle::S_COMMITTING, TrxHandle::S_COMMITTED); add(TrxHandle::S_COMMITTING, TrxHandle::S_MUST_ABORT); add(TrxHandle::S_COMMITTED, TrxHandle::S_EXECUTING); // SR // BF aborted add(TrxHandle::S_MUST_ABORT, TrxHandle::S_MUST_REPLAY); add(TrxHandle::S_MUST_ABORT, TrxHandle::S_ABORTING); // Replay, BF abort happens on application side after // commit monitor has been grabbed add(TrxHandle::S_MUST_REPLAY, TrxHandle::S_REPLAYING); // In-order certification failed for BF'ed action add(TrxHandle::S_MUST_REPLAY, TrxHandle::S_ABORTING); 
// Replay stage add(TrxHandle::S_REPLAYING, TrxHandle::S_COMMITTING); // BF aborted add(TrxHandle::S_ABORTING, TrxHandle::S_ROLLED_BACK); // cert failed or BF in apply monitor add(TrxHandle::S_ABORTING, TrxHandle::S_ROLLING_BACK); add(TrxHandle::S_ROLLING_BACK, TrxHandle::S_ROLLED_BACK); // SR rollback add(TrxHandle::S_ABORTING, TrxHandle::S_EXECUTING); } template<> TransMapBuilder::TransMapBuilder() : trans_map_(TrxHandleSlave::trans_map_) { // Cert OK // 0 --> REPLICATING -> CERTIFYING ------> APPLYING --> COMMITTING // | | | // | |Cert failed | // | | | // | v v // +-------> ABORTING COMMITTED / ROLLED_BACK // | // v // ROLLING_BACK // | // v // ROLLED_BACK // Enter in-order cert after replication add(TrxHandle::S_REPLICATING, TrxHandle::S_CERTIFYING); // BF'ed and IST-skipped add(TrxHandle::S_REPLICATING, TrxHandle::S_ABORTING); // Applying after certification add(TrxHandle::S_CERTIFYING, TrxHandle::S_APPLYING); // Roll back due to cert failure add(TrxHandle::S_CERTIFYING, TrxHandle::S_ABORTING); // Entering commit monitor after rollback add(TrxHandle::S_ABORTING, TrxHandle::S_ROLLING_BACK); // Entering commit monitor after applying add(TrxHandle::S_APPLYING, TrxHandle::S_COMMITTING); // Replay after BF add(TrxHandle::S_APPLYING, TrxHandle::S_REPLAYING); add(TrxHandle::S_COMMITTING, TrxHandle::S_REPLAYING); // Commit finished add(TrxHandle::S_COMMITTING, TrxHandle::S_COMMITTED); // Error reported in leave_commit_order() call add(TrxHandle::S_COMMITTING, TrxHandle::S_ROLLED_BACK); // Rollback finished add(TrxHandle::S_ROLLING_BACK, TrxHandle::S_ROLLED_BACK); } static TransMapBuilder master; static TransMapBuilder slave; } /* namespace galera */ void galera::TrxHandleSlave::sanity_checks() const { if (gu_unlikely((flags() & (F_ROLLBACK | F_BEGIN)) == (F_ROLLBACK | F_BEGIN))) { log_warn << "Both F_BEGIN and F_ROLLBACK are set on trx. 
" << "This trx should not have been replicated at all: " << *this; assert(0); } } void galera::TrxHandleSlave::deserialize_error_log(const gu::Exception& e) const { log_fatal << "Writeset deserialization failed: " << e.what() << std::endl << "WS flags: " << write_set_flags_ << std::endl << "Trx proto: " << version_ << std::endl << "Trx source: " << source_id_ << std::endl << "Trx conn_id: " << conn_id_ << std::endl << "Trx trx_id: " << trx_id_ << std::endl << "Trx last_seen: " << last_seen_seqno_; } void galera::TrxHandleSlave::apply (void* recv_ctx, wsrep_apply_cb_t apply_cb, const wsrep_trx_meta_t& meta, wsrep_bool_t& exit_loop) { uint32_t const wsrep_flags(trx_flags_to_wsrep_flags(flags())); int err(0); const DataSetIn& ws(write_set_.dataset()); void* err_msg(NULL); size_t err_len(0); ws.rewind(); // make sure we always start from the beginning wsrep_ws_handle_t const wh = { trx_id(), this }; if (ws.count() > 0) { for (ssize_t i = 0; WSREP_CB_SUCCESS == err && i < ws.count(); ++i) { const gu::Buf& buf(ws.next()); wsrep_buf_t const wb = { buf.ptr, size_t(buf.size) }; err = apply_cb(recv_ctx, &wh, wsrep_flags, &wb, &meta, &exit_loop); } } else { // Apply also zero sized write set to inform application side // about transaction meta data. 
wsrep_buf_t const wb = { NULL, 0 }; err = apply_cb(recv_ctx, &wh, wsrep_flags, &wb, &meta, &exit_loop); assert(NULL == err_msg); assert(0 == err_len); } if (gu_unlikely(err != WSREP_CB_SUCCESS)) { std::ostringstream os; os << "Apply callback failed: Trx: " << *this << ", status: " << err; galera::ApplyException ae(os.str(), err_msg, NULL, err_len); GU_TRACE(ae); throw ae; } return; } /* we don't care about any failures in applying unordered events */ void galera::TrxHandleSlave::unordered(void* recv_ctx, wsrep_unordered_cb_t cb) const { if (NULL != cb && write_set_.unrdset().count() > 0) { const DataSetIn& unrd(write_set_.unrdset()); for (int i(0); i < unrd.count(); ++i) { const gu::Buf& data(unrd.next()); wsrep_buf_t const wb = { data.ptr, size_t(data.size) }; cb(recv_ctx, &wb); } } } void galera::TrxHandleSlave::destroy_local(void* ptr) { assert(ptr); (static_cast(ptr))->~TrxHandleMaster(); } galera-26.4.3/galera/src/mapped_buffer.hpp0000664000177500017540000000320713540715002016712 0ustar dbartmy// // Copyright (C) 2010 Codership Oy // #ifndef GALERA_MAPPED_BUFFER_HPP #define GALERA_MAPPED_BUFFER_HPP #include #include "gu_buffer.hpp" namespace galera { class MappedBuffer { public: typedef gu::byte_t& reference; typedef gu::byte_t const& const_reference; typedef gu::byte_t* iterator; typedef gu::byte_t const* const_iterator; MappedBuffer(const std::string& working_dir, size_t threshold = 1 << 20); ~MappedBuffer(); reference operator[](size_t i) { return buf_[i]; } const_reference operator[](size_t i) const { return buf_[i]; } void reserve(size_t sz); void resize(size_t sz); void clear(); size_t size() const { return buf_size_; } bool empty() const { return (buf_size_ == 0); } iterator begin() { return buf_; } iterator end() { return (buf_ + buf_size_); } const_iterator begin() const { return buf_; } const_iterator end() const { return (buf_ + buf_size_); } private: MappedBuffer(const MappedBuffer&); void operator=(const MappedBuffer&); const std::string& 
working_dir_; // working dir for data files std::string file_; int fd_; // file descriptor size_t threshold_; // in-memory threshold gu::byte_t* buf_; // data buffer size_t buf_size_; // buffer size (inserted data size) size_t real_buf_size_; // real buffer size (allocated size) }; } #endif // GALERA_MAPPED_BUFFER_HPP galera-26.4.3/galera/src/ist.hpp0000664000177500017540000001307413540715002014715 0ustar dbartmy// // Copyright (C) 2011-2017 Codership Oy // #ifndef GALERA_IST_HPP #define GALERA_IST_HPP #include "wsrep_api.h" #include "galera_gcs.hpp" #include "trx_handle.hpp" #include "gu_config.hpp" #include "gu_lock.hpp" #include "gu_monitor.hpp" #include "gu_asio.hpp" #include #include namespace gcache { class GCache; } namespace galera { namespace ist { void register_params(gu::Config& conf); // IST event handler interface class EventHandler { public: // Process transaction from IST virtual void ist_trx(const TrxHandleSlavePtr&, bool must_apply, bool preload) = 0; // Process conf change from IST virtual void ist_cc(const gcs_action&, bool must_apply, bool preload) = 0; // Report IST end virtual void ist_end(int error) = 0; protected: virtual ~EventHandler() {} }; class Receiver { public: static std::string const RECV_ADDR; static std::string const RECV_BIND; Receiver(gu::Config& conf, gcache::GCache&, TrxHandleSlave::Pool& slave_pool, EventHandler&, const char* addr); ~Receiver(); std::string prepare(wsrep_seqno_t first_seqno, wsrep_seqno_t last_seqno, int protocol_version, const wsrep_uuid_t& source_id); // this must be called AFTER SST is processed and we know // the starting point. 
void ready(wsrep_seqno_t first); wsrep_seqno_t finished(); void run(); wsrep_seqno_t first_seqno() const { return first_seqno_; } private: void interrupt(); std::string recv_addr_; std::string recv_bind_; asio::io_service io_service_; asio::ip::tcp::acceptor acceptor_; asio::ssl::context ssl_ctx_; gu::Mutex mutex_; gu::Cond cond_; wsrep_seqno_t first_seqno_; wsrep_seqno_t last_seqno_; wsrep_seqno_t current_seqno_; gu::Config& conf_; gcache::GCache& gcache_; TrxHandleSlave::Pool& slave_pool_; wsrep_uuid_t source_id_; EventHandler& handler_; gu_thread_t thread_; int error_code_; int version_; bool use_ssl_; bool running_; bool ready_; // GCC 4.8.5 on FreeBSD wants this Receiver(const Receiver&); Receiver& operator=(const Receiver&); }; class Sender { public: Sender(const gu::Config& conf, gcache::GCache& gcache, const std::string& peer, int version); virtual ~Sender(); // first - first trx seqno // last - last trx seqno // preload_start - the seqno from which sent transactions // are accompanied with index preload flag void send(wsrep_seqno_t first, wsrep_seqno_t last, wsrep_seqno_t preload_start); void cancel() { if (use_ssl_ == true) { ssl_stream_->lowest_layer().close(); } else { socket_.close(); } } private: asio::io_service io_service_; asio::ip::tcp::socket socket_; asio::ssl::context ssl_ctx_; asio::ssl::stream* ssl_stream_; const gu::Config& conf_; gcache::GCache& gcache_; int version_; bool use_ssl_; Sender(const Sender&); void operator=(const Sender&); }; class AsyncSender; class AsyncSenderMap { public: AsyncSenderMap(gcache::GCache& gcache) : senders_(), monitor_(), gcache_(gcache) { } void run(const gu::Config& conf, const std::string& peer, wsrep_seqno_t first, wsrep_seqno_t last, wsrep_seqno_t preload_start, int version); void remove(AsyncSender*, wsrep_seqno_t); void cancel(); gcache::GCache& gcache() { return gcache_; } private: std::set senders_; // use monitor instead of mutex, it provides cancellation point gu::Monitor monitor_; gcache::GCache& 
gcache_; }; } // namespace ist } // namespace galera #endif // GALERA_IST_HPP galera-26.4.3/galera/src/data_set.cpp0000664000177500017540000000013013540715002015662 0ustar dbartmy// // Copyright (C) 2013 Codership Oy // #include "data_set.hpp" galera-26.4.3/galera/src/write_set_ng.cpp0000664000177500017540000002424013540715002016577 0ustar dbartmy// // Copyright (C) 2013-2017 Codership Oy // #include "write_set_ng.hpp" #include #include #include #ifndef NDEBUG #include // gcache::MemOps::ALIGNMENT #endif #include namespace galera { WriteSetNG::Header::Offsets::Offsets ( int a01, int a02, int a03, int a04, int a05, int a06, int a07, int a08, int a09, int a10, int a11, int a12 ) : header_ver_ (a01), header_size_ (a02), sets_ (a03), flags_ (a04), pa_range_ (a05), last_seen_ (a06), seqno_ (a07), timestamp_ (a08), source_id_ (a09), conn_id_ (a10), trx_id_ (a11), crc_ (a12) {} WriteSetNG::Header::Offsets const WriteSetNG::Header::V3 ( V3_HEADER_VERS_OFF, V3_HEADER_SIZE_OFF, V3_SETS_OFF, V3_FLAGS_OFF, V3_PA_RANGE_OFF, V3_LAST_SEEN_OFF, V3_SEQNO_OFF, V3_TIMESTAMP_OFF, V3_SOURCE_ID_OFF, V3_CONN_ID_OFF, V3_TRX_ID_OFF, V3_CRC_OFF ); size_t WriteSetNG::Header::gather (KeySet::Version const kver, DataSet::Version const dver, bool unord, bool annot, uint16_t const flags, const wsrep_uuid_t& source, const wsrep_conn_id_t& conn, const wsrep_trx_id_t& trx, GatherVector& out) { GU_COMPILE_ASSERT(MAX_VERSION <= 15, header_version_too_big); GU_COMPILE_ASSERT(KeySet::MAX_VERSION <= 15, keyset_version_too_big); GU_COMPILE_ASSERT(DataSet::MAX_VERSION <= 3, dataset_version_too_big); assert (uint(ver_) <= MAX_VERSION); assert (uint(kver) <= KeySet::MAX_VERSION); assert (uint(dver) <= DataSet::MAX_VERSION); local_[V3_MAGIC_OFF] = MAGIC_BYTE; local_[V3_HEADER_VERS_OFF] = (version() << 4) | VER3; local_[V3_HEADER_SIZE_OFF] = size(); local_[V3_SETS_OFF] = (kver << 4) | (dver << 2) | (unord * V3_UNORD_FLAG) | (annot * V3_ANNOT_FLAG); uint16_t* const fl(reinterpret_cast(local_ + V3_FLAGS_OFF)); 
uint16_t* const pa(reinterpret_cast(local_ + V3_PA_RANGE_OFF)); *fl = gu::htog(flags); *pa = 0; // certified ws will have dep. window of at least 1 wsrep_uuid_t* const sc(reinterpret_cast(local_ + V3_SOURCE_ID_OFF)); *sc = source; uint64_t* const cn(reinterpret_cast(local_ + V3_CONN_ID_OFF)); uint64_t* const tx(reinterpret_cast(local_ + V3_TRX_ID_OFF)); *cn = gu::htog(conn); *tx = gu::htog(trx); gu::Buf const buf = { ptr_, size() }; out->push_back(buf); return buf.size; } void WriteSetNG::Header::finalize(wsrep_seqno_t const last_seen, int const pa_range) { assert (ptr_); assert (size_ > 0); assert (pa_range >= -1); uint16_t* const pa(reinterpret_cast(ptr_ + V3_PA_RANGE_OFF)); uint64_t* const ls(reinterpret_cast(ptr_ + V3_LAST_SEEN_OFF)); uint64_t* const ts(reinterpret_cast(ptr_ + V3_TIMESTAMP_OFF)); *pa = gu::htog(std::min(int(MAX_PA_RANGE), pa_range)); *ls = gu::htog(last_seen); *ts = gu::htog(gu_time_monotonic()); update_checksum (ptr_, size() - V3_CHECKSUM_SIZE); } void WriteSetNG::Header::set_seqno(wsrep_seqno_t const seqno, uint16_t const pa_range) { assert (ptr_); assert (size_ > 0); assert (seqno > 0); assert (wsrep_seqno_t(pa_range) <= seqno); uint16_t* const fl(reinterpret_cast(ptr_ + V3_FLAGS_OFF)); uint16_t* const pa(reinterpret_cast(ptr_ + V3_PA_RANGE_OFF)); uint64_t* const sq(reinterpret_cast(ptr_ + V3_SEQNO_OFF)); uint16_t const flags(gu::htog(*fl)); *fl = gu::htog(flags | F_CERTIFIED); // certification happened *pa = gu::htog(pa_range); // certification outcome *sq = gu::htog(seqno); update_checksum (ptr_, size() - V3_CHECKSUM_SIZE); } gu::Buf WriteSetNG::Header::copy(bool const include_keys, bool const include_unrd) const { assert (ptr_ != &local_[0]); assert (size_t(size()) <= sizeof(local_)); gu::byte_t* const lptr(&local_[0]); ::memcpy (lptr, ptr_, size_); gu::byte_t const mask(0x0c | (0xf0 * include_keys) | (0x02 * include_unrd)); lptr[V3_SETS_OFF] &= mask; // zero up versions of non-included sets update_checksum (lptr, size() - 
V3_CHECKSUM_SIZE); gu::Buf ret = { lptr, size_ }; return ret; } void WriteSetNG::Header::Checksum::verify (Version ver, const void* const ptr, ssize_t const hsize) { GU_COMPILE_ASSERT(Header::V3_CHECKSUM_SIZE >= int(sizeof(type_t)), checksum_type_too_long); assert (hsize > V3_CHECKSUM_SIZE); type_t check(0), hcheck(0); size_t const csize(hsize - V3_CHECKSUM_SIZE); compute (ptr, csize, check); gu::unserialize(ptr, csize, hcheck); if (gu_likely(check == hcheck)) return; gu_throw_error (EINVAL) << "Header checksum mismatch: computed " << std::hex << std::setfill('0') << std::setw(sizeof(check) << 1) << check << ", found " << std::setw(sizeof(hcheck) << 1) << hcheck; } const char WriteSetOut::keys_suffix[] = "_keys"; const char WriteSetOut::data_suffix[] = "_data"; const char WriteSetOut::unrd_suffix[] = "_unrd"; const char WriteSetOut::annt_suffix[] = "_annt"; void WriteSetIn::init (ssize_t const st) { assert(false == check_thr_); const gu::byte_t* const pptr (header_.payload()); ssize_t const psize(size_ - header_.size()); assert (psize >= 0); KeySet::Version const kver(header_.keyset_ver()); if (kver != KeySet::EMPTY) gu_trace(keys_.init (kver, pptr, psize)); assert (false == check_); assert (false == check_thr_); if (gu_likely(st > 0)) /* checksum enforced */ { if (size_ >= st) { /* buffer too big, start checksumming in background */ int const err(gu_thread_create (&check_thr_id_, NULL, checksum_thread, this)); if (gu_likely(0 == err)) { check_thr_ = true; return; } log_warn << "Starting checksum thread failed: " << err << '(' << ::strerror(err) << ')'; /* fall through to checksum in foreground */ } checksum(); gu_trace(checksum_fin()); } else /* checksum skipped, pretend it's alright */ { check_ = true; } } void WriteSetIn::checksum() { const gu::byte_t* pptr (header_.payload()); ssize_t psize(size_ - header_.size()); assert (psize >= 0); try { if (keys_.size() > 0) { gu_trace(keys_.checksum()); size_t const tmpsize(keys_.serial_size()); psize -= tmpsize; pptr += 
tmpsize; assert (psize >= 0); } DataSet::Version const dver(header_.dataset_ver()); if (gu_likely(dver != DataSet::EMPTY)) { assert (psize > 0); gu_trace(data_.init(dver, pptr, psize)); gu_trace(data_.checksum()); size_t const tmpsize(data_.serial_size()); psize -= tmpsize; pptr += tmpsize; assert (psize >= 0); if (header_.has_unrd()) { gu_trace(unrd_.init(dver, pptr, psize)); gu_trace(unrd_.checksum()); size_t const tmpsize(unrd_.serial_size()); psize -= tmpsize; pptr += tmpsize; assert (psize >= 0); } if (header_.has_annt()) { annt_ = new DataSetIn(); gu_trace(annt_->init(dver, pptr, psize)); // we don't care for annotation checksum - it is not a reason // to throw an exception and abort execution // gu_trace(annt_->checksum()); #ifndef NDEBUG psize -= annt_->serial_size(); #endif } } #ifndef NDEBUG assert (psize >= 0); assert (size_t(psize) < gcache::MemOps::ALIGNMENT); #endif check_ = true; } catch (std::exception& e) { log_error << e.what(); } catch (...) { log_error << "Non-standard exception in WriteSet::checksum()"; } } void WriteSetIn::write_annotation(std::ostream& os) const { annt_->rewind(); ssize_t const count(annt_->count()); for (ssize_t i = 0; os.good() && i < count; ++i) { gu::Buf abuf = annt_->next(); const char* const astr(static_cast(abuf.ptr)); if (abuf.size > 0 && astr[0] != '\0') os.write(astr, abuf.size); } } size_t WriteSetIn::gather(GatherVector& out, bool include_keys, bool include_unrd) const { if (include_keys && include_unrd) { gu::Buf buf = { header_.ptr(), size_ }; out->push_back(buf); return size_; } else { out->reserve(out->size() + 4); gu::Buf buf(header_.copy(include_keys, include_unrd)); out->push_back(buf); size_t ret(buf.size); if (include_keys) { buf = keys_.buf(); out->push_back(buf); ret += buf.size; } buf = data_.buf(); out->push_back (buf); ret += buf.size; if (include_unrd) { buf = unrd_.buf(); out->push_back(buf); ret += buf.size; } if (annotated()) { buf = annt_->buf(); out->push_back (buf); ret += buf.size; } return 
ret; } } } /* namespace galera */ galera-26.4.3/galera/src/ist_proto.cpp0000664000177500017540000000131713540715002016130 0ustar dbartmy// // Copyright (C) 2015 Codership Oy // #include "ist_proto.hpp" std::ostream& galera::ist::operator<< (std::ostream& os, const Message& m) { os << "ver: " << m.version() << ", type: " << m.type() << ", flags: " << m.flags() << ", ctrl: " << m.ctrl() << ", len: " << m.len() << ", seqno: " << m.seqno(); return os; } void galera::ist::Message::throw_invalid_version(uint8_t const v) { gu_throw_error(EPROTO) << "invalid protocol version " << int(v) << ", expected " << version_; } void galera::ist::Message::throw_corrupted_header() { gu_throw_error(EINVAL) << "Corrupted IST message header: " << *this; } galera-26.4.3/galera/src/key_entry_ng.hpp0000664000177500017540000000657013540715002016616 0ustar dbartmy// // Copyright (C) 2013-2018 Codership Oy // #ifndef GALERA_KEY_ENTRY_NG_HPP #define GALERA_KEY_ENTRY_NG_HPP #include "trx_handle.hpp" namespace galera { class KeyEntryNG { public: KeyEntryNG(const KeySet::KeyPart& key) : refs_(), key_(key) { std::fill(&refs_[0], &refs_[KeySet::Key::TYPE_MAX], static_cast(NULL)); } KeyEntryNG(const KeyEntryNG& other) : refs_(), key_(other.key_) { std::copy(&other.refs_[0], &other.refs_[KeySet::Key::TYPE_MAX], &refs_[0]); } const KeySet::KeyPart& key() const { return key_; } void ref(wsrep_key_type_t p, const KeySet::KeyPart& k, TrxHandleSlave* trx) { assert(0 == refs_[p] || refs_[p]->global_seqno() <= trx->global_seqno()); refs_[p] = trx; key_ = k; } void unref(wsrep_key_type_t p, TrxHandleSlave* trx) { assert(refs_[p] != NULL); if (refs_[p] == trx) { refs_[p] = NULL; } else { assert(refs_[p]->global_seqno() > trx->global_seqno()); assert(0); } } bool referenced() const { bool ret(refs_[0] != NULL); for (int i(1); false == ret && i <= KeySet::Key::TYPE_MAX; ++i) { ret = (refs_[i] != NULL); } return ret; } const TrxHandleSlave* ref_trx(wsrep_key_type_t const p) const { return refs_[p]; } size_t 
size() const { return sizeof(*this); } void swap(KeyEntryNG& other) throw() { using std::swap; gu::swap_array(refs_, other.refs_); swap(key_, other.key_); } KeyEntryNG& operator=(KeyEntryNG ke) { swap(ke); return *this; } ~KeyEntryNG() { assert(!referenced()); } private: TrxHandleSlave* refs_[KeySet::Key::TYPE_MAX + 1]; KeySet::KeyPart key_; #ifndef NDEBUG void assert_ref(KeySet::Key::Prefix, TrxHandleSlave*) const; void assert_unref(KeySet::Key::Prefix, TrxHandleSlave*) const; #endif /* NDEBUG */ }; inline void swap(KeyEntryNG& a, KeyEntryNG& b) { a.swap(b); } class KeyEntryHashNG { public: size_t operator()(const KeyEntryNG& ke) const { return ke.key().hash(); } }; class KeyEntryPtrHashNG { public: size_t operator()(const KeyEntryNG* const ke) const { return ke->key().hash(); } }; class KeyEntryEqualNG { public: bool operator()(const KeyEntryNG& left, const KeyEntryNG& right) const { return left.key().matches(right.key()); } }; class KeyEntryPtrEqualNG { public: bool operator()(const KeyEntryNG* const left, const KeyEntryNG* const right) const { return left->key().matches(right->key()); } }; } #endif // GALERA_KEY_ENTRY_HPP galera-26.4.3/galera/src/key_data.hpp0000664000177500017540000000223713540715002015676 0ustar dbartmy// // Copyright (C) 2013-2018 Codership Oy // #ifndef GALERA_KEY_DATA_HPP #define GALERA_KEY_DATA_HPP #include "wsrep_api.h" #include namespace galera { struct KeyData { const wsrep_buf_t* const parts; long const parts_num; int const proto_ver; wsrep_key_type_t const type; bool const copy; KeyData (int const pv, const wsrep_buf_t* const k, long const kn, wsrep_key_type_t const tp, bool const cp) : parts (k), parts_num (kn), proto_ver (pv), type (tp), copy (cp) {} KeyData (const KeyData& kd) : parts (kd.parts), parts_num(kd.parts_num), proto_ver(kd.proto_ver), type (kd.type), copy (kd.copy) {} bool shared() const { return type == WSREP_KEY_SHARED; } void print(std::ostream& os) const; private: KeyData& operator = (const KeyData&); }; /* struct 
KeyData */ inline std::ostream& operator << (std::ostream& os, const KeyData& kd) { kd.print(os); return os; } } /* namespace galera */ #endif /* GALERA_KEY_DATA_HPP */ galera-26.4.3/galera/src/ist.cpp0000664000177500017540000006715113540715002014715 0ustar dbartmy// // Copyright (C) 2011-2019 Codership Oy // #include "ist.hpp" #include "ist_proto.hpp" #include "gu_logger.hpp" #include "gu_uri.hpp" #include "gu_debug_sync.hpp" #include "gu_progress.hpp" #include "galera_common.hpp" #include #include #include namespace { static std::string const CONF_KEEP_KEYS ("ist.keep_keys"); static bool const CONF_KEEP_KEYS_DEFAULT (true); } namespace galera { namespace ist { class AsyncSender : public Sender { public: AsyncSender(const gu::Config& conf, const std::string& peer, wsrep_seqno_t first, wsrep_seqno_t last, wsrep_seqno_t preload_start, AsyncSenderMap& asmap, int version) : Sender (conf, asmap.gcache(), peer, version), conf_ (conf), peer_ (peer), first_ (first), last_ (last), preload_start_(preload_start), asmap_ (asmap), thread_() { } const gu::Config& conf() { return conf_; } const std::string& peer() const { return peer_; } wsrep_seqno_t first() const { return first_; } wsrep_seqno_t last() const { return last_; } wsrep_seqno_t preload_start() const { return preload_start_; } AsyncSenderMap& asmap() { return asmap_; } gu_thread_t thread() { return thread_; } private: friend class AsyncSenderMap; const gu::Config& conf_; std::string const peer_; wsrep_seqno_t const first_; wsrep_seqno_t const last_; wsrep_seqno_t const preload_start_; AsyncSenderMap& asmap_; gu_thread_t thread_; // GCC 4.8.5 on FreeBSD wants it AsyncSender(const AsyncSender&); AsyncSender& operator=(const AsyncSender&); }; } } std::string const galera::ist::Receiver::RECV_ADDR("ist.recv_addr"); std::string const galera::ist::Receiver::RECV_BIND("ist.recv_bind"); void galera::ist::register_params(gu::Config& conf) { conf.add(Receiver::RECV_ADDR); conf.add(Receiver::RECV_BIND); 
conf.add(CONF_KEEP_KEYS); } galera::ist::Receiver::Receiver(gu::Config& conf, gcache::GCache& gc, TrxHandleSlave::Pool& slave_pool, EventHandler& handler, const char* addr) : recv_addr_ (), recv_bind_ (), io_service_ (), acceptor_ (io_service_), ssl_ctx_ (io_service_, asio::ssl::context::sslv23), mutex_ (), cond_ (), first_seqno_ (WSREP_SEQNO_UNDEFINED), last_seqno_ (WSREP_SEQNO_UNDEFINED), current_seqno_(WSREP_SEQNO_UNDEFINED), conf_ (conf), gcache_ (gc), slave_pool_ (slave_pool), source_id_ (WSREP_UUID_UNDEFINED), handler_ (handler), thread_ (), error_code_ (0), version_ (-1), use_ssl_ (false), running_ (false), ready_ (false) { std::string recv_addr; std::string recv_bind; try { recv_bind = conf_.get(RECV_BIND); // no return } catch (gu::NotSet& e) {} try /* check if receive address is explicitly set */ { recv_addr = conf_.get(RECV_ADDR); return; } catch (gu::NotSet& e) {} /* if not, check the alternative. TODO: try to find from system. */ if (addr) { try { recv_addr = gu::URI(std::string("tcp://") + addr).get_host(); conf_.set(RECV_ADDR, recv_addr); } catch (gu::NotSet& e) {} } } galera::ist::Receiver::~Receiver() { } extern "C" void* run_receiver_thread(void* arg) { galera::ist::Receiver* receiver(static_cast(arg)); receiver->run(); return 0; } static std::string IST_determine_recv_addr (gu::Config& conf) { std::string recv_addr; try { recv_addr = conf.get(galera::ist::Receiver::RECV_ADDR); } catch (gu::NotFound&) { try { recv_addr = conf.get(galera::BASE_HOST_KEY); } catch (gu::NotSet&) { gu_throw_error(EINVAL) << "Could not determine IST receinve address: '" << galera::ist::Receiver::RECV_ADDR << "' not set."; } } /* check if explicit scheme is present */ if (recv_addr.find("://") == std::string::npos) { bool ssl(false); try { std::string ssl_key = conf.get(gu::conf::ssl_key); if (ssl_key.length() != 0) ssl = true; } catch (gu::NotSet&) {} if (ssl) recv_addr.insert(0, "ssl://"); else recv_addr.insert(0, "tcp://"); } gu::URI ra_uri(recv_addr); if 
(!conf.has(galera::BASE_HOST_KEY)) conf.set(galera::BASE_HOST_KEY, ra_uri.get_host()); try /* check for explicit port, TODO: make it possible to use any free port (explicit 0?) */ { ra_uri.get_port(); } catch (gu::NotSet&) /* use gmcast listen port + 1 */ { int port(0); try { port = gu::from_string(conf.get(galera::BASE_PORT_KEY)); } catch (...) { port = gu::from_string(galera::BASE_PORT_DEFAULT); } port += 1; recv_addr += ":" + gu::to_string(port); } log_info << "IST receiver addr using " << recv_addr; return recv_addr; } static std::string IST_determine_recv_bind(gu::Config& conf) { std::string recv_bind; recv_bind = conf.get(galera::ist::Receiver::RECV_BIND); /* check if explicit scheme is present */ if (recv_bind.find("://") == std::string::npos) { bool ssl(false); try { std::string ssl_key = conf.get(gu::conf::ssl_key); if (ssl_key.length() != 0) ssl = true; } catch (gu::NotSet&) { } if (ssl) recv_bind.insert(0, "ssl://"); else recv_bind.insert(0, "tcp://"); } gu::URI rb_uri(recv_bind); try /* check for explicit port, TODO: make it possible to use any free port (explicit 0?) */ { rb_uri.get_port(); } catch (gu::NotSet&) /* use gmcast listen port + 1 */ { int port(0); try { port = gu::from_string(conf.get(galera::BASE_PORT_KEY)); } catch (...) 
{ port = gu::from_string(galera::BASE_PORT_DEFAULT); } port += 1; recv_bind += ":" + gu::to_string(port); } log_info << "IST receiver bind using " << recv_bind; return recv_bind; } std::string galera::ist::Receiver::prepare(wsrep_seqno_t const first_seqno, wsrep_seqno_t const last_seqno, int const version, const wsrep_uuid_t& source_id) { ready_ = false; version_ = version; source_id_ = source_id; recv_addr_ = IST_determine_recv_addr(conf_); try { recv_bind_ = IST_determine_recv_bind(conf_); } catch (gu::NotSet&) { recv_bind_ = recv_addr_; } gu::URI const uri_addr(recv_addr_); gu::URI const uri_bind(recv_bind_); try { if (uri_addr.get_scheme() == "ssl") { log_info << "IST receiver using ssl"; use_ssl_ = true; // Protocol versions prior 7 had a bug on sender side // which made sender to return null cert in handshake. // Therefore peer cert verfification must be enabled // only at protocol version 7 or higher. gu::ssl_prepare_context(conf_, ssl_ctx_, version >= 7); } asio::ip::tcp::resolver resolver(io_service_); asio::ip::tcp::resolver::query query(gu::unescape_addr(uri_bind.get_host()), uri_bind.get_port(), asio::ip::tcp::resolver::query::flags(0)); asio::ip::tcp::resolver::iterator i(resolver.resolve(query)); acceptor_.open(i->endpoint().protocol()); acceptor_.set_option(asio::ip::tcp::socket::reuse_address(true)); gu::set_fd_options(acceptor_); acceptor_.bind(*i); acceptor_.listen(); // read recv_addr_ from acceptor_ in case zero port was specified recv_addr_ = uri_addr.get_scheme() + "://" + uri_addr.get_host() + ":" + gu::to_string(acceptor_.local_endpoint().port()); } catch (asio::system_error& e) { recv_addr_ = ""; gu_throw_error(e.code().value()) << "Failed to open IST listener at " << uri_bind.to_string() << "', asio error '" << e.what() << "'"; } first_seqno_ = first_seqno; last_seqno_ = last_seqno; int err; if ((err = gu_thread_create(&thread_, 0, &run_receiver_thread, this)) != 0) { recv_addr_ = ""; gu_throw_error(err) << "Unable to create receiver 
thread"; } running_ = true; log_info << "Prepared IST receiver for " << first_seqno << '-' << last_seqno << ", listening at: " << (uri_bind.get_scheme() + "://" + gu::escape_addr(acceptor_.local_endpoint().address()) + ":" + gu::to_string(acceptor_.local_endpoint().port())); return recv_addr_; } void galera::ist::Receiver::run() { asio::ip::tcp::socket socket(io_service_); asio::ssl::stream ssl_stream(io_service_, ssl_ctx_); try { if (use_ssl_ == true) { acceptor_.accept(ssl_stream.lowest_layer()); gu::set_fd_options(ssl_stream.lowest_layer()); ssl_stream.handshake( asio::ssl::stream::server); } else { acceptor_.accept(socket); gu::set_fd_options(socket); } } catch (asio::system_error& e) { gu_throw_error(e.code().value()) << "accept() failed" << "', asio error '" << e.what() << "': " << gu::extra_error_info(e.code()); } acceptor_.close(); /* shall be initialized below, when we know at what seqno preload starts */ gu::Progress* progress(NULL); int ec(0); try { bool const keep_keys(conf_.get(CONF_KEEP_KEYS, CONF_KEEP_KEYS_DEFAULT)); Proto p(gcache_, version_, keep_keys); if (use_ssl_ == true) { p.send_handshake(ssl_stream); p.recv_handshake_response(ssl_stream); p.send_ctrl(ssl_stream, Ctrl::C_OK); } else { p.send_handshake(socket); p.recv_handshake_response(socket); p.send_ctrl(socket, Ctrl::C_OK); } // wait for SST to complete so that we know what is the first_seqno_ { gu::Lock lock(mutex_); while (ready_ == false) { lock.wait(cond_); } } log_info << "####### IST applying starts with " << first_seqno_; //remove assert(first_seqno_ > 0); bool preload_started(false); current_seqno_ = WSREP_SEQNO_UNDEFINED; while (true) { std::pair ret; if (use_ssl_ == true) { p.recv_ordered(ssl_stream, ret); } else { p.recv_ordered(socket, ret); } gcs_action& act(ret.first); // act type GCS_ACT_UNKNOWN denotes EOF if (gu_unlikely(act.type == GCS_ACT_UNKNOWN)) { assert(0 == act.seqno_g); assert(NULL == act.buf); assert(0 == act.size); log_debug << "eof received, closing socket"; 
break; } assert(act.seqno_g > 0); if (gu_unlikely(WSREP_SEQNO_UNDEFINED == current_seqno_)) { assert(!progress); if (act.seqno_g > first_seqno_) { log_error << "IST started with wrong seqno: " << act.seqno_g << ", expected <= " << first_seqno_; ec = EINVAL; goto err; } log_info << "####### IST current seqno initialized to " << act.seqno_g; current_seqno_ = act.seqno_g; progress = new gu::Progress( "Receiving IST", " events", last_seqno_ - current_seqno_ + 1, /* The following means reporting progress NO MORE frequently * than once per BOTH 10 seconds (default) and 16 events */ 16); } else { assert(progress); ++current_seqno_; progress->update(1); } if (act.seqno_g != current_seqno_) { log_error << "Unexpected action seqno: " << act.seqno_g << " expected: " << current_seqno_; ec = EINVAL; goto err; } assert(current_seqno_ > 0); assert(current_seqno_ == act.seqno_g); assert(act.type != GCS_ACT_UNKNOWN); bool const must_apply(current_seqno_ >= first_seqno_); bool const preload(ret.second); if (gu_unlikely(preload == true && preload_started == false)) { log_info << "IST preload starting at " << current_seqno_; preload_started = true; } switch (act.type) { case GCS_ACT_WRITESET: { TrxHandleSlavePtr ts( TrxHandleSlavePtr(TrxHandleSlave::New(false, slave_pool_), TrxHandleSlaveDeleter())); if (act.size > 0) { gu_trace(ts->unserialize(act)); ts->set_local(false); assert(ts->global_seqno() == act.seqno_g); assert(ts->depends_seqno() >= 0 || ts->nbo_end()); assert(ts->action().first && ts->action().second); // Checksum is verified later on } else { ts->set_global_seqno(act.seqno_g); ts->mark_dummy(__LINE__); } //log_info << "####### Passing WS " << act.seqno_g; handler_.ist_trx(ts, must_apply, preload); break; } case GCS_ACT_CCHANGE: //log_info << "####### Passing IST CC " << act.seqno_g // << ", must_apply: " << must_apply // << ", preload: " << preload; handler_.ist_cc(act, must_apply, preload); break; default: assert(0); } } progress->finish(); } catch (asio::system_error& 
e) { log_error << "got asio system error while reading IST stream: " << e.code(); ec = e.code().value(); } catch (gu::Exception& e) { ec = e.get_errno(); if (ec != EINTR) { log_error << "got exception while reading IST stream: " << e.what(); } } err: gcache_.seqno_unlock(); delete progress; gu::Lock lock(mutex_); if (use_ssl_ == true) { ssl_stream.lowest_layer().close(); // ssl_stream.shutdown(); } else { socket.close(); } running_ = false; if (last_seqno_ > 0 && ec != EINTR && current_seqno_ < last_seqno_) { log_error << "IST didn't contain all write sets, expected last: " << last_seqno_ << " last received: " << current_seqno_; ec = EPROTO; } if (ec != EINTR) { error_code_ = ec; } handler_.ist_end(ec); } void galera::ist::Receiver::ready(wsrep_seqno_t const first) { assert(first > 0); gu::Lock lock(mutex_); first_seqno_ = first; ready_ = true; cond_.signal(); } wsrep_seqno_t galera::ist::Receiver::finished() { if (recv_addr_ == "") { log_debug << "IST was not prepared before calling finished()"; } else { interrupt(); int err; if ((err = gu_thread_join(thread_, 0)) != 0) { log_warn << "Failed to join IST receiver thread: " << err; } acceptor_.close(); gu::Lock lock(mutex_); running_ = false; recv_addr_ = ""; } return current_seqno_; } void galera::ist::Receiver::interrupt() { gu::URI uri(recv_addr_); try { asio::ip::tcp::resolver::iterator i; try { asio::ip::tcp::resolver resolver(io_service_); asio::ip::tcp::resolver::query query(gu::unescape_addr(uri.get_host()), uri.get_port(), asio::ip::tcp::resolver::query::flags(0)); i = resolver.resolve(query); } catch (asio::system_error& e) { gu_throw_error(e.code().value()) << "failed to resolve host '" << uri.to_string() << "', asio error '" << e.what() << "'"; } if (use_ssl_ == true) { asio::ssl::stream ssl_stream(io_service_, ssl_ctx_); ssl_stream.lowest_layer().connect(*i); gu::set_fd_options(ssl_stream.lowest_layer()); ssl_stream.handshake(asio::ssl::stream::client); Proto p(gcache_, version_, 
conf_.get(CONF_KEEP_KEYS, CONF_KEEP_KEYS_DEFAULT)); p.recv_handshake(ssl_stream); p.send_ctrl(ssl_stream, Ctrl::C_EOF); p.recv_ctrl(ssl_stream); } else { asio::ip::tcp::socket socket(io_service_); socket.connect(*i); gu::set_fd_options(socket); Proto p(gcache_, version_, conf_.get(CONF_KEEP_KEYS, CONF_KEEP_KEYS_DEFAULT)); p.recv_handshake(socket); p.send_ctrl(socket, Ctrl::C_EOF); p.recv_ctrl(socket); } } catch (asio::system_error& e) { // ignore } } galera::ist::Sender::Sender(const gu::Config& conf, gcache::GCache& gcache, const std::string& peer, int version) : io_service_(), socket_ (io_service_), ssl_ctx_ (io_service_, asio::ssl::context::sslv23), ssl_stream_(0), conf_ (conf), gcache_ (gcache), version_ (version), use_ssl_ (false) { gu::URI uri(peer); try { asio::ip::tcp::resolver resolver(io_service_); asio::ip::tcp::resolver::query query(gu::unescape_addr(uri.get_host()), uri.get_port(), asio::ip::tcp::resolver::query::flags(0)); asio::ip::tcp::resolver::iterator i(resolver.resolve(query)); if (uri.get_scheme() == "ssl") { use_ssl_ = true; } if (use_ssl_ == true) { log_info << "IST sender using ssl"; ssl_prepare_context(conf, ssl_ctx_); // ssl_stream must be created after ssl_ctx_ is prepared... 
ssl_stream_ = new asio::ssl::stream( io_service_, ssl_ctx_); ssl_stream_->lowest_layer().connect(*i); gu::set_fd_options(ssl_stream_->lowest_layer()); ssl_stream_->handshake(asio::ssl::stream::client); } else { socket_.connect(*i); gu::set_fd_options(socket_); } } catch (asio::system_error& e) { gu_throw_error(e.code().value()) << "IST sender, failed to connect '" << peer.c_str() << "': " << e.what(); } } galera::ist::Sender::~Sender() { if (use_ssl_ == true) { ssl_stream_->lowest_layer().close(); delete ssl_stream_; } else { socket_.close(); } gcache_.seqno_unlock(); } template void send_eof(galera::ist::Proto& p, S& stream) { p.send_ctrl(stream, galera::ist::Ctrl::C_EOF); // wait until receiver closes the connection try { gu::byte_t b; size_t n; n = asio::read(stream, asio::buffer(&b, 1)); if (n > 0) { log_warn << "received " << n << " bytes, expected none"; } } catch (asio::system_error& e) { } } void galera::ist::Sender::send(wsrep_seqno_t first, wsrep_seqno_t last, wsrep_seqno_t preload_start) { if (first > last) { if (version_ < VER40) { assert(0); gu_throw_error(EINVAL) << "sender send first greater than last: " << first << " > " << last ; } } try { Proto p(gcache_, version_, conf_.get(CONF_KEEP_KEYS, CONF_KEEP_KEYS_DEFAULT)); int32_t ctrl; if (use_ssl_ == true) { p.recv_handshake(*ssl_stream_); p.send_handshake_response(*ssl_stream_); ctrl = p.recv_ctrl(*ssl_stream_); } else { p.recv_handshake(socket_); p.send_handshake_response(socket_); ctrl = p.recv_ctrl(socket_); } if (ctrl < 0) { gu_throw_error(EPROTO) << "IST handshake failed, peer reported error: " << ctrl; } // send eof even if the set or transactions sent would be empty if (first > last || (first == 0 && last == 0)) { log_info << "IST sender notifying joiner, not sending anything"; if (use_ssl_ == true) { send_eof(p, *ssl_stream_); } else { send_eof(p, socket_); } return; } else { log_info << "IST sender " << first << " -> " << last; } std::vector buf_vec( std::min(static_cast(last - first + 1), 
static_cast(1024))); ssize_t n_read; while ((n_read = gcache_.seqno_get_buffers(buf_vec, first)) > 0) { GU_DBUG_SYNC_WAIT("ist_sender_send_after_get_buffers"); //log_info << "read " << first << " + " << n_read << " from gcache"; for (wsrep_seqno_t i(0); i < n_read; ++i) { // Preload start is the seqno of the lowest trx in // cert index at CC. If the cert index was completely // reset, preload_start will be zero and no preload flag // should be set. bool preload_flag(preload_start > 0 && buf_vec[i].seqno_g() >= preload_start); //log_info << "Sender::send(): seqno " << buf_vec[i].seqno_g() // << ", size " << buf_vec[i].size() << ", preload: " // << preload_flag; if (use_ssl_ == true) { p.send_ordered(*ssl_stream_, buf_vec[i], preload_flag); } else { p.send_ordered(socket_, buf_vec[i], preload_flag); } if (buf_vec[i].seqno_g() == last) { if (use_ssl_ == true) { send_eof(p, *ssl_stream_); } else { send_eof(p, socket_); } return; } } first += n_read; // resize buf_vec to avoid scanning gcache past last size_t next_size(std::min(static_cast(last - first + 1), static_cast(1024))); if (buf_vec.size() != next_size) { buf_vec.resize(next_size); } } } catch (asio::system_error& e) { gu_throw_error(e.code().value()) << "ist send failed: " << e.code() << "', asio error '" << e.what() << "'"; } } extern "C" void* run_async_sender(void* arg) { galera::ist::AsyncSender* as (reinterpret_cast(arg)); log_info << "async IST sender starting to serve " << as->peer().c_str() << " sending " << as->first() << "-" << as->last(); wsrep_seqno_t join_seqno; try { as->send(as->first(), as->last(), as->preload_start()); join_seqno = as->last(); } catch (gu::Exception& e) { log_error << "async IST sender failed to serve " << as->peer().c_str() << ": " << e.what(); join_seqno = -e.get_errno(); } catch (...) 
{ log_error << "async IST sender, failed to serve " << as->peer().c_str(); throw; } try { as->asmap().remove(as, join_seqno); gu_thread_detach(as->thread()); delete as; } catch (gu::NotFound& nf) { log_debug << "async IST sender already removed"; } log_info << "async IST sender served"; return 0; } void galera::ist::AsyncSenderMap::run(const gu::Config& conf, const std::string& peer, wsrep_seqno_t const first, wsrep_seqno_t const last, wsrep_seqno_t const preload_start, int const version) { gu::Critical crit(monitor_); AsyncSender* as(new AsyncSender(conf, peer, first, last, preload_start, *this, version)); int err(gu_thread_create(&as->thread_, 0, &run_async_sender, as)); if (err != 0) { delete as; gu_throw_error(err) << "failed to start sender thread"; } senders_.insert(as); } void galera::ist::AsyncSenderMap::remove(AsyncSender* as, wsrep_seqno_t seqno) { gu::Critical crit(monitor_); std::set::iterator i(senders_.find(as)); if (i == senders_.end()) { throw gu::NotFound(); } senders_.erase(i); } void galera::ist::AsyncSenderMap::cancel() { gu::Critical crit(monitor_); while (senders_.empty() == false) { AsyncSender* as(*senders_.begin()); senders_.erase(*senders_.begin()); int err; as->cancel(); monitor_.leave(); if ((err = gu_thread_join(as->thread_, 0)) != 0) { log_warn << "thread_join() failed: " << err; } monitor_.enter(); delete as; } } galera-26.4.3/galera/src/galera_view.hpp0000664000177500017540000000234313540715002016400 0ustar dbartmy// // Copyright (C) 2015 Codership Oy // /*! @file galera_view.hpp * * Helper class and methods for manipulating views in galera code. 
*/ #ifndef GALERA_VIEW_HPP #define GALERA_VIEW_HPP #include "wsrep_api.h" // for wsrep_view_info_t #include "gu_uuid.hpp" #include static inline bool operator<(const wsrep_uuid_t& lhs, const wsrep_uuid_t& rhs) { return (memcmp(lhs.data, rhs.data, sizeof(lhs.data)) < 0); } namespace galera { class View { public: class UUIDCmp { public: bool operator()(const wsrep_uuid_t& lhs, const wsrep_uuid_t& rhs) const { return (lhs < rhs); } }; // Convenience typedef for member set typedef std::set MembSet; // Default constructor View(); // Construct View from wsrep_view_info_t View(const wsrep_view_info_t&); // Destructor ~View(); // Return true if the members of the view are subset of // other MembSet. bool subset_of(const MembSet& other) const; private: MembSet members_; // members set }; } #endif // GALERA_VIEW_HPP galera-26.4.3/galera/src/replicator_str.cpp0000664000177500017540000012231513540715002017144 0ustar dbartmy// // Copyright (C) 2010-2019 Codership Oy // #include "replicator_smm.hpp" #include "galera_info.hpp" #include #include namespace galera { bool ReplicatorSMM::state_transfer_required(const wsrep_view_info_t& view_info, bool const rejoined) { if (rejoined) { assert(view_info.view >= 0); if (state_uuid_ == view_info.state_id.uuid) // common history { wsrep_seqno_t const group_seqno(view_info.state_id.seqno); wsrep_seqno_t const local_seqno(last_committed()); if (state_() >= S_JOINING) /* See #442 - S_JOINING should be a valid state here */ { if (str_proto_ver_ >= 3) return (local_seqno + 1 < group_seqno); // this CC will add 1 else return (local_seqno < group_seqno); } else { if ((str_proto_ver_ >= 3 && local_seqno >= group_seqno) || (str_proto_ver_ < 3 && local_seqno > group_seqno)) { close(); gu_throw_fatal << "Local state seqno (" << local_seqno << ") is greater than group seqno (" < 0) return req_ + offset + sizeof(uint32_t); else return 0; } ssize_t const len_; char* const req_; bool const own_; }; std::string const StateRequest_v1::MAGIC("STRv1"); 
#ifndef INT32_MAX #define INT32_MAX 0x7fffffff #endif StateRequest_v1::StateRequest_v1 ( const void* const sst_req, ssize_t const sst_req_len, const void* const ist_req, ssize_t const ist_req_len) : len_(MAGIC.length() + 1 + sizeof(uint32_t) + sst_req_len + sizeof(uint32_t) + ist_req_len), req_(static_cast(malloc(len_))), own_(true) { if (!req_) gu_throw_error (ENOMEM) << "Could not allocate state request v1"; if (sst_req_len > INT32_MAX || sst_req_len < 0) gu_throw_error (EMSGSIZE) << "SST request length (" << sst_req_len << ") unrepresentable"; if (ist_req_len > INT32_MAX || ist_req_len < 0) gu_throw_error (EMSGSIZE) << "IST request length (" << sst_req_len << ") unrepresentable"; char* ptr(req_); strcpy (ptr, MAGIC.c_str()); ptr += MAGIC.length() + 1; ptr += gu::serialize4(uint32_t(sst_req_len), ptr, 0); memcpy (ptr, sst_req, sst_req_len); ptr += sst_req_len; ptr += gu::serialize4(uint32_t(ist_req_len), ptr, 0); memcpy (ptr, ist_req, ist_req_len); assert ((ptr - req_) == (len_ - ist_req_len)); } // takes ownership over str buffer StateRequest_v1::StateRequest_v1 (const void* str, ssize_t str_len) : len_(str_len), req_(static_cast(const_cast(str))), own_(false) { if (sst_offset() + 2*sizeof(uint32_t) > size_t(len_)) { assert(0); gu_throw_error (EINVAL) << "State transfer request is too short: " << len_ << ", must be at least: " << (sst_offset() + 2*sizeof(uint32_t)); } if (strncmp (req_, MAGIC.c_str(), MAGIC.length())) { assert(0); gu_throw_error (EINVAL) << "Wrong magic signature in state request v1."; } if (sst_offset() + sst_len() + 2*sizeof(uint32_t) > size_t(len_)) { gu_throw_error (EINVAL) << "Malformed state request v1: sst length: " << sst_len() << ", total length: " << len_; } if (ist_offset() + ist_len() + sizeof(uint32_t) != size_t(len_)) { gu_throw_error (EINVAL) << "Malformed state request v1: parsed field " "length " << sst_len() << " is not equal to total request length " << len_; } } static ReplicatorSMM::StateRequest* read_state_request (const 
void* const req, size_t const req_len) { const char* const str(static_cast(req)); bool const v1(req_len > StateRequest_v1::MAGIC.length() && !strncmp(str, StateRequest_v1::MAGIC.c_str(), StateRequest_v1::MAGIC.length())); log_info << "Detected STR version: " << int(v1) << ", req_len: " << req_len << ", req: " << str; if (v1) { return (new StateRequest_v1(req, req_len)); } else { return (new StateRequest_v0(req, req_len)); } } class IST_request { public: IST_request() : peer_(), uuid_(), last_applied_(), group_seqno_() { } IST_request(const std::string& peer, const wsrep_uuid_t& uuid, wsrep_seqno_t last_applied, wsrep_seqno_t last_missing_seqno) : peer_(peer), uuid_(uuid), last_applied_(last_applied), group_seqno_(last_missing_seqno) { } const std::string& peer() const { return peer_ ; } const wsrep_uuid_t& uuid() const { return uuid_ ; } wsrep_seqno_t last_applied() const { return last_applied_; } wsrep_seqno_t group_seqno() const { return group_seqno_; } private: friend std::ostream& operator<<(std::ostream&, const IST_request&); friend std::istream& operator>>(std::istream&, IST_request&); std::string peer_; wsrep_uuid_t uuid_; wsrep_seqno_t last_applied_; wsrep_seqno_t group_seqno_; }; std::ostream& operator<<(std::ostream& os, const IST_request& istr) { return (os << istr.uuid_ << ":" << istr.last_applied_ << "-" << istr.group_seqno_ << "|" << istr.peer_); } std::istream& operator>>(std::istream& is, IST_request& istr) { char c; return (is >> istr.uuid_ >> c >> istr.last_applied_ >> c >> istr.group_seqno_ >> c >> istr.peer_); } static void get_ist_request(const ReplicatorSMM::StateRequest* str, IST_request* istr) { assert(str->ist_len()); std::string ist_str(static_cast(str->ist_req()), str->ist_len()); std::istringstream is(ist_str); is >> *istr; } static bool sst_is_trivial (const void* const req, size_t const len) { /* Check that the first string in request == ReplicatorSMM::TRIVIAL_SST */ size_t const trivial_len = strlen(ReplicatorSMM::TRIVIAL_SST) + 1; 
return (len >= trivial_len && !memcmp (req, ReplicatorSMM::TRIVIAL_SST, trivial_len)); } wsrep_seqno_t ReplicatorSMM::donate_sst(void* const recv_ctx, const StateRequest& streq, const wsrep_gtid_t& state_id, bool const bypass) { wsrep_buf_t const str = { streq.sst_req(), size_t(streq.sst_len()) }; wsrep_cb_status const err(sst_donate_cb_(app_ctx_, recv_ctx, &str, &state_id, NULL, bypass)); wsrep_seqno_t const ret (WSREP_CB_SUCCESS == err ? state_id.seqno : -ECANCELED); if (ret < 0) { log_error << "SST " << (bypass ? "bypass " : "") << "failed: " << err; } return ret; } void ReplicatorSMM::process_state_req(void* recv_ctx, const void* req, size_t req_size, wsrep_seqno_t const seqno_l, wsrep_seqno_t const donor_seq) { assert(recv_ctx != 0); assert(seqno_l > -1); assert(req != 0); StateRequest* const streq(read_state_request(req, req_size)); LocalOrder lo(seqno_l); gu_trace(local_monitor_.enter(lo)); apply_monitor_.drain(donor_seq); if (co_mode_ != CommitOrder::BYPASS) commit_monitor_.drain(donor_seq); state_.shift_to(S_DONOR); // somehow the following does not work, string is initialized beyond // the first \0: //std::string const req_str(static_cast(streq->sst_req()), // streq->sst_len()); // have to resort to C ways. 
char* const tmp(strndup(static_cast(streq->sst_req()), streq->sst_len())); std::string const req_str(tmp); free (tmp); bool const skip_state_transfer (sst_is_trivial(streq->sst_req(), streq->sst_len()) /* compatibility with older garbd, to be removed in * the next release (2.1)*/ || req_str == std::string(WSREP_STATE_TRANSFER_NONE) ); wsrep_seqno_t rcode (0); bool join_now = true; if (!skip_state_transfer) { if (streq->ist_len()) { IST_request istr; get_ist_request(streq, &istr); if (istr.uuid() == state_uuid_ && istr.last_applied() >= 0) { log_info << "IST request: " << istr; try { gcache_.seqno_lock(istr.last_applied() + 1); } catch(gu::NotFound& nf) { log_info << "IST first seqno " << istr.last_applied() + 1 << " not found from cache, falling back to SST"; // @todo: close IST channel explicitly goto full_sst; } if (streq->sst_len()) // if joiner is waiting for SST, notify it { wsrep_gtid_t const state_id = { istr.uuid(), istr.last_applied() }; rcode = donate_sst(recv_ctx, *streq, state_id, true); // we will join in sst_sent. join_now = false; } if (rcode >= 0) { wsrep_seqno_t const first ((str_proto_ver_ < 3 || cc_lowest_trx_seqno_ == 0) ? istr.last_applied() + 1 : std::min(cc_lowest_trx_seqno_, istr.last_applied()+1)); try { ist_senders_.run(config_, istr.peer(), first, cc_seqno_, cc_lowest_trx_seqno_, /* Historically IST messages versioned * with the global replicator protocol. 
* Need to keep it that way for backward * compatibility */ protocol_version_); } catch (gu::Exception& e) { log_error << "IST failed: " << e.what(); rcode = -e.get_errno(); } } else { log_error << "Failed to bypass SST"; } goto out; } } full_sst: if (cert_.nbo_size() > 0) { log_warn << "Non-blocking operation in progress, cannot donate SST"; rcode = -EAGAIN; } else if (streq->sst_len()) { assert(0 == rcode); wsrep_gtid_t const state_id = { state_uuid_, donor_seq }; if (str_proto_ver_ >= 3) { if (streq->version() > 0) { if (streq->ist_len() <= 0) { log_warn << "Joiner didn't provide IST connection info -" " cert. index preload impossible, bailing out."; rcode = -ENOMSG; goto out; } wsrep_seqno_t preload_start(cc_lowest_trx_seqno_); try { if (preload_start <= 0) { preload_start = cc_seqno_; } gcache_.seqno_lock(preload_start); } catch (gu::NotFound& nf) { log_warn << "Cert index preload first seqno " << preload_start << " not found from gcache (min available: " << gcache_.seqno_min() << ')'; rcode = -ENOMSG; goto out; } log_info << "Cert index preload: " << preload_start << " -> " << cc_seqno_; IST_request istr; get_ist_request(streq, &istr); // Send trxs to rebuild cert index. ist_senders_.run(config_, istr.peer(), preload_start, cc_seqno_, preload_start, /* Historically IST messages are versioned * with the global replicator protocol. * Need to keep it that way for backward * compatibility */ protocol_version_); } else /* streq->version() == 0 */ { log_info << "STR v0: assuming backup request, skipping " "cert. index preload."; } } rcode = donate_sst(recv_ctx, *streq, state_id, false); // we will join in sst_sent. 
join_now = false; } else { log_warn << "SST request is null, SST canceled."; rcode = -ECANCELED; } } out: delete streq; local_monitor_.leave(lo); if (join_now || rcode < 0) { gcs_.join(gu::GTID(state_uuid_, donor_seq), rcode); } } void ReplicatorSMM::prepare_for_IST (void*& ptr, ssize_t& len, const wsrep_uuid_t& group_uuid, wsrep_seqno_t const last_needed) { assert(group_uuid != GU_UUID_NIL); // Up from STR protocol version 3 joiner is assumed to be able receive // some transactions to rebuild cert index, so IST receiver must be // prepared regardless of the group. wsrep_seqno_t last_applied(last_committed()); ist_event_queue_.reset(); if (state_uuid_ != group_uuid) { if (str_proto_ver_ < 3) { gu_throw_error (EPERM) << "Local state UUID (" << state_uuid_ << ") does not match group state UUID (" << group_uuid << ')'; } else { last_applied = -1; // to cause full SST } } else { assert(last_applied < last_needed); } if (last_applied < 0 && str_proto_ver_ < 3) { gu_throw_error (EPERM) << "Local state seqno is undefined"; } wsrep_seqno_t const first_needed(last_applied + 1); log_info << "####### IST uuid:" << state_uuid_ << " f: " << first_needed << ", l: " << last_needed << ", STRv: " << str_proto_ver_; //remove /* Historically IST messages are versioned with the global replicator * protocol. 
Need to keep it that way for backward compatibility */ std::string recv_addr(ist_receiver_.prepare(first_needed, last_needed, protocol_version_,source_id())); std::ostringstream os; /* NOTE: in case last_applied is -1, first_needed is 0, but first legal * cached seqno is 1 so donor will revert to SST anyways, as is required */ os << IST_request(recv_addr, state_uuid_, last_applied, last_needed); char* str = strdup (os.str().c_str()); // cppcheck-suppress nullPointer if (!str) gu_throw_error (ENOMEM) << "Failed to allocate IST buffer."; log_debug << "Prepared IST request: " << str; len = strlen(str) + 1; ptr = str; } ReplicatorSMM::StateRequest* ReplicatorSMM::prepare_state_request (const void* sst_req, ssize_t sst_req_len, const wsrep_uuid_t& group_uuid, wsrep_seqno_t const last_needed_seqno) { try { // IF there are ongoing NBO, SST might not be possible because // ongoing NBO is blocking and waiting for NBO end events. // Therefore in precense of ongoing NBOs we set SST request // string to zero and hope that donor can serve IST. size_t const nbo_size(cert_.nbo_size()); if (nbo_size) { log_info << "Non-blocking operation is ongoing. " "Node can receive IST only."; sst_req = NULL; sst_req_len = 0; } switch (str_proto_ver_) { case 0: if (0 == sst_req_len) gu_throw_error(EPERM) << "SST is not possible."; return new StateRequest_v0 (sst_req, sst_req_len); case 1: case 2: case 3: { void* ist_req(0); ssize_t ist_req_len(0); try { gu_trace(prepare_for_IST (ist_req, ist_req_len, group_uuid, last_needed_seqno)); assert(ist_req_len > 0); assert(NULL != ist_req); } catch (gu::Exception& e) { log_warn << "Failed to prepare for incremental state transfer: " << e.what() << ". 
IST will be unavailable."; if (0 == sst_req_len) gu_throw_error(EPERM) << "neither SST nor IST is possible."; } StateRequest* ret = new StateRequest_v1 (sst_req, sst_req_len, ist_req, ist_req_len); free (ist_req); return ret; } default: gu_throw_fatal << "Unsupported STR protocol: " << str_proto_ver_; } } catch (std::exception& e) { log_fatal << "State Transfer Request preparation failed: " << e.what() << " Can't continue, aborting."; } catch (...) { log_fatal << "State Transfer Request preparation failed: " "unknown exception. Can't continue, aborting."; } abort(); } static bool retry_str(int ret) { return (ret == -EAGAIN || ret == -ENOTCONN); } void ReplicatorSMM::send_state_request (const StateRequest* const req) { long ret; long tries = 0; gu_uuid_t ist_uuid = {{0, }}; gcs_seqno_t ist_seqno = GCS_SEQNO_ILL; if (req->ist_len()) { IST_request istr; get_ist_request(req, &istr); ist_uuid = istr.uuid(); ist_seqno = istr.last_applied(); } do { tries++; gcs_seqno_t seqno_l; ret = gcs_.request_state_transfer(str_proto_ver_, req->req(), req->len(), sst_donor_, gu::GTID(ist_uuid, ist_seqno),seqno_l); if (ret < 0) { if (!retry_str(ret)) { log_error << "Requesting state transfer failed: " << ret << "(" << strerror(-ret) << ")"; } else if (1 == tries) { log_info << "Requesting state transfer failed: " << ret << "(" << strerror(-ret) << "). " << "Will keep retrying every " << sst_retry_sec_ << " second(s)"; } } if (seqno_l != GCS_SEQNO_ILL) { /* Check that we're not running out of space in monitor. */ if (local_monitor_.would_block(seqno_l)) { log_error << "Slave queue grew too long while trying to " << "request state transfer " << tries << " time(s). " << "Please make sure that there is " << "at least one fully synced member in the group. 
" << "Application must be restarted."; ret = -EDEADLK; } else { // we are already holding local monitor LocalOrder lo(seqno_l); local_monitor_.self_cancel(lo); } } } while (retry_str(ret) && (usleep(sst_retry_sec_ * 1000000), true)); if (ret >= 0) { if (1 == tries) { log_info << "Requesting state transfer: success, donor: " << ret; } else { log_info << "Requesting state transfer: success after " << tries << " tries, donor: " << ret; } } else { sst_state_ = SST_REQ_FAILED; st_.set(state_uuid_, last_committed(), safe_to_bootstrap_); st_.mark_safe(); gu::Lock lock(closing_mutex_); if (!closing_ && state_() > S_CLOSED) { log_fatal << "State transfer request failed unrecoverably: " << -ret << " (" << strerror(-ret) << "). Most likely " << "it is due to inability to communicate with the " << "cluster primary component. Restart required."; abort(); } else { // connection is being closed, send failure is expected } } } void ReplicatorSMM::request_state_transfer (void* recv_ctx, const wsrep_uuid_t& group_uuid, wsrep_seqno_t const cc_seqno, const void* const sst_req, ssize_t const sst_req_len) { assert(sst_req_len >= 0); StateRequest* const req(prepare_state_request(sst_req, sst_req_len, group_uuid, cc_seqno)); gu::Lock sst_lock(sst_mutex_); sst_received_ = false; st_.mark_unsafe(); send_state_request(req); state_.shift_to(S_JOINING); sst_state_ = SST_WAIT; sst_seqno_ = WSREP_SEQNO_UNDEFINED; /* There are two places where we may need to adjust GCache. * This is the first one, which we can do while waiting for SST to complete. * Here we reset seqno map completely if we have different histories. * This MUST be done before IST starts. 
*/ bool const first_reset (state_uuid_ /* GCache has */ != group_uuid /* current PC has */); if (first_reset) { log_info << "Resetting GCache seqno map due to different histories."; gcache_.seqno_reset(gu::GTID(group_uuid, cc_seqno)); } if (sst_req_len != 0) { if (sst_is_trivial(sst_req, sst_req_len)) { sst_uuid_ = group_uuid; sst_seqno_ = cc_seqno; sst_received_ = true; } else { while (false == sst_received_) sst_lock.wait(sst_cond_); } if (sst_uuid_ != group_uuid) { log_fatal << "Application received wrong state: " << "\n\tReceived: " << sst_uuid_ << "\n\tRequired: " << group_uuid; sst_state_ = SST_FAILED; log_fatal << "Application state transfer failed. This is " << "unrecoverable condition, restart required."; st_.set(sst_uuid_, sst_seqno_, safe_to_bootstrap_); st_.mark_safe(); abort(); } else { assert(sst_seqno_ != WSREP_SEQNO_UNDEFINED); /* There are two places where we may need to adjust GCache. * This is the second one. * Here we reset seqno map completely if we have gap in seqnos * between the received snapshot and current GCache contents. * This MUST be done before IST starts. */ // there may be possible optimization to this when cert index // transfer is implemented (it may close the gap), but not by much. if (!first_reset && (last_committed() /* GCache has */ != sst_seqno_ /* current state has */)) { log_info << "Resetting GCache seqno map due to seqno gap: " << last_committed() << ".." 
<< sst_seqno_; gcache_.seqno_reset(gu::GTID(sst_uuid_, sst_seqno_)); } update_state_uuid (sst_uuid_); if (str_proto_ver_ < 3) { // all IST events will bypass certification gu::GTID const cert_position (sst_uuid_, std::max(cc_seqno, sst_seqno_)); cert_.assign_initial_position(cert_position, trx_params_.version_); // with higher versions this happens in cert index preload } apply_monitor_.set_initial_position(WSREP_UUID_UNDEFINED, -1); apply_monitor_.set_initial_position(sst_uuid_, sst_seqno_); if (co_mode_ != CommitOrder::BYPASS) { commit_monitor_.set_initial_position(WSREP_UUID_UNDEFINED, -1); commit_monitor_.set_initial_position(sst_uuid_, sst_seqno_); } log_info << "Installed new state from SST: " << state_uuid_ << ":" << sst_seqno_; } } else { assert (state_uuid_ == group_uuid); sst_seqno_ = last_committed(); } if (st_.corrupt()) { if (sst_req_len != 0 && !sst_is_trivial(sst_req, sst_req_len)) { st_.mark_uncorrupt(sst_uuid_, sst_seqno_); } else { log_fatal << "Application state is corrupt and cannot " << "be recorvered. Restart required."; abort(); } } else { st_.mark_safe(); } if (req->ist_len() > 0) { if (state_uuid_ != group_uuid) { log_fatal << "Sanity check failed: my state UUID " << state_uuid_ << " is different from group state UUID " << group_uuid << ". Can't continue with IST. Aborting."; st_.set(state_uuid_, last_committed(), safe_to_bootstrap_); st_.mark_safe(); abort(); } // IST is prepared only with str proto ver 1 and above // IST is *always* prepared at str proto ver 3 or higher if (last_committed() < cc_seqno || str_proto_ver_ >= 3) { wsrep_seqno_t const ist_from(last_committed() + 1); wsrep_seqno_t const ist_to(cc_seqno); bool const do_ist(ist_from > 0 && ist_from <= ist_to); if (do_ist) { log_info << "Receiving IST: " << (ist_to - ist_from + 1) << " writesets, seqnos " << ist_from << "-" << ist_to; } else { log_info << "Cert. 
index preload up to " << ist_from - 1; } ist_receiver_.ready(ist_from); recv_IST(recv_ctx); wsrep_seqno_t const ist_seqno(ist_receiver_.finished()); if (do_ist) { assert(ist_seqno > sst_seqno_); // must exceed sst_seqno_ sst_seqno_ = ist_seqno; // Note: apply_monitor_ must be drained to avoid race between // IST appliers and GCS appliers, GCS action source may // provide actions that have already been applied via IST. apply_monitor_.drain(ist_seqno); } else { assert(sst_seqno_ > 0); // must have been esptablished via SST assert(ist_seqno >= cc_seqno); // index must be rebuilt up to assert(ist_seqno <= sst_seqno_); } if (ist_seqno == sst_seqno_) { log_info << "IST received: " << state_uuid_ << ":" <= cc_seqno); } #ifndef NDEBUG { gu::Lock lock(closing_mutex_); assert(sst_seqno_ >= cc_seqno || closing_ || state_() == S_CLOSED); } #endif /* NDEBUG */ delete req; } void ReplicatorSMM::process_IST_writeset(void* recv_ctx, const TrxHandleSlavePtr& ts_ptr) { TrxHandleSlave& ts(*ts_ptr); assert(ts.global_seqno() > 0); assert(ts.state() != TrxHandle::S_COMMITTED); assert(ts.state() != TrxHandle::S_ROLLED_BACK); bool const skip(ts.is_dummy()); if (gu_likely(!skip)) { ts.verify_checksum(); assert(ts.certified()); assert(ts.depends_seqno() >= 0); } else { assert(ts.is_dummy()); } gu_trace(apply_trx(recv_ctx, ts)); GU_DBUG_SYNC_WAIT("recv_IST_after_apply_trx"); if (gu_unlikely (gu::Logger::no_log(gu::LOG_DEBUG) == false)) { std::ostringstream os; if (gu_likely(!skip)) os << "IST received trx body: " << ts; else os << "IST skipping trx " << ts.global_seqno(); log_debug << os.str(); } } void ReplicatorSMM::recv_IST(void* recv_ctx) { ISTEvent::Type event_type(ISTEvent::T_NULL); TrxHandleSlavePtr ts; wsrep_view_info_t* view; try { bool exit_loop(false); while (exit_loop == false) { ISTEvent ev(ist_event_queue_.pop_front()); event_type = ev.type(); switch (event_type) { case ISTEvent::T_NULL: exit_loop = true; continue; case ISTEvent::T_TRX: ts = ev.ts(); assert(ts); 
process_IST_writeset(recv_ctx, ts); exit_loop = ts->exit_loop(); continue; case ISTEvent::T_VIEW: { view = ev.view(); wsrep_seqno_t const cs(view->state_id.seqno); submit_view_info(recv_ctx, view); ::free(view); CommitOrder co(cs, CommitOrder::NO_OOOC); commit_monitor_.leave(co); ApplyOrder ao(cs, cs - 1, false); apply_monitor_.leave(ao); GU_DBUG_SYNC_WAIT("recv_IST_after_conf_change"); continue; } } gu_throw_fatal << "Unrecognized event of type " << ev.type(); } } catch (gu::Exception& e) { std::ostringstream os; os << "Receiving IST failed, node restart required: " << e.what(); switch (event_type) { case ISTEvent::T_NULL: os << ". Null event."; break; case ISTEvent::T_TRX: if (ts) os << ". Failed writeset: " << *ts; else os << ". Corrupt IST event queue."; break; case ISTEvent::T_VIEW: os << ". VIEW event"; break; } log_fatal << os.str(); mark_corrupt_and_close(); } } void ReplicatorSMM::ist_trx(const TrxHandleSlavePtr& tsp, bool must_apply, bool preload) { assert(tsp != 0); TrxHandleSlave& ts(*tsp); assert(ts.depends_seqno() >= 0 || ts.state() == TrxHandle::S_ABORTING || ts.nbo_end()); assert(ts.local_seqno() == WSREP_SEQNO_UNDEFINED); ts.verify_checksum(); if (gu_unlikely(cert_.position() == WSREP_SEQNO_UNDEFINED)) { // This is the first pre IST event for rebuilding cert index cert_.assign_initial_position( /* proper UUID will be installed by CC */ gu::GTID(gu::UUID(), ts.global_seqno() - 1), ts.version()); } if (ts.nbo_start() || ts.nbo_end()) { if (must_apply) { ts.verify_checksum(); ts.set_state(TrxHandle::S_CERTIFYING); Certification::TestResult result(cert_.append_trx(tsp)); switch (result) { case Certification::TEST_OK: if (ts.nbo_end()) { // This is the same as in process_trx() if (ts.ends_nbo() == WSREP_SEQNO_UNDEFINED) { assert(ts.is_dummy()); } else { // Signal NBO waiter gu::shared_ptr::type nbo_ctx( cert_.nbo_ctx(ts.ends_nbo())); assert(nbo_ctx != 0); nbo_ctx->set_ts(tsp); return; // not pushing to queue below } } break; case 
Certification::TEST_FAILED: { assert(ts.nbo_end()); // non-effective nbo_end assert(ts.is_dummy()); break; } } /* regardless of certification outcome, event must be passed to * apply_trx() as it carries global seqno */ } else { // Skipping NBO events in preload is fine since joiner either // have all events applied in case of pure IST and donor refuses to // donate SST from the position there are NBOs going on. assert(preload); log_debug << "Skipping NBO event: " << ts; wsrep_seqno_t const pos(cert_.increment_position()); assert(ts.global_seqno() == pos); (void)pos; } #if 0 log_info << "\n IST processing NBO_" << (ts.nbo_start() ? "START(" : "END(") << ts.global_seqno() << ")" << (must_apply ? ", must apply" : ", skip") << ", ends NBO: " << ts.ends_nbo(); #endif } else { if (gu_unlikely(preload == true)) { if (gu_likely(!ts.is_dummy())) { ts.set_state(TrxHandle::S_CERTIFYING); Certification::TestResult result(cert_.append_trx(tsp)); if (result != Certification::TEST_OK) { gu_throw_fatal << "Pre IST trx append returned unexpected " << "certification result " << result << ", expected " << Certification::TEST_OK << "must abort to maintain consistency"; } // Mark trx committed for certification bookkeeping here // if it won't pass to applying stage if (!must_apply) cert_.set_trx_committed(ts); } else { wsrep_seqno_t const pos(cert_.increment_position()); assert(ts.global_seqno() == pos); (void)pos; } } else if (ts.state() == TrxHandle::S_REPLICATING) { ts.set_state(TrxHandle::S_CERTIFYING); } } if (gu_likely(must_apply == true)) { ist_event_queue_.push_back(tsp); } } void ReplicatorSMM::ist_end(int error) { ist_event_queue_.eof(error); } void ReplicatorSMM::ist_cc(const gcs_action& act, bool must_apply, bool preload) { assert(GCS_ACT_CCHANGE == act.type); assert(act.seqno_g > 0); gcs_act_cchange const conf(act.buf, act.size); assert(conf.conf_id >= 0); // Primary configuration assert(conf.seqno == act.seqno_g); wsrep_uuid_t uuid_undefined(WSREP_UUID_UNDEFINED); 
wsrep_view_info_t* const view_info( galera_view_info_create(conf, capabilities(conf.repl_proto_ver), -1, uuid_undefined)); if (gu_unlikely(cert_.position() == WSREP_SEQNO_UNDEFINED) && (must_apply || preload)) { // This is the first IST event for rebuilding cert index, // need to initialize certification establish_protocol_versions(conf.repl_proto_ver); cert_.assign_initial_position(gu::GTID(conf.uuid, conf.seqno - 1), trx_params_.version_); } if (must_apply == true) { process_conf_change(0, act); /* TO monitors need to be entered here to maintain critical * section over passing the view through the event queue to * an applier and ensure that the view is submitted in isolation. * Applier is to leave monitors and free the view after it is * submitted */ ApplyOrder ao(conf.seqno, conf.seqno - 1, false); apply_monitor_.enter(ao); CommitOrder co(conf.seqno, CommitOrder::NO_OOOC); commit_monitor_.enter(co); ist_event_queue_.push_back(view_info); } else { if (preload == true) { /* CC is part of index preload but won't be processed * by process_conf_change() * Order of these calls is essential: trx_params_.version_ may * be altered by establish_protocol_versions() */ establish_protocol_versions(conf.repl_proto_ver); cert_.adjust_position(*view_info, gu::GTID(conf.uuid, conf.seqno), trx_params_.version_); // record CC releated state seqnos, needed for IST on DONOR record_cc_seqnos(conf.seqno, "preload"); } ::free(view_info); } } } /* namespace galera */ galera-26.4.3/galera/src/replicator.hpp0000664000177500017540000001425413540715002016263 0ustar dbartmy// // Copyright (C) 2010-2017 Codership Oy // #ifndef GALERA_REPLICATOR_HPP #define GALERA_REPLICATOR_HPP #include "wsrep_api.h" #include "galera_exception.hpp" #include "trx_handle.hpp" struct gcs_action; #include #include namespace galera { class Statement; class RowId; //! 
@class Galera // // @brief Abstract Galera replicator interface class Replicator { public: struct Param { static std::string const debug_log; #ifdef GU_DBUG_ON static std::string const dbug; static std::string const signal; #endif // GU_DBUG_ON }; static const char* const TRIVIAL_SST; typedef enum { S_DESTROYED, S_CLOSED, S_CONNECTED, S_JOINING, S_JOINED, S_SYNCED, S_DONOR } State; Replicator() { } virtual ~Replicator() { } virtual wsrep_status_t connect(const std::string& cluster_name, const std::string& cluster_url, const std::string& state_donor, bool bootstrap) = 0; virtual wsrep_status_t close() = 0; virtual wsrep_status_t async_recv(void* recv_ctx) = 0; virtual wsrep_cap_t capabilities() const = 0; virtual int trx_proto_ver() const = 0; virtual int repl_proto_ver() const = 0; virtual TrxHandleMasterPtr get_local_trx(wsrep_trx_id_t, bool) = 0; virtual void discard_local_trx(TrxHandleMaster* trx_id) = 0; virtual TrxHandleMasterPtr local_conn_trx(wsrep_conn_id_t conn_id, bool create) = 0; virtual void discard_local_conn_trx(wsrep_conn_id_t conn_id) = 0; virtual wsrep_status_t replicate(TrxHandleMaster& trx, wsrep_trx_meta_t* meta) = 0; virtual wsrep_status_t certify(TrxHandleMaster& trx, wsrep_trx_meta_t* meta) = 0; virtual wsrep_status_t replay_trx(TrxHandleMaster& trx, TrxHandleLock& lock, void* replay_ctx) = 0; virtual wsrep_status_t abort_trx(TrxHandleMaster& trx, wsrep_seqno_t bf_seqno, wsrep_seqno_t* victim_seqno) = 0; virtual wsrep_status_t sync_wait(wsrep_gtid_t* upto, int tout, wsrep_gtid_t* gtid) = 0; virtual wsrep_status_t last_committed_id(wsrep_gtid_t* gtid) const = 0; virtual wsrep_status_t to_isolation_begin(TrxHandleMaster& trx, wsrep_trx_meta_t* meta) = 0; virtual wsrep_status_t to_isolation_end(TrxHandleMaster& trx, const wsrep_buf_t* err) = 0; virtual wsrep_status_t preordered_collect(wsrep_po_handle_t& handle, const struct wsrep_buf* data, size_t count, bool copy) = 0; virtual wsrep_status_t preordered_commit(wsrep_po_handle_t& handle, const 
wsrep_uuid_t& source, uint64_t flags, int pa_range, bool commit) =0; virtual wsrep_status_t sst_sent(const wsrep_gtid_t& state_id, int rcode) = 0; virtual wsrep_status_t sst_received(const wsrep_gtid_t& state_id, const wsrep_buf_t* state, int rcode) = 0; // action source interface virtual void process_trx(void* recv_ctx, const TrxHandleSlavePtr& trx) = 0; virtual void process_commit_cut(wsrep_seqno_t seq, wsrep_seqno_t seqno_l) = 0; virtual void process_conf_change(void* recv_ctx, const struct gcs_action& cc) = 0; virtual void process_state_req(void* recv_ctx, const void* req, size_t req_size, wsrep_seqno_t seqno_l, wsrep_seqno_t donor_seq) = 0; virtual void process_join(wsrep_seqno_t seqno, wsrep_seqno_t seqno_l) =0; virtual void process_sync(wsrep_seqno_t seqno_l) = 0; virtual void process_vote(wsrep_seqno_t seq, int64_t code, wsrep_seqno_t seqno_l) = 0; virtual const struct wsrep_stats_var* stats_get() const = 0; virtual void stats_reset() = 0; // static void stats_free(struct wsrep_stats_var*) must be declared in // the child class /*! @throws NotFound */ virtual void param_set (const std::string& key, const std::string& value) = 0; /*! 
@throws NotFound */ virtual std::string param_get (const std::string& key) const = 0; virtual const gu::Config& params() const = 0; virtual wsrep_seqno_t pause() = 0; virtual void resume() = 0; virtual void desync() = 0; virtual void resync() = 0; virtual const wsrep_uuid_t& source_id() const = 0; virtual void cancel_seqnos(wsrep_seqno_t seqno_l, wsrep_seqno_t seqno_g) = 0; virtual bool corrupt() const = 0; protected: static void register_params(gu::Config&); }; } #endif // GALERA_REPLICATOR_HPP galera-26.4.3/galera/src/SConscript0000664000177500017540000000373013540715002015415 0ustar dbartmy Import('env') libgaleraxx_env = env.Clone() # Include paths libgaleraxx_env.Append(CPPPATH = Split(''' # #/common #/galerautils/src #/gcache/src #/gcs/src ''')) libgaleraxx_srcs = [ 'mapped_buffer.cpp', 'key_data.cpp', 'write_set.cpp', 'data_set.cpp', 'key_set.cpp', 'write_set_ng.cpp', 'trx_handle.cpp', 'key_entry_os.cpp', 'wsdb.cpp', 'certification.cpp', 'galera_service_thd.cpp', 'wsrep_params.cpp', 'replicator_smm_params.cpp', 'gcs_action_source.cpp', 'galera_info.cpp', 'replicator.cpp', 'ist_proto.cpp', 'ist.cpp', 'gcs_dummy.cpp', 'saved_state.cpp', 'galera_view.cpp' ] objs = libgaleraxx_env.Object(libgaleraxx_srcs) env.Append(LIBGALERA_OBJS = libgaleraxx_env.SharedObject(libgaleraxx_srcs)) # Environment for multimaster library build mmlib_env = libgaleraxx_env.Clone() mmlib_env.Append(CPPFLAGS = ' -DGALERA_MULTIMASTER') mmlib_env.Replace(SHOBJPREFIX = 'libmmgalera++-') mmlib_srcs = [ 'replicator_smm.cpp', 'replicator_str.cpp', 'replicator_smm_stats.cpp' ] mm_objs = mmlib_env.Object(mmlib_srcs) env.Append(LIBMMGALERA_OBJS = mmlib_env.SharedObject(mmlib_srcs)) # Environment to compile provider unit (part of multimaster library) # This is needed to hardcode version and revision mmprovider_env = mmlib_env.Clone() Import ('GALERA_VER', 'GALERA_REV') mmprovider_env.Append(CPPFLAGS = ' -DGALERA_VER=\\"' + GALERA_VER + '\\"') mmprovider_env.Append(CPPFLAGS = ' -DGALERA_REV=\\"' + 
GALERA_REV + '\\"') mmprovider_srcs = [ 'wsrep_provider.cpp' ] mm_objs += mmprovider_env.Object(mmprovider_srcs) env.Append(LIBMMGALERA_OBJS = mmprovider_env.SharedObject(mmprovider_srcs)) libgaleraxx_env.StaticLibrary('galera++', objs + mm_objs) galera-26.4.3/galera/src/certification.hpp0000664000177500017540000002213713540715002016741 0ustar dbartmy// // Copyright (C) 2010-2018 Codership Oy // #ifndef GALERA_CERTIFICATION_HPP #define GALERA_CERTIFICATION_HPP #include "nbo.hpp" #include "trx_handle.hpp" #include "key_entry_ng.hpp" #include "galera_service_thd.hpp" #include "galera_view.hpp" #include #include #include #include #include #include #include namespace galera { class Certification { public: static std::string const PARAM_LOG_CONFLICTS; static std::string const PARAM_OPTIMISTIC_PA; static void register_params(gu::Config&); typedef gu::UnorderedSet CertIndex; typedef gu::UnorderedSet CertIndexNG; typedef gu::UnorderedMultiset CertIndexNBO; private: typedef std::multiset DepsSet; typedef std::map TrxMap; public: typedef enum { TEST_OK, TEST_FAILED } TestResult; Certification(gu::Config& conf, ServiceThd* thd); ~Certification(); void assign_initial_position(const gu::GTID& gtid, int version); TestResult append_trx(const TrxHandleSlavePtr&); TestResult test(const TrxHandleSlavePtr&, bool store_keys); wsrep_seqno_t position() const { return position_; } wsrep_seqno_t increment_position(); /* for dummy IST events */ /* this is for configuration change use */ void adjust_position(const View&, const gu::GTID& gtid, int version); wsrep_seqno_t get_safe_to_discard_seqno() const { gu::Lock lock(mutex_); return get_safe_to_discard_seqno_(); } wsrep_seqno_t purge_trxs_upto(wsrep_seqno_t const seqno, bool const handle_gcache) { gu::Lock lock(mutex_); const wsrep_seqno_t stds(get_safe_to_discard_seqno_()); // assert(seqno <= get_safe_to_discard_seqno()); // Note: setting trx committed is not done in total order so // safe to discard seqno may decrease. 
Enable assertion above when // this issue is fixed. return purge_trxs_upto_(std::min(seqno, stds), handle_gcache); } // Set trx corresponding to handle committed. Return purge seqno if // index purge is required, -1 otherwise. wsrep_seqno_t set_trx_committed(TrxHandleSlave&); // statistics section void stats_get(double& avg_cert_interval, double& avg_deps_dist, size_t& index_size) const { gu::Lock lock(stats_mutex_); avg_cert_interval = 0; avg_deps_dist = 0; if (n_certified_) { avg_cert_interval = double(cert_interval_) / n_certified_; avg_deps_dist = double(deps_dist_) / n_certified_; } index_size = index_size_; } void stats_reset() { gu::Lock lock(stats_mutex_); cert_interval_ = 0; deps_dist_ = 0; n_certified_ = 0; index_size_ = 0; } void param_set(const std::string& key, const std::string& value); wsrep_seqno_t lowest_trx_seqno() const { return (trx_map_.empty() ? position_ : trx_map_.begin()->first); } // // NBO context lifecycle: // * Context is created when NBO-start event is received // * Context stays in nbo_ctx_map_ until client calls erase_nbo_ctx() // // Get NBO context matching to global seqno gu::shared_ptr::type nbo_ctx(wsrep_seqno_t); // Erase NBO context entry void erase_nbo_ctx(wsrep_seqno_t); size_t nbo_size() const { return nbo_map_.size(); } void mark_inconsistent(); bool is_inconsistent() const { return inconsistent_; } private: // Non-copyable Certification(const Certification&); Certification& operator=(const Certification&); TestResult do_test(const TrxHandleSlavePtr&, bool store_keys); TestResult do_test_v3to5(TrxHandleSlave*, bool); TestResult do_test_preordered(TrxHandleSlave*); TestResult do_test_nbo(const TrxHandleSlavePtr&); void purge_for_trx(TrxHandleSlave*); // unprotected variants for internal use wsrep_seqno_t get_safe_to_discard_seqno_() const; wsrep_seqno_t purge_trxs_upto_(wsrep_seqno_t, bool sync); gu::shared_ptr::type nbo_ctx_unlocked(wsrep_seqno_t); bool index_purge_required() { static unsigned int const KEYS_THRESHOLD (1 << 
10); // 1K static unsigned int const BYTES_THRESHOLD(128 << 20); // 128M static unsigned int const TRXS_THRESHOLD (127); /* if either key count, byte count or trx count exceed their * threshold, zero up counts and return true. */ return ((key_count_ > KEYS_THRESHOLD || byte_count_ > BYTES_THRESHOLD || trx_count_ > TRXS_THRESHOLD) && (key_count_ = 0, byte_count_ = 0, trx_count_ = 0, true)); } class PurgeAndDiscard { public: PurgeAndDiscard(Certification& cert) : cert_(cert) { } void operator()(TrxMap::value_type& vt) const { { TrxHandleSlave* trx(vt.second.get()); // Trying to lock trx mutex here may cause deadlock // with streaming replication. Locking can be skipped // because trx is only read here and refcount uses atomics. // Memory barrier is provided by certification mutex. // // TrxHandleLock lock(*trx); if (!cert_.is_inconsistent()) { assert(trx->is_committed() == true); if (trx->is_committed() == false) { log_warn <<"trx not committed in purge and discard: " << *trx; } } // If depends seqno is not WSREP_SEQNO_UNDEFINED // write set certification has passed and keys have been // inserted into index and purge is needed. // TOI write sets will always pass regular certification // and keys will be inserted, however if they fail // NBO certification depends seqno is set to // WSREP_SEQNO_UNDEFINED. Therefore purge should always // be done for TOI write sets. 
if (trx->depends_seqno() >= 0 || trx->is_toi() == true) { cert_.purge_for_trx(trx); } } } PurgeAndDiscard(const PurgeAndDiscard& other) : cert_(other.cert_) { } private: void operator=(const PurgeAndDiscard&); Certification& cert_; }; int version_; gu::Config& conf_; TrxMap trx_map_; CertIndexNG cert_index_ng_; NBOMap nbo_map_; NBOCtxMap nbo_ctx_map_; CertIndexNBO nbo_index_; TrxHandleSlave::Pool nbo_pool_; DepsSet deps_set_; View current_view_; ServiceThd* service_thd_; gu::Mutex mutex_; size_t trx_size_warn_count_; wsrep_seqno_t initial_position_; wsrep_seqno_t position_; wsrep_seqno_t nbo_position_; wsrep_seqno_t safe_to_discard_seqno_; wsrep_seqno_t last_pa_unsafe_; wsrep_seqno_t last_preordered_seqno_; wsrep_trx_id_t last_preordered_id_; gu::Mutex stats_mutex_; size_t n_certified_; wsrep_seqno_t deps_dist_; wsrep_seqno_t cert_interval_; size_t index_size_; size_t key_count_; size_t byte_count_; size_t trx_count_; /* The only reason those are not static constants is because * there might be a need to thange them without recompilation. 
* see #454 */ int const max_length_; /* Purge trx_map_ when it exceeds this * NOTE: this effectively sets a limit * on trx certification interval */ unsigned int const max_length_check_; /* Mask how often to check */ bool inconsistent_; bool log_conflicts_; bool optimistic_pa_; }; } #endif // GALERA_CERTIFICATION_HPP galera-26.4.3/galera/src/galera_gcs.hpp0000664000177500017540000003425413540715002016210 0ustar dbartmy// // Copyright (C) 2010-2014 Codership Oy // #ifndef GALERA_GCS_HPP #define GALERA_GCS_HPP #include "write_set_ng.hpp" #include "wsrep_api.h" #include "gcs.hpp" #include #include #include #include #include #include // for gu::Mutex and gu::Cond #include #include #define GCS_IMPL Gcs namespace galera { class GcsI { public: GcsI() {} virtual ~GcsI() {} virtual ssize_t connect(const std::string& cluster_name, const std::string& cluster_url, bool bootstrap) = 0; virtual ssize_t set_initial_position(const gu::GTID& gtid) = 0; virtual void close() = 0; virtual ssize_t recv(gcs_action& act) = 0; typedef WriteSetNG::GatherVector WriteSetVector; virtual ssize_t sendv(const WriteSetVector&, size_t, gcs_act_type_t, bool, bool) = 0; virtual ssize_t send (const void*, size_t, gcs_act_type_t, bool) = 0; virtual ssize_t replv(const WriteSetVector&, gcs_action& act, bool) = 0; virtual ssize_t repl (gcs_action& act, bool) = 0; virtual void caused(gu::GTID& gtid, gu::datetime::Date& wait_until) = 0; virtual ssize_t schedule() = 0; virtual ssize_t interrupt(ssize_t) = 0; virtual ssize_t resume_recv() = 0; virtual ssize_t request_state_transfer(int version, const void* req, ssize_t req_len, const std::string& sst_donor, const gu::GTID& ist_gtid, gcs_seqno_t& order) = 0; virtual ssize_t desync(gcs_seqno_t& seqno_l) = 0; virtual void join(const gu::GTID&, int code) = 0; virtual gcs_seqno_t local_sequence() = 0; virtual ssize_t set_last_applied(const gu::GTID&) = 0; virtual int vote(const gu::GTID& gtid, uint64_t code, const void* data, size_t data_len) = 0; virtual void 
get_stats(gcs_stats*) const = 0; virtual void flush_stats() = 0; virtual void get_status(gu::Status&) const = 0; /*! @throws NotFound */ virtual void param_set (const std::string& key, const std::string& value) = 0; /*! @throws NotFound */ virtual char* param_get (const std::string& key) const = 0; virtual size_t max_action_size() const = 0; }; class Gcs : public GcsI { public: Gcs(gu::Config& config, gcache::GCache& cache, int repl_proto_ver = 0, int appl_proto_ver = 0, const char* node_name = 0, const char* node_incoming = 0) : conn_(gcs_create(reinterpret_cast(&config), reinterpret_cast(&cache), node_name, node_incoming, repl_proto_ver, appl_proto_ver)) { log_info << "Passing config to GCS: " << config; if (conn_ == 0) gu_throw_fatal << "could not create gcs connection"; } ~Gcs() { gcs_destroy(conn_); } ssize_t connect(const std::string& cluster_name, const std::string& cluster_url, bool const bootstrap) { return gcs_open(conn_, cluster_name.c_str(), cluster_url.c_str(), bootstrap); } ssize_t set_initial_position(const gu::GTID& gtid) { return gcs_init(conn_, gtid); } void close() { gcs_close(conn_); } ssize_t recv(struct gcs_action& act) { return gcs_recv(conn_, &act); } ssize_t sendv(const WriteSetVector& actv, size_t act_len, gcs_act_type_t act_type, bool scheduled, bool grab) { return gcs_sendv(conn_, &actv[0], act_len, act_type, scheduled, grab); } ssize_t send(const void* act, size_t act_len, gcs_act_type_t act_type, bool scheduled) { return gcs_send(conn_, act, act_len, act_type, scheduled); } ssize_t replv(const WriteSetVector& actv, struct gcs_action& act, bool scheduled) { return gcs_replv(conn_, &actv[0], &act, scheduled); } ssize_t repl(struct gcs_action& act, bool scheduled) { return gcs_repl(conn_, &act, scheduled); } void caused(gu::GTID& gtid, gu::datetime::Date& wait_until) { long err; while ((err = gcs_caused(conn_, gtid)) == -EAGAIN && gu::datetime::Date::calendar() < wait_until) { usleep(1000); } if (err == -EAGAIN) err = -ETIMEDOUT; if (err 
< 0) { gu_throw_error(-err); } } ssize_t schedule() { return gcs_schedule(conn_); } ssize_t interrupt(ssize_t handle) { return gcs_interrupt(conn_, handle); } ssize_t resume_recv() { return gcs_resume_recv(conn_); } ssize_t set_last_applied(const gu::GTID& gtid) { assert(gtid.uuid() != GU_UUID_NIL); assert(gtid.seqno() >= 0); return gcs_set_last_applied(conn_, gtid); } int vote(const gu::GTID& gtid, uint64_t const code, const void* const msg, size_t const msg_len) { assert(gtid.uuid() != GU_UUID_NIL); assert(gtid.seqno() >= 0); return gcs_vote(conn_, gtid, code, msg, msg_len); } ssize_t request_state_transfer(int version, const void* req, ssize_t req_len, const std::string& sst_donor, const gu::GTID& ist_gtid, gcs_seqno_t& seqno_l) { return gcs_request_state_transfer(conn_, version, req, req_len, sst_donor.c_str(), ist_gtid, seqno_l); } ssize_t desync (gcs_seqno_t& seqno_l) { return gcs_desync(conn_, seqno_l); } void join (const gu::GTID& gtid, int const code) { long const err(gcs_join(conn_, gtid, code)); if (err < 0) { gu_throw_error (-err) << "gcs_join(" << gtid << ") failed"; } } gcs_seqno_t local_sequence() { return gcs_local_sequence(conn_); } void get_stats(gcs_stats* stats) const { return gcs_get_stats(conn_, stats); } void flush_stats() { return gcs_flush_stats(conn_); } void get_status(gu::Status& status) const { gcs_get_status(conn_, status); } void param_set (const std::string& key, const std::string& value) { long ret = gcs_param_set (conn_, key.c_str(), value.c_str()); if (1 == ret) { throw gu::NotFound(); } else if (ret) { gu_throw_error(-ret) << "Setting '" << key << "' to '" << value << "' failed"; } } char* param_get (const std::string& key) const { gu_throw_error(ENOSYS) << "Not implemented: " << __FUNCTION__; return 0; } size_t max_action_size() const { return GCS_MAX_ACT_SIZE; } private: Gcs(const Gcs&); void operator=(const Gcs&); gcs_conn_t* conn_; }; class DummyGcs : public GcsI { public: DummyGcs(gu::Config& config, gcache::GCache& cache, 
int repl_proto_ver = 0, int appl_proto_ver = 0, const char* node_name = 0, const char* node_incoming = 0); DummyGcs(); // for unit tests ~DummyGcs(); ssize_t connect(const std::string& cluster_name, const std::string& cluster_url, bool bootstrap); ssize_t set_initial_position(const gu::GTID& gtid); void close(); ssize_t recv(gcs_action& act); ssize_t sendv(const WriteSetVector&, size_t, gcs_act_type_t, bool, bool) { return -ENOSYS; } ssize_t send(const void*, size_t, gcs_act_type_t, bool) { return -ENOSYS; } ssize_t replv(const WriteSetVector& actv, gcs_action& act, bool scheduled) { ssize_t ret(set_seqnos(act)); if (gu_likely(0 != gcache_ && ret > 0)) { assert (ret == act.size); gu::byte_t* ptr( reinterpret_cast(gcache_->malloc(act.size))); act.buf = ptr; ssize_t copied(0); for (int i(0); copied < act.size; ++i) { memcpy (ptr + copied, actv[i].ptr, actv[i].size); copied += actv[i].size; } assert (copied == act.size); } return ret; } ssize_t repl(gcs_action& act, bool scheduled) { ssize_t ret(set_seqnos(act)); if (gu_likely(0 != gcache_ && ret > 0)) { assert (ret == act.size); void* const ptr(gcache_->malloc(act.size)); memcpy (ptr, act.buf, act.size); act.buf = ptr; // no freeing here - initial act.buf belongs to the caller } return ret; } void caused(gu::GTID& gtid, gu::datetime::Date& wait_until) { gtid.set(uuid_, global_seqno_); } ssize_t schedule() { return 1; } ssize_t interrupt(ssize_t handle); ssize_t resume_recv() { return 0; } ssize_t set_last_applied(const gu::GTID& gtid) { gu::Lock lock(mtx_); last_applied_ = gtid.seqno(); report_last_applied_ = true; cond_.signal(); return 0; } gcs_seqno_t last_applied() const { return last_applied_; } int vote(const gu::GTID& gtid, uint64_t const code, const void* const msg, size_t const msg_len) { return 0; // we always agree with ourselves } ssize_t request_state_transfer(int version, const void* req, ssize_t req_len, const std::string& sst_donor, const gu::GTID& ist_gtid, gcs_seqno_t& seqno_l) { seqno_l = 
GCS_SEQNO_ILL; return -ENOSYS; } ssize_t desync (gcs_seqno_t& seqno_l) { seqno_l = GCS_SEQNO_ILL; return -ENOTCONN; } void join(const gu::GTID& gtid, int const code) { gu_throw_error(ENOTCONN); } gcs_seqno_t local_sequence() { gu::Lock lock(mtx_); return ++local_seqno_; } void get_stats(gcs_stats* stats) const { memset (stats, 0, sizeof(*stats)); } void flush_stats() {} void get_status(gu::Status& status) const {} void param_set (const std::string& key, const std::string& value) {} char* param_get (const std::string& key) const { return 0; } size_t max_action_size() const { return 0x7FFFFFFF; } private: typedef enum { S_CLOSED, S_OPEN, S_CONNECTED, S_SYNCED } conn_state_t; ssize_t generate_seqno_action (gcs_action& act, gcs_act_type_t type); ssize_t generate_cc (bool primary); gu::Config* gconf_; gcache::GCache* gcache_; gu::Mutex mtx_; gu::Cond cond_; gcs_seqno_t global_seqno_; gcs_seqno_t local_seqno_; gu::UUID uuid_; gcs_seqno_t last_applied_; conn_state_t state_; gu::Lock* schedule_; void* cc_; ssize_t cc_size_; std::string const my_name_; std::string const incoming_; int repl_proto_ver_; int appl_proto_ver_; bool report_last_applied_; ssize_t set_seqnos (gcs_action& act) { act.seqno_g = GCS_SEQNO_ILL; act.seqno_l = GCS_SEQNO_ILL; ssize_t ret(-EBADFD); { gu::Lock lock(mtx_); switch (state_) { case S_CONNECTED: case S_SYNCED: { ++global_seqno_; act.seqno_g = global_seqno_; ++local_seqno_; act.seqno_l = local_seqno_; ret = act.size; break; } case S_CLOSED: ret = -EBADFD; break; case S_OPEN: ret = -ENOTCONN; break; } } return ret; } DummyGcs (const DummyGcs&); DummyGcs& operator=(const DummyGcs&); }; } #endif // GALERA_GCS_HPP galera-26.4.3/galera/src/fsm.hpp0000664000177500017540000001443513540715002014705 0ustar dbartmy// // Copyright (C) 2010 Codership Oy // #ifndef GALERA_FSM_HPP #define GALERA_FSM_HPP #include "gu_unordered.hpp" #include "gu_throw.hpp" #include #include namespace galera { class EmptyGuard { public: bool operator()() const { return true; } }; 
class EmptyAction { public: void operator()() { } }; template class FSM { public: class TransAttr { public: TransAttr() : pre_guard_(0), post_guard_(0), pre_action_(0), post_action_(0) { } std::list pre_guard_; std::list post_guard_; std::list pre_action_; std::list post_action_; }; typedef gu::UnorderedMap TransMap; typedef std::pair StateEntry; FSM(State const initial_state) : delete_(true), trans_map_(new TransMap), state_(initial_state, 0), state_hist_() { } FSM(TransMap* const trans_map, State const initial_state) : delete_(false), trans_map_(trans_map), state_(initial_state, 0), state_hist_() { } ~FSM() { if (delete_ == true) delete trans_map_; } void shift_to(State const state, int const line = -1) { typename TransMap::iterator i(trans_map_->find(Transition(state_.first, state))); if (i == trans_map_->end()) { log_fatal << "FSM: no such a transition " << state_.first << " -> " << state; // gu_throw_fatal << "FSM: no such a transition " // << state_ << " -> " << state; abort(); // we want to catch it in the stack } typename std::list::const_iterator gi; for (gi = i->second.pre_guard_.begin(); gi != i->second.pre_guard_.end(); ++gi) { if ((*gi)() == false) { log_fatal << "FSM: pre guard failed for " << state_.first << " -> " << state; gu_throw_fatal << "FSM: pre guard failed for " << state_.first << " -> " << state; } } typename std::list::iterator ai; for (ai = i->second.pre_action_.begin(); ai != i->second.pre_action_.end(); ++ai) { (*ai)(); } StateEntry const se(state, line); state_hist_.push_back(state_); state_ = se; for (ai = i->second.post_action_.begin(); ai != i->second.post_action_.end(); ++ai) { (*ai)(); } for (gi = i->second.post_guard_.begin(); gi != i->second.post_guard_.end(); ++gi) { if ((*gi)() == false) { log_fatal << "FSM: post guard failed for " << state_.first << " -> " << state; gu_throw_fatal << "FSM: post guard failed for " << state_.first << " -> " << state; } } } void force(State const state) { state_ = StateEntry(state, 0); } void 
reset_history() { state_hist_.clear(); } const State& operator()() const { return state_.first; } const StateEntry& get_state_entry() const { return state_; } void add_transition(Transition const& trans) { if (trans_map_->insert( std::make_pair(trans, TransAttr())).second == false) { gu_throw_fatal << "transition " << trans.from() << " -> " << trans.to() << " already exists"; } } void add_pre_guard(Transition const& trans, Guard const& guard) { typename TransMap::iterator i(trans_map_->find(trans)); if (i == trans_map_->end()) { gu_throw_fatal << "no such a transition " << trans.from() << " -> " << trans.to(); } i->second.pre_guard_.push_back(guard); } void add_post_guard(Transition const& trans, Guard const& guard) { typename TransMap::iterator i(trans_map_->find(trans)); if (i == trans_map_->end()) { gu_throw_fatal << "no such a transition " << trans.from() << " -> " << trans.to(); } i->second.post_guard_.push_back(guard); } void add_pre_action(Transition const& trans, Action const& action) { typename TransMap::iterator i(trans_map_->find(trans)); if (i == trans_map_->end()) { gu_throw_fatal << "no such a transition " << trans.from() << " -> " << trans.to(); } i->second.pre_action_.push_back(action); } void add_post_action(Transition const& trans, Action const& action) { typename TransMap::iterator i(trans_map_->find(trans)); if (i == trans_map_->end()) { gu_throw_fatal << "no such a transition " << trans.from() << " -> " << trans.to(); } i->second.post_action_.push_back(action); } const std::vector& history() const { return state_hist_; } private: FSM(const FSM&); void operator=(const FSM&); bool delete_; TransMap* const trans_map_; StateEntry state_; std::vector state_hist_; }; } #endif // GALERA_FSM_HPP galera-26.4.3/galera/src/gcs_action_source.hpp0000664000177500017540000000355313540715002017610 0ustar dbartmy// // Copyright (C) 2010-2018 Codership Oy // #ifndef GALERA_GCS_ACTION_SOURCE_HPP #define GALERA_GCS_ACTION_SOURCE_HPP #include "action_source.hpp" 
#include "galera_gcs.hpp" #ifndef NDEBUG #include "replicator.hpp" #define REPL_IMPL Replicator #else #include "replicator_smm.hpp" #define REPL_IMPL ReplicatorSMM #endif #include "trx_handle.hpp" #include "GCache.hpp" #include "gu_atomic.hpp" namespace galera { class GcsActionSource : public galera::ActionSource { public: GcsActionSource(TrxHandleSlave::Pool& sp, GCS_IMPL& gcs, REPL_IMPL& replicator, gcache::GCache& gcache) : trx_pool_ (sp ), gcs_ (gcs ), replicator_ (replicator), gcache_ (gcache ), received_ (0 ), received_bytes_(0 ) { } ~GcsActionSource() { log_info << trx_pool_; } ssize_t process(void*, bool& exit_loop); long long received() const { return received_(); } long long received_bytes() const { return received_bytes_(); } private: void process_writeset(void* recv_ctx, const struct gcs_action& act, bool& exit_loop); void resend_writeset(const struct gcs_action& act); void dispatch(void*, const gcs_action&, bool& exit_loop); TrxHandleSlave::Pool& trx_pool_; GCS_IMPL& gcs_; REPL_IMPL& replicator_; gcache::GCache& gcache_; gu::Atomic received_; gu::Atomic received_bytes_; }; } #endif // GALERA_GCS_ACTION_SOURCE_HPP galera-26.4.3/galera/src/wsrep_params.cpp0000664000177500017540000000404113540715002016606 0ustar dbartmy// // Copyright (C) 2010-2014 Codership Oy // #include "wsrep_params.hpp" #include "gu_dbug.h" #include "gu_debug_sync.hpp" void wsrep_set_params (galera::Replicator& repl, const char* params) { if (!params) return; std::vector > pv; gu::Config::parse (pv, params); for (size_t i(0); i < pv.size(); ++i) { const std::string& key(pv[i].first); const std::string& value(pv[i].second); try { if (key == galera::Replicator::Param::debug_log) { bool val(gu::from_string(value)); if (val == true) { gu_conf_debug_on(); } else { gu_conf_debug_off(); } } #ifdef GU_DBUG_ON else if (key == galera::Replicator::Param::dbug) { if (value.empty()) { GU_DBUG_POP(); } else { GU_DBUG_PUSH(value.c_str()); } } else if (key == galera::Replicator::Param::signal) { 
gu_debug_sync_signal(value); } #endif /* GU_DBUG_ON */ else { log_debug << "Setting param '" << key << "' = '" << value << '\''; repl.param_set(key, value); } } catch (gu::NotFound&) { log_warn << "Unknown parameter '" << key << "'"; gu_throw_error(EINVAL) << "Unknown parameter' " << key << "'"; } catch (gu::Exception& e) { log_warn << "Setting parameter '" << key << "' to '" << value << "' failed: " << e.what(); throw; } } } char* wsrep_get_params(const galera::Replicator& repl) { std::ostringstream os; os << repl.params(); return strdup(os.str().c_str()); } galera-26.4.3/galera/src/gcs_dummy.cpp0000664000177500017540000001352613540715002016102 0ustar dbartmy// // Copyright (C) 2011-2015 Codership Oy // #include "galera_gcs.hpp" namespace galera { DummyGcs::DummyGcs(gu::Config& config, gcache::GCache& cache, int repl_proto_ver, int appl_proto_ver, const char* node_name, const char* node_incoming) : gconf_ (&config), gcache_ (&cache), mtx_ (), cond_ (), global_seqno_ (0), local_seqno_ (0), uuid_ (NULL, 0), last_applied_ (GCS_SEQNO_ILL), state_ (S_OPEN), schedule_ (0), cc_ (0), cc_size_ (0), my_name_ (node_name ? node_name : "not specified"), incoming_ (node_incoming ? node_incoming : "not given"), repl_proto_ver_(repl_proto_ver), appl_proto_ver_(appl_proto_ver), report_last_applied_(false) {} DummyGcs::DummyGcs() : gconf_ (0), gcache_ (0), mtx_ (), cond_ (), global_seqno_ (0), local_seqno_ (0), uuid_ (NULL, 0), last_applied_ (GCS_SEQNO_ILL), state_ (S_OPEN), schedule_ (0), cc_ (0), cc_size_ (0), my_name_ ("not specified"), incoming_ ("not given"), repl_proto_ver_(1), appl_proto_ver_(1), report_last_applied_(false) {} DummyGcs::~DummyGcs() { gu::Lock lock(mtx_); assert(0 == schedule_); if (cc_) { assert (cc_size_ > 0); ::free(cc_); } } ssize_t DummyGcs::generate_cc (bool primary) { gcs_act_cchange cc; gcs_node_state_t const my_state (primary ? 
GCS_NODE_STATE_JOINED : GCS_NODE_STATE_NON_PRIM); if (primary) { ++global_seqno_; cc.seqno = global_seqno_; cc.conf_id = 1; cc.uuid = *uuid_.ptr(); cc.repl_proto_ver = repl_proto_ver_; cc.appl_proto_ver = appl_proto_ver_; /* we have single member here */ gcs_act_cchange::member m; m.uuid_ = *uuid_.ptr(); m.name_ = my_name_; m.incoming_ = incoming_; m.state_ = my_state; cc.memb.push_back(m); } else { cc.seqno = GCS_SEQNO_ILL; cc.conf_id = -1; } cc_size_ = cc.write(&cc_); if (!cc_) { cc_size_ = 0; return -ENOMEM; } return cc_size_; } ssize_t DummyGcs::connect(const std::string& cluster_name, const std::string& cluster_url, bool bootstrap) { gu::Lock lock(mtx_); ssize_t ret = generate_cc (true); if (ret > 0) { cond_.signal(); ret = 0; } return ret; } ssize_t DummyGcs::set_initial_position(const gu::GTID& gtid) { gu::Lock lock(mtx_); if (gtid.uuid() != GU_UUID_NIL && gtid.seqno() >= 0) { uuid_ = gtid.uuid(); global_seqno_ = gtid.seqno(); } return 0; } void DummyGcs::close() { log_info << "Closing DummyGcs"; gu::Lock lock(mtx_); generate_cc (false); // state_ = S_CLOSED; cond_.broadcast(); // usleep(100000); // 0.1s } ssize_t DummyGcs::generate_seqno_action (gcs_action& act, gcs_act_type_t type) { gcs_seqno_t* const seqno (static_cast(::malloc(sizeof(gcs_seqno_t)))); if (!seqno) return -ENOMEM; *seqno = global_seqno_; ++local_seqno_; act.buf = seqno; act.size = sizeof(*seqno); act.seqno_l = local_seqno_; act.type = type; return act.size; } ssize_t DummyGcs::recv(gcs_action& act) { act.seqno_g = GCS_SEQNO_ILL; act.seqno_l = GCS_SEQNO_ILL; gu::Lock lock(mtx_); do { if (cc_) { ++local_seqno_; act.buf = cc_; act.size = cc_size_; act.seqno_l = local_seqno_; act.type = GCS_ACT_CCHANGE; cc_ = 0; cc_size_ = 0; gcs_act_cchange const cc(act.buf, act.size); act.seqno_g = (cc.conf_id >= 0 ? 
0 : -1); int const my_idx(act.seqno_g); if (my_idx < 0) { assert (0 == cc.memb.size()); state_ = S_CLOSED; } else { assert (1 == cc.memb.size()); state_ = S_CONNECTED; } return act.size; } else if (S_CONNECTED == state_) { ssize_t ret = generate_seqno_action(act, GCS_ACT_SYNC); if (ret > 0) state_ = S_SYNCED; return ret; } else if (report_last_applied_) { report_last_applied_ = false; return generate_seqno_action(act, GCS_ACT_COMMIT_CUT); } } while (state_ > S_OPEN && (lock.wait(cond_), true)); switch (state_) { case S_OPEN: return -ENOTCONN; case S_CLOSED: return 0; default: abort(); } } ssize_t DummyGcs::interrupt(ssize_t handle) { log_fatal << "Attempt to interrupt handle: " << handle; abort(); return -ENOSYS; } } galera-26.4.3/galera/src/ist_proto.hpp0000664000177500017540000006655613540715002016155 0ustar dbartmy// // Copyright (C) 2011-2019 Codership Oy // #ifndef GALERA_IST_PROTO_HPP #define GALERA_IST_PROTO_HPP #include "gcs.hpp" #include "trx_handle.hpp" #include "GCache.hpp" #include "gu_asio.hpp" #include "gu_logger.hpp" #include "gu_serialize.hpp" #include "gu_vector.hpp" #include "gu_array.hpp" #include // // Message class must have non-virtual destructor until // support up to version 3 is removed as serialization/deserialization // depends on the size of the class. // #if defined(__GNUG__) # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic push # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic ignored "-Weffc++" #endif // // Sender Receiver // connect() -----> accept() // <----- send_handshake() // send_handshake_response() -----> // <----- send_ctrl(OK) // send_trx() -----> // -----> // send_ctrl(EOF) -----> // <----- close() // close() // // Note about protocol/message versioning: // Version is determined by GCS and IST protocol is initialized in total // order. 
Therefore it is not necessary to negotiate version at IST level, // it should be enough to check that message version numbers match. // namespace galera { namespace ist { static int const VER21 = 4; static int const VER40 = 10; class Message { public: typedef enum { T_NONE = 0, T_HANDSHAKE = 1, T_HANDSHAKE_RESPONSE = 2, T_CTRL = 3, T_TRX = 4, T_CCHANGE = 5, T_SKIP = 6 } Type; typedef enum { F_PRELOAD = 0x1 } Flag; explicit Message(int version, Type type = T_NONE, uint8_t flags = 0, int8_t ctrl = 0, uint32_t len = 0, wsrep_seqno_t seqno = WSREP_SEQNO_UNDEFINED) : seqno_ (seqno ), len_ (len ), type_ (type ), version_(version), flags_ (flags ), ctrl_ (ctrl ) {} int version() const { return version_; } Type type() const { return type_ ; } uint8_t flags() const { return flags_ ; } int8_t ctrl() const { return ctrl_ ; } uint32_t len() const { return len_ ; } wsrep_seqno_t seqno() const { return seqno_ ; } void set_type_seqno(Type t, wsrep_seqno_t s) { type_ = t; seqno_ = s; } ~Message() { } size_t serial_size() const { if (gu_likely(version_ >= VER40)) { // header: version 1 byte, type 1 byte, flags 1 byte, // ctrl field 1 byte, length 4 bytes, seqno 8 bytes return 4 + 4 + 8 + sizeof(checksum_t); } else { // header: version 1 byte, type 1 byte, flags 1 byte, // ctrl field 1 byte, length 8 bytes return 4 + 8; } } size_t serialize(gu::byte_t* buf, size_t buflen, size_t offset)const { assert(version_ >= VER21); size_t const orig_offset(offset); offset = gu::serialize1(uint8_t(version_), buf, buflen, offset); offset = gu::serialize1(uint8_t(type_), buf, buflen, offset); offset = gu::serialize1(flags_, buf, buflen, offset); offset = gu::serialize1(ctrl_, buf, buflen, offset); if (gu_likely(version_ >= VER40)) { offset = gu::serialize4(len_, buf, buflen, offset); offset = gu::serialize8(seqno_, buf, buflen, offset); *reinterpret_cast(buf + offset) = htog_checksum(buf + orig_offset, offset - orig_offset); offset += sizeof(checksum_t); } else /**/ { uint64_t const tmp(len_); 
offset = gu::serialize8(tmp, buf, buflen, offset); } assert(offset - orig_offset == serial_size()); return offset; } size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset) { assert(version_ >= VER21); size_t orig_offset(offset); uint8_t u8; offset = gu::unserialize1(buf, buflen, offset, u8); if (gu_unlikely(u8 != version_)) throw_invalid_version(u8); offset = gu::unserialize1(buf, buflen, offset, u8); type_ = static_cast(u8); offset = gu::unserialize1(buf, buflen, offset, flags_); offset = gu::unserialize1(buf, buflen, offset, ctrl_); if (gu_likely(version_ >= VER40)) { offset = gu::unserialize4(buf, buflen, offset, len_); offset = gu::unserialize8(buf, buflen, offset, seqno_); checksum_t const computed(htog_checksum(buf + orig_offset, offset-orig_offset)); const checksum_t* expected (reinterpret_cast(buf + offset)); if (gu_unlikely(computed != *expected)) throw_corrupted_header(); offset += sizeof(checksum_t); } else { uint64_t tmp; offset = gu::unserialize8(buf, buflen, offset, tmp); assert(tmp < std::numeric_limits::max()); len_ = tmp; } assert(offset - orig_offset == serial_size()); return offset; } private: wsrep_seqno_t seqno_; uint32_t len_; Type type_; uint8_t version_; uint8_t flags_; int8_t ctrl_; typedef uint64_t checksum_t; // returns endian-adjusted checksum of buf static checksum_t htog_checksum(const void* const buf, size_t const size) { return gu::htog(gu::FastHash::digest(buf, size)); } void throw_invalid_version(uint8_t v); void throw_corrupted_header(); }; std::ostream& operator<< (std::ostream& os, const Message& m); class Handshake : public Message { public: Handshake(int version = -1) : Message(version, Message::T_HANDSHAKE, 0, 0, 0) { } }; class HandshakeResponse : public Message { public: HandshakeResponse(int version = -1) : Message(version, Message::T_HANDSHAKE_RESPONSE, 0, 0, 0) { } }; class Ctrl : public Message { public: enum { // negative values reserved for error codes C_OK = 0, C_EOF = 1 }; Ctrl(int version = -1, 
int8_t code = 0) : Message(version, Message::T_CTRL, 0, code, 0) { } }; class Ordered : public Message { public: Ordered(int version, Type type, uint8_t flags, uint32_t len, wsrep_seqno_t const seqno) : Message(version, type, flags, 0, len, seqno) { } }; class Proto { public: Proto(gcache::GCache& gc, int version, bool keep_keys) : gcache_ (gc), raw_sent_ (0), real_sent_(0), version_ (version), keep_keys_(keep_keys) { } ~Proto() { if (raw_sent_ > 0) { log_info << "ist proto finished, raw sent: " << raw_sent_ << " real sent: " << real_sent_ << " frac: " << (raw_sent_ == 0 ? 0. : static_cast(real_sent_)/raw_sent_); } } template void send_handshake(ST& socket) { Handshake hs(version_); gu::Buffer buf(hs.serial_size()); size_t offset(hs.serialize(&buf[0], buf.size(), 0)); size_t n(asio::write(socket, asio::buffer(&buf[0], buf.size()))); if (n != offset) { gu_throw_error(EPROTO) << "error sending handshake"; } } template void recv_handshake(ST& socket) { Message msg(version_); gu::Buffer buf(msg.serial_size()); size_t n(asio::read(socket, asio::buffer(&buf[0], buf.size()))); if (n != buf.size()) { gu_throw_error(EPROTO) << "error receiving handshake"; } (void)msg.unserialize(&buf[0], buf.size(), 0); log_debug << "handshake msg: " << msg.version() << " " << msg.type() << " " << msg.len(); switch (msg.type()) { case Message::T_HANDSHAKE: break; case Message::T_CTRL: switch (msg.ctrl()) { case Ctrl::C_EOF: gu_throw_error(EINTR); default: gu_throw_error(EPROTO) << "unexpected ctrl code: " << msg.ctrl(); } break; default: gu_throw_error(EPROTO) << "unexpected message type: " << msg.type(); } if (msg.version() != version_) { gu_throw_error(EPROTO) << "mismatching protocol version: " << msg.version() << " required: " << version_; } // TODO: Figure out protocol versions to use } template void send_handshake_response(ST& socket) { HandshakeResponse hsr(version_); gu::Buffer buf(hsr.serial_size()); size_t offset(hsr.serialize(&buf[0], buf.size(), 0)); size_t n(asio::write(socket, 
asio::buffer(&buf[0], buf.size()))); if (n != offset) { gu_throw_error(EPROTO) << "error sending handshake response"; } } template void recv_handshake_response(ST& socket) { Message msg(version_); gu::Buffer buf(msg.serial_size()); size_t n(asio::read(socket, asio::buffer(&buf[0], buf.size()))); if (n != buf.size()) { gu_throw_error(EPROTO) << "error receiving handshake"; } (void)msg.unserialize(&buf[0], buf.size(), 0); log_debug << "handshake response msg: " << msg.version() << " " << msg.type() << " " << msg.len(); switch (msg.type()) { case Message::T_HANDSHAKE_RESPONSE: break; case Message::T_CTRL: switch (msg.ctrl()) { case Ctrl::C_EOF: gu_throw_error(EINTR) << "interrupted by ctrl"; default: gu_throw_error(EPROTO) << "unexpected ctrl code: " << msg.ctrl(); } default: gu_throw_error(EINVAL) << "unexpected message type: " << msg.type(); } } template void send_ctrl(ST& socket, int8_t code) { Ctrl ctrl(version_, code); gu::Buffer buf(ctrl.serial_size()); size_t offset(ctrl.serialize(&buf[0], buf.size(), 0)); size_t n(asio::write(socket, asio::buffer(&buf[0],buf.size()))); if (n != offset) { gu_throw_error(EPROTO) << "error sending ctrl message"; } } template int8_t recv_ctrl(ST& socket) { Message msg(version_); gu::Buffer buf(msg.serial_size()); size_t n(asio::read(socket, asio::buffer(&buf[0], buf.size()))); if (n != buf.size()) { gu_throw_error(EPROTO) << "error receiving handshake"; } (void)msg.unserialize(&buf[0], buf.size(), 0); log_debug << "msg: " << msg.version() << " " << msg.type() << " " << msg.len(); switch (msg.type()) { case Message::T_CTRL: break; default: gu_throw_error(EPROTO) << "unexpected message type: " << msg.type(); } return msg.ctrl(); } template void send_ordered(ST& socket, const gcache::GCache::Buffer& buffer, bool const preload_flag) { Message::Type type(ordered_type(buffer)); gu::array::type cbs; size_t payload_size; /* size of the 2nd cbs buffer */ size_t sent; // for proto ver < VER40 compatibility int64_t 
seqno_d(WSREP_SEQNO_UNDEFINED); if (gu_likely(Message::T_SKIP != type)) { assert(Message::T_TRX == type || version_ >= VER40); galera::WriteSetIn ws; gu::Buf tmp = { buffer.ptr(), buffer.size() }; if (keep_keys_ || Message::T_CCHANGE == type) { payload_size = buffer.size(); const void* const ptr(buffer.ptr()); cbs[1] = asio::const_buffer(ptr, payload_size); cbs[2] = asio::const_buffer(ptr, 0); if (gu_likely(Message::T_TRX == type)) // compatibility { ws.read_header (tmp); seqno_d = buffer.seqno_g() - ws.pa_range(); assert(buffer.seqno_g() == ws.seqno()); } } else { ws.read_buf (tmp, 0); WriteSetIn::GatherVector out; payload_size = ws.gather (out, false, false); assert (2 == out->size()); cbs[1] = asio::const_buffer(out[0].ptr, out[0].size); cbs[2] = asio::const_buffer(out[1].ptr, out[1].size); seqno_d = buffer.seqno_g() - ws.pa_range(); assert(buffer.seqno_g() == ws.seqno()); } } else { assert(Message::T_SKIP == type); payload_size = 0; seqno_d = WSREP_SEQNO_UNDEFINED; /* in proto ver < VER40 everything is T_TRX */ if (gu_unlikely(version_ < VER40)) type = Message::T_TRX; } /* in version >= 3 metadata is included in Msg header, leaving * it here for backward compatibility */ size_t const trx_meta_size(version_ >= VER40 ? 0 : (8 /* seqno_g */ + 8 /* seqno_d */)); uint8_t const msg_flags((version_ >= VER40 && preload_flag) ? 
Message::F_PRELOAD : 0); Ordered to_msg(version_, type, msg_flags, trx_meta_size + payload_size, buffer.seqno_g()); gu::Buffer buf(to_msg.serial_size() + trx_meta_size); size_t offset(to_msg.serialize(&buf[0], buf.size(), 0)); if (gu_unlikely(version_ < VER40)) { offset = gu::serialize8(buffer.seqno_g(), &buf[0], buf.size(), offset); offset = gu::serialize8(seqno_d, &buf[0], buf.size(), offset); } cbs[0] = asio::const_buffer(&buf[0], buf.size()); if (gu_likely(payload_size)) { sent = asio::write(socket, cbs); } else { sent = asio::write(socket, asio::buffer(cbs[0])); } log_debug << "sent " << sent << " bytes"; } template void skip_bytes(ST& socket, size_t bytes) { gu::Buffer buf(4092); while (bytes > 0) { bytes -= asio::read( socket, asio::buffer(&buf[0], std::min(buf.size(), bytes))); } assert(bytes == 0); } template void recv_ordered(ST& socket, std::pair& ret) { gcs_action& act(ret.first); act.seqno_g = 0; // EOF // act.seqno_l has no significance act.buf = NULL; // skip act.size = 0; // skip act.type = GCS_ACT_UNKNOWN; // EOF Message msg(version_); gu::Buffer buf(msg.serial_size()); size_t n(asio::read(socket, asio::buffer(&buf[0], buf.size()))); if (n != buf.size()) { gu_throw_error(EPROTO) << "error receiving trx header"; } (void)msg.unserialize(&buf[0], buf.size(), 0); log_debug << "received header: " << n << " bytes, type " << msg.type() << " len " << msg.len(); switch (msg.type()) { case Message::T_TRX: case Message::T_CCHANGE: case Message::T_SKIP: { size_t offset(0); int64_t seqno_g(msg.seqno()); // compatibility with 3.x if (gu_unlikely(version_ < VER40)) //compatibility with 3.x { assert(msg.type() == Message::T_TRX); int64_t seqno_d; buf.resize(sizeof(seqno_g) + sizeof(seqno_d)); n = asio::read(socket, asio::buffer(&buf[0],buf.size())); if (n != buf.size()) { assert(0); gu_throw_error(EPROTO) << "error reading trx meta data"; } offset = gu::unserialize8(&buf[0],buf.size(),0,seqno_g); if (gu_unlikely(seqno_g <= 0)) { assert(0); gu_throw_error(EINVAL) 
<< "non-positive sequence number " << seqno_g; } offset = gu::unserialize8(&buf[0], buf.size(), offset, seqno_d); if (gu_unlikely(seqno_d == WSREP_SEQNO_UNDEFINED && offset != msg.len())) { assert(0); gu_throw_error(EINVAL) << "message size " << msg.len() << " does not match expected size "<< offset; } Message::Type const type (seqno_d >= 0 ? Message::T_TRX : Message::T_SKIP); msg.set_type_seqno(type, seqno_g); } else // end compatibility with 3.x { assert(seqno_g > 0); } assert(msg.seqno() > 0); /* Backward compatibility code above could change msg type. * but it should not change below. Saving const for later * assert(). */ Message::Type const msg_type(msg.type()); gcs_act_type const gcs_type (msg_type == Message::T_CCHANGE ? GCS_ACT_CCHANGE : GCS_ACT_WRITESET); const void* wbuf; ssize_t wsize; bool already_cached(false); // Check if cert index preload trx is already in gcache. if ((msg.flags() & Message::F_PRELOAD)) { ret.second = true; try { wbuf = gcache_.seqno_get_ptr(seqno_g, wsize); skip_bytes(socket, msg.len() - offset); already_cached = true; } catch (gu::NotFound& nf) { // not found from gcache, continue as normal } } if (!already_cached) { if (gu_likely(msg_type != Message::T_SKIP)) { wsize = msg.len() - offset; void* const ptr(gcache_.malloc(wsize)); ssize_t const r (asio::read(socket, asio::buffer(ptr, wsize))); if (gu_unlikely(r != wsize)) { gu_throw_error(EPROTO) << "error reading write set data"; } wbuf = ptr; } else { wsize = GU_WORDSIZE/8; // bits to bytes wbuf = gcache_.malloc(wsize); } gcache_.seqno_assign(wbuf, msg.seqno(), gcs_type, msg_type == Message::T_SKIP); } assert(msg.type() == msg_type); switch(msg_type) { case Message::T_TRX: case Message::T_CCHANGE: act.buf = wbuf; // not skip act.size = wsize; // fall through case Message::T_SKIP: act.seqno_g = msg.seqno(); // not EOF act.type = gcs_type; break; default: gu_throw_error(EPROTO) << "Unrecognized message type" << msg_type; } return; } case Message::T_CTRL: switch (msg.ctrl()) { case 
Ctrl::C_EOF: return; default: if (msg.ctrl() >= 0) { gu_throw_error(EPROTO) << "unexpected ctrl code: " << msg.ctrl(); } else { gu_throw_error(-msg.ctrl()) <<"peer reported error"; } } default: gu_throw_error(EPROTO) << "unexpected message type: " << msg.type(); } gu_throw_fatal; throw; } private: gcache::GCache& gcache_; uint64_t raw_sent_; uint64_t real_sent_; int version_; bool keep_keys_; Message::Type ordered_type(const gcache::GCache::Buffer& buf) { assert(buf.type() == GCS_ACT_WRITESET || buf.type() == GCS_ACT_CCHANGE); if (gu_likely(!buf.skip())) { switch (buf.type()) { case GCS_ACT_WRITESET: return Message::T_TRX; case GCS_ACT_CCHANGE: return (version_ >= VER40 ? Message::T_CCHANGE : Message::T_SKIP); default: log_error << "Unsupported message type from cache: " << buf.type() << ". Skipping seqno " << buf.seqno_g(); assert(0); return Message::T_SKIP; } } else { return Message::T_SKIP; } } }; } } #if defined(__GNUG__) # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic pop # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) #endif #endif // GALERA_IST_PROTO_HPP galera-26.4.3/galera/src/write_set.hpp0000664000177500017540000000416013540715002016117 0ustar dbartmy// // Copyright (C) 2010 Codership Oy // #ifndef GALERA_WRITE_SET_HPP #define GALERA_WRITE_SET_HPP #include "key_os.hpp" #include "key_data.hpp" #include "wsrep_api.h" #include "gu_buffer.hpp" #include "gu_logger.hpp" #include "gu_unordered.hpp" #include #include #include namespace galera { class WriteSet { public: typedef std::deque KeySequence; WriteSet(int version) : version_(version), keys_(), key_refs_(), data_() { } void set_version(int version) { version_ = version; } const gu::Buffer& get_data() const { return data_; } void append_key(const KeyData&); void append_data(const void*data, size_t data_len) { data_.reserve(data_.size() + data_len); data_.insert(data_.end(), static_cast(data), static_cast(data) + data_len); } void 
get_keys(KeySequence&) const; const gu::Buffer& get_key_buf() const { return keys_; } bool empty() const { return (data_.size() == 0 && keys_.size() == 0); } void clear() { keys_.clear(), key_refs_.clear(), data_.clear(); } // Return offset to beginning of key or data segment and length // of that segment static std::pair segment(const gu::byte_t*, size_t, size_t); // Scan key sequence from buffer, return offset from the beginning of // buffer after scan. static size_t keys(const gu::byte_t*, size_t, size_t, int, KeySequence&); size_t serialize(gu::byte_t*, size_t, size_t) const; size_t unserialize(const gu::byte_t*, size_t, size_t); size_t serial_size() const; private: typedef gu::UnorderedMultimap KeyRefMap; int version_; gu::Buffer keys_; KeyRefMap key_refs_; gu::Buffer data_; }; } #endif // GALERA_WRITE_SET_HPP galera-26.4.3/galera/src/key_entry_os.cpp0000664000177500017540000000316013540715002016616 0ustar dbartmy// // Copyright (C) 2012 Codership Oy // #include "key_entry_os.hpp" #include "trx_handle.hpp" namespace galera { #ifndef NDEBUG void KeyEntryOS::assert_ref(TrxHandleSlave* trx, bool full_key) const { assert(ref_trx_ == 0 || ref_trx_->global_seqno() <= trx->global_seqno()); if (full_key) { assert(ref_full_trx_ == 0 || (ref_full_trx_->global_seqno() <= trx->global_seqno() && ref_trx_ != 0)); } } void KeyEntryOS::assert_unref(TrxHandleSlave* trx) const { if (ref_full_trx_ != 0 && ref_trx_ == 0) { log_fatal << "dereferencing EXCLUSIVE partial key: " << key_ << " by " << trx->global_seqno() << ", while full key referenced by " << ref_full_trx_->global_seqno(); assert(0); } } void KeyEntryOS::assert_ref_shared(TrxHandleSlave* trx, bool full_key) const { assert(ref_shared_trx_ == 0 || ref_shared_trx_->global_seqno() <= trx->global_seqno()); if (full_key) { assert(ref_full_shared_trx_ == 0 || (ref_full_shared_trx_->global_seqno() <= trx->global_seqno() && ref_shared_trx_ != 0)); } } void KeyEntryOS::assert_unref_shared(TrxHandleSlave* trx) const { if 
(ref_full_shared_trx_ != 0 && ref_shared_trx_ == 0) { log_fatal << "dereferencing SHARED partial key: " << key_ << " by " << trx->global_seqno() << ", while full key referenced by " << ref_full_shared_trx_->global_seqno(); assert(0); } } #endif /* NDEBUG */ } galera-26.4.3/galera/src/wsrep_params.hpp0000664000177500017540000000050113540715002016610 0ustar dbartmy// // Copyright (C) 2010 Codership Oy // #ifndef WSREP_PARAMS_HPP #define WSREP_PARAMS_HPP #include "wsrep_api.h" #include "replicator.hpp" void wsrep_set_params (galera::Replicator& repl, const char* params); char* wsrep_get_params(const galera::Replicator& repl); #endif /* WSREP_PARAMS_HPP */ galera-26.4.3/galera/src/galera_info.cpp0000664000177500017540000000504213540715002016353 0ustar dbartmy// Copyright (C) 2009-2018 Codership Oy #include "galera_info.hpp" #include #include #include #include static size_t view_info_size (int members) { return (sizeof(wsrep_view_info_t) + members * sizeof(wsrep_member_info_t)); } /* create view info out of configuration message */ wsrep_view_info_t* galera_view_info_create (const gcs_act_cchange& conf, wsrep_cap_t const capabilities, int const my_idx, wsrep_uuid_t& my_uuid) { wsrep_view_info_t* ret = static_cast( ::malloc(view_info_size(conf.memb.size()))); if (ret) { wsrep_seqno_t const seqno (conf.seqno != GCS_SEQNO_ILL ? conf.seqno : WSREP_SEQNO_UNDEFINED); wsrep_gtid_t const gtid = { conf.uuid, seqno }; ret->state_id = gtid; ret->view = conf.conf_id; ret->status = conf.conf_id != -1 ? 
WSREP_VIEW_PRIMARY : WSREP_VIEW_NON_PRIMARY; ret->capabilities = capabilities; ret->my_idx = -1; ret->memb_num = conf.memb.size(); ret->proto_ver = conf.appl_proto_ver; for (int m = 0; m < ret->memb_num; ++m) { const gcs_act_cchange::member& cm(conf.memb[m]); // from wsrep_member_info_t& wm(ret->members[m]); // to wm.id = cm.uuid_; if (wm.id == my_uuid) { ret->my_idx = m; } strncpy(wm.name, cm.name_.c_str(), sizeof(wm.name) - 1); wm.name[sizeof(wm.name) - 1] = '\0'; strncpy(wm.incoming, cm.incoming_.c_str(), sizeof(wm.incoming) - 1); wm.incoming[sizeof(wm.incoming) - 1] = '\0'; } if (WSREP_UUID_UNDEFINED == my_uuid && my_idx >= 0) { assert(-1 == ret->my_idx); ret->my_idx = my_idx; assert(ret->my_idx < ret->memb_num); my_uuid = ret->members[ret->my_idx].id; } } else { gu_throw_error(ENOMEM) << "Failed to allocate galera view info"; } return ret; } /* make a copy of view info object */ wsrep_view_info_t* galera_view_info_copy (const wsrep_view_info_t* vi) { size_t ret_size = view_info_size (vi->memb_num); wsrep_view_info_t* ret = static_cast(malloc (ret_size)); if (ret) { memcpy (ret, vi, ret_size); } return ret; } galera-26.4.3/galera/src/trx_handle.hpp0000664000177500017540000011273013540715002016245 0ustar dbartmy// // Copyright (C) 2010-2018 Codership Oy // #ifndef GALERA_TRX_HANDLE_HPP #define GALERA_TRX_HANDLE_HPP #include "write_set.hpp" #include "mapped_buffer.hpp" #include "fsm.hpp" #include "key_data.hpp" // for append_key() #include "key_entry_os.hpp" #include "write_set_ng.hpp" #include "wsrep_api.h" #include "gu_mutex.hpp" #include "gu_atomic.hpp" #include "gu_datetime.hpp" #include "gu_unordered.hpp" #include "gu_utils.hpp" #include "gu_macros.hpp" #include "gu_mem_pool.hpp" #include "gu_vector.hpp" #include "gu_shared_ptr.hpp" #include "gcs.hpp" #include "gu_limits.h" // page size stuff #include namespace galera { class NBOCtx; // forward decl static std::string const working_dir = "/tmp"; // Helper template for building FSMs. 
template class TransMapBuilder { public: TransMapBuilder() { } void add(typename T::State from, typename T::State to) { trans_map_.insert_unique( std::make_pair(typename T::Transition(from, to), typename T::Fsm::TransAttr())); } private: typename T::Fsm::TransMap& trans_map_; }; class TrxHandle { public: enum Flags { F_COMMIT = 1 << 0, F_ROLLBACK = 1 << 1, F_ISOLATION = 1 << 2, F_PA_UNSAFE = 1 << 3, F_COMMUTATIVE = 1 << 4, F_NATIVE = 1 << 5, F_BEGIN = 1 << 6, F_PREPARE = 1 << 7, F_SNAPSHOT = 1 << 8, F_IMPLICIT_DEPS = 1 << 9, /* * reserved for API extension */ F_PREORDERED = 1 << 15 // flag specific to WriteSet /* * reserved for internal use */ }; static const uint32_t TRXHANDLE_FLAGS_MASK = (1 << 15) | ((1 << 10) - 1); static const uint32_t EXPLICIT_ROLLBACK_FLAGS = F_PA_UNSAFE | F_ROLLBACK; static bool const FLAGS_MATCH_API_FLAGS = (WSREP_FLAG_TRX_END == F_COMMIT && WSREP_FLAG_ROLLBACK == F_ROLLBACK && WSREP_FLAG_ISOLATION == F_ISOLATION && WSREP_FLAG_PA_UNSAFE == F_PA_UNSAFE && WSREP_FLAG_COMMUTATIVE == F_COMMUTATIVE && WSREP_FLAG_NATIVE == F_NATIVE && WSREP_FLAG_TRX_START == F_BEGIN && WSREP_FLAG_TRX_PREPARE == F_PREPARE && WSREP_FLAG_SNAPSHOT == F_SNAPSHOT && WSREP_FLAG_IMPLICIT_DEPS == F_IMPLICIT_DEPS && int(WriteSetNG::F_PREORDERED) ==F_PREORDERED); static uint32_t wsrep_flags_to_trx_flags (uint32_t flags); static uint32_t trx_flags_to_wsrep_flags (uint32_t flags); static uint32_t ws_flags_to_trx_flags (uint32_t flags); bool is_toi() const { return ((write_set_flags_ & F_ISOLATION) != 0); } bool pa_unsafe() const { return ((write_set_flags_ & F_PA_UNSAFE) != 0); } bool preordered() const { return ((write_set_flags_ & F_PREORDERED) != 0); } bool nbo_start() const { return (is_toi() && (write_set_flags_ & F_BEGIN) != 0 && (write_set_flags_ & F_COMMIT) == 0); } bool nbo_end() const { return (is_toi() && (write_set_flags_ & F_BEGIN) == 0 && (write_set_flags_ & F_COMMIT) != 0); } typedef enum { S_EXECUTING, S_MUST_ABORT, S_ABORTING, S_REPLICATING, S_CERTIFYING, 
S_MUST_REPLAY, // replay S_REPLAYING, S_APPLYING, // grabbing apply monitor, applying S_COMMITTING, // grabbing commit monitor, committing changes S_ROLLING_BACK, S_COMMITTED, S_ROLLED_BACK } State; static const int num_states_ = S_ROLLED_BACK + 1; static void print_state(std::ostream&, State); void print_state_history(std::ostream&) const; class Transition { public: Transition(State const from, State const to) : from_(from), to_(to) { } State from() const { return from_; } State to() const { return to_; } bool operator==(Transition const& other) const { return (from_ == other.from_ && to_ == other.to_); } class Hash { public: size_t operator()(Transition const& tr) const { return (gu::HashValue(static_cast(tr.from_)) ^ gu::HashValue(static_cast(tr.to_))); } }; private: State from_; State to_; }; // class Transition typedef FSM Fsm; int version() const { return version_; } const wsrep_uuid_t& source_id() const { return source_id_; } wsrep_trx_id_t trx_id() const { return trx_id_; } void set_local(bool local) { local_ = local; } bool local() const { return local_; } wsrep_conn_id_t conn_id() const { return conn_id_; } void set_conn_id(wsrep_conn_id_t conn_id) { conn_id_ = conn_id; } State state() const { return state_(); } void print_set_state(State state) const; uint32_t flags() const { return write_set_flags_; } void set_flags(uint32_t flags) { write_set_flags_ = flags; } uint64_t timestamp() const { return timestamp_; } bool master() const { return master_; } void print(std::ostream& os) const; virtual ~TrxHandle() {} // Force state, for testing purposes only. 
void force_state(State state) { state_.force(state); } protected: void set_state(State const state, int const line) { state_.shift_to(state, line); if (state == S_EXECUTING) state_.reset_history(); } /* slave trx ctor */ TrxHandle(Fsm::TransMap* trans_map, bool local) : state_ (trans_map, S_REPLICATING), source_id_ (WSREP_UUID_UNDEFINED), conn_id_ (-1), trx_id_ (-1), timestamp_ (), version_ (-1), write_set_flags_ (0), local_ (local), master_ (false) {} /* local trx ctor */ TrxHandle(Fsm::TransMap* trans_map, const wsrep_uuid_t& source_id, wsrep_conn_id_t conn_id, wsrep_trx_id_t trx_id, int version) : state_ (trans_map, S_EXECUTING), source_id_ (source_id), conn_id_ (conn_id), trx_id_ (trx_id), timestamp_ (gu_time_calendar()), version_ (version), write_set_flags_ (F_BEGIN), local_ (true), master_ (true) {} Fsm state_; wsrep_uuid_t source_id_; wsrep_conn_id_t conn_id_; wsrep_trx_id_t trx_id_; int64_t timestamp_; int version_; uint32_t write_set_flags_; // Boolean denoting if the TrxHandle was generated locally. // Always true for TrxHandleMaster, set to true to // TrxHandleSlave if there exists TrxHandleMaster object corresponding // to TrxHandleSlave. 
bool local_; bool master_; // derived object type private: TrxHandle(const TrxHandle&); void operator=(const TrxHandle& other); friend class Wsdb; friend class Certification; template static inline uint32_t wsrep_flags_to_trx_flags_tmpl (uint32_t flags) { assert(0); // remove when needed uint32_t ret(0); if (flags & WSREP_FLAG_TRX_END) ret |= F_COMMIT; if (flags & WSREP_FLAG_ROLLBACK) ret |= F_ROLLBACK; if (flags & WSREP_FLAG_ISOLATION) ret |= F_ISOLATION; if (flags & WSREP_FLAG_PA_UNSAFE) ret |= F_PA_UNSAFE; if (flags & WSREP_FLAG_COMMUTATIVE) ret |= F_COMMUTATIVE; if (flags & WSREP_FLAG_NATIVE) ret |= F_NATIVE; if (flags & WSREP_FLAG_TRX_START) ret |= F_BEGIN; if (flags & WSREP_FLAG_TRX_PREPARE) ret |= F_PREPARE; return ret; } template static inline uint32_t trx_flags_to_wsrep_flags_tmpl (uint32_t flags) { assert(0); // remove when needed uint32_t ret(0); if (flags & F_COMMIT) ret |= WSREP_FLAG_TRX_END; if (flags & F_ROLLBACK) ret |= WSREP_FLAG_ROLLBACK; if (flags & F_ISOLATION) ret |= WSREP_FLAG_ISOLATION; if (flags & F_PA_UNSAFE) ret |= WSREP_FLAG_PA_UNSAFE; if (flags & F_COMMUTATIVE) ret |= WSREP_FLAG_COMMUTATIVE; if (flags & F_NATIVE) ret |= WSREP_FLAG_NATIVE; if (flags & F_BEGIN) ret |= WSREP_FLAG_TRX_START; if (flags & F_PREPARE) ret |= WSREP_FLAG_TRX_PREPARE; return ret; } template static inline uint32_t ws_flags_to_trx_flags_tmpl (uint32_t flags) { assert(0); // remove when needed uint32_t ret(0); if (flags & WriteSetNG::F_COMMIT) ret |= F_COMMIT; if (flags & WriteSetNG::F_ROLLBACK) ret |= F_ROLLBACK; if (flags & WriteSetNG::F_TOI) ret |= F_ISOLATION; if (flags & WriteSetNG::F_PA_UNSAFE) ret |= F_PA_UNSAFE; if (flags & WriteSetNG::F_COMMUTATIVE) ret |= F_COMMUTATIVE; if (flags & WriteSetNG::F_NATIVE) ret |= F_NATIVE; if (flags & WriteSetNG::F_BEGIN) ret |= F_BEGIN; if (flags & WriteSetNG::F_PREORDERED) ret |= F_PREORDERED; if (flags & WriteSetNG::F_PREPARE) ret |= F_PREPARE; return ret; } }; /* class TrxHandle */ template <> inline uint32_t 
TrxHandle::wsrep_flags_to_trx_flags_tmpl(uint32_t const flags) { return flags; } inline uint32_t TrxHandle::wsrep_flags_to_trx_flags (uint32_t const flags) { return wsrep_flags_to_trx_flags_tmpl(flags); } template <> inline uint32_t TrxHandle::trx_flags_to_wsrep_flags_tmpl(uint32_t flags) { return (flags & WSREP_FLAGS_MASK); } inline uint32_t TrxHandle::trx_flags_to_wsrep_flags (uint32_t const flags) { return trx_flags_to_wsrep_flags_tmpl(flags); } template <> inline uint32_t TrxHandle::ws_flags_to_trx_flags_tmpl(uint32_t flags) { return (flags & TRXHANDLE_FLAGS_MASK); } inline uint32_t TrxHandle::ws_flags_to_trx_flags (uint32_t const flags) { return ws_flags_to_trx_flags_tmpl(flags); } std::ostream& operator<<(std::ostream& os, TrxHandle::State s); std::ostream& operator<<(std::ostream& os, const TrxHandle& trx); class TrxHandleSlave; std::ostream& operator<<(std::ostream& os, const TrxHandleSlave& th); class TrxHandleSlave : public TrxHandle { public: typedef gu::MemPool Pool; static TrxHandleSlave* New(bool local, Pool& pool) { assert(pool.buf_size() == sizeof(TrxHandleSlave)); void* const buf(pool.acquire()); return new(buf) TrxHandleSlave(local, pool, buf); } /** * Adjust flags for backwards compatibility. * * Galera 4.x assigns some write set flags differently from * 3.x. During rolling upgrade these changes need to be * taken into account as 3.x originated write sets may not * have all flags set which are required for replicator internal * operation. The adjustment is done here in order to avoid spreading * the protocol specific changes up to stack. * * In particular the lack of F_BEGIN flag in 3.x needs to be * take care of. * * F_BEGIN - All of the write sets which originate from 3.x * (version < VER5) which have F_COMMIT flag set * must be assigned also F_BEGIN for internal operation. * This is safe because 3.x does not have SR or NBO * implemented, all transactions and TOI write sets * are self contained. 
* * @param version Write Set wire version * @param flags Flags from write set * * @return Adjusted write set flags compatible with current * implementation. */ static inline uint32_t fixup_write_set_flags(int version, uint32_t flags) { if (version < WriteSetNG::VER5) { if (flags & F_COMMIT) { flags |= F_BEGIN; } } return flags; } template size_t unserialize(const gcs_action& act) { assert(GCS_ACT_WRITESET == act.type); try { version_ = WriteSetNG::version(act.buf, act.size); action_ = std::make_pair(act.buf, act.size); switch (version_) { case WriteSetNG::VER3: case WriteSetNG::VER4: case WriteSetNG::VER5: write_set_.read_buf (act.buf, act.size); assert(version_ == write_set_.version()); write_set_flags_ = fixup_write_set_flags( version_, ws_flags_to_trx_flags(write_set_.flags())); source_id_ = write_set_.source_id(); conn_id_ = write_set_.conn_id(); trx_id_ = write_set_.trx_id(); #ifndef NDEBUG write_set_.verify_checksum(); assert(source_id_ != WSREP_UUID_UNDEFINED); assert(WSREP_SEQNO_UNDEFINED == last_seen_seqno_); assert(WSREP_SEQNO_UNDEFINED == local_seqno_); assert(WSREP_SEQNO_UNDEFINED == last_seen_seqno_); #endif if (from_group) { local_seqno_ = act.seqno_l; global_seqno_ = act.seqno_g; if (write_set_flags_ & F_PREORDERED) { last_seen_seqno_ = global_seqno_ - 1; } else { last_seen_seqno_ = write_set_.last_seen(); } #ifndef NDEBUG assert(last_seen_seqno_ >= 0); if (last_seen_seqno_ >= global_seqno_) { log_fatal << "S: global: " << global_seqno_ << ", last_seen: " << last_seen_seqno_ << ", checksum: " << gu::PrintBase<>(write_set_.get_checksum()); } assert(last_seen_seqno_ < global_seqno_); #endif if (gu_likely(0 == (flags() & (TrxHandle::F_ISOLATION | TrxHandle::F_PA_UNSAFE)))) { assert(WSREP_SEQNO_UNDEFINED == depends_seqno_); if (gu_likely(version_) >= WriteSetNG::VER5) { depends_seqno_ = std::max (last_seen_seqno_ - write_set_.pa_range(), WSREP_SEQNO_UNDEFINED); } /* just in case Galera 3.x uses this don't condition it on version_ */ if (flags() & 
F_IMPLICIT_DEPS) { assert(last_seen_seqno_ >= depends_seqno_); depends_seqno_ = last_seen_seqno_; } } else { depends_seqno_ = global_seqno_ - 1; } } else { assert(!local_); global_seqno_ = write_set_.seqno(); if (gu_likely(!(nbo_end()))) { depends_seqno_ = global_seqno_-write_set_.pa_range(); assert(depends_seqno_ >= 0); } assert(depends_seqno_ < global_seqno_); certified_ = true; } #ifndef NDEBUG explicit_rollback_ = (write_set_flags_ == EXPLICIT_ROLLBACK_FLAGS); #endif /* NDEBUG */ timestamp_ = write_set_.timestamp(); assert(trx_id() != uint64_t(-1) || is_toi()); sanity_checks(); break; default: gu_throw_error(EPROTONOSUPPORT) <<"Unsupported WS version: " << version_; } return act.size; } catch (gu::Exception& e) { GU_TRACE(e); deserialize_error_log(e); throw; } } void verify_checksum() const /* throws */ { write_set_.verify_checksum(); } void update_stats(gu::Atomic& kc, gu::Atomic& kb, gu::Atomic& db, gu::Atomic& ub) { kc += write_set_.keyset().count(); kb += write_set_.keyset().size(); db += write_set_.dataset().size(); ub += write_set_.unrdset().size(); } bool certified() const { return certified_; } void mark_certified() { assert(!certified_); int dw(0); if (gu_likely(depends_seqno_ >= 0)) { dw = global_seqno_ - depends_seqno_; } /* make sure to not exceed original pa_range() */ assert(version_ < WriteSetNG::VER5 || last_seen_seqno_ - write_set_.pa_range() <= global_seqno_ - dw || preordered()); write_set_.set_seqno(global_seqno_, dw); certified_ = true; } void set_depends_seqno(wsrep_seqno_t const seqno_lt) { /* make sure depends_seqno_ never goes down */ assert(seqno_lt >= depends_seqno_ || seqno_lt == WSREP_SEQNO_UNDEFINED || preordered()); depends_seqno_ = seqno_lt; } void set_global_seqno(wsrep_seqno_t s) // for monitor cancellation { global_seqno_ = s; } void set_state(TrxHandle::State const state, int const line = -1) { TrxHandle::set_state(state, line); } void apply(void* recv_ctx, wsrep_apply_cb_t apply_cb, const wsrep_trx_meta_t& meta, 
wsrep_bool_t& exit_loop) /* throws */; bool is_committed() const { return committed_; } void mark_committed() { committed_ = true; } void unordered(void* recv_ctx, wsrep_unordered_cb_t apply_cb) const; std::pair action() const { return action_; } wsrep_seqno_t local_seqno() const { return local_seqno_; } wsrep_seqno_t global_seqno() const { return global_seqno_; } wsrep_seqno_t last_seen_seqno() const { return last_seen_seqno_; } wsrep_seqno_t depends_seqno() const { return depends_seqno_; } const WriteSetIn& write_set () const { return write_set_; } bool exit_loop() const { return exit_loop_; } void set_exit_loop(bool x) { exit_loop_ |= x; } typedef gu::UnorderedMap, KeyEntryPtrHash, KeyEntryPtrEqualAll> CertKeySet; void print(std::ostream& os) const; uint64_t get_checksum() const { return write_set_.get_checksum(); } size_t size() const { return write_set_.size(); } void set_ends_nbo(wsrep_seqno_t seqno) { ends_nbo_ = seqno; } wsrep_seqno_t ends_nbo() const { return ends_nbo_; } void mark_dummy(int const line = -2) { set_depends_seqno(WSREP_SEQNO_UNDEFINED); set_flags(flags() | F_ROLLBACK); switch(state()) { case S_CERTIFYING: case S_REPLICATING: set_state(S_ABORTING, line); break; case S_ABORTING: case S_ROLLING_BACK: case S_ROLLED_BACK: break; default: assert(0); } // must be set to S_ROLLED_BACK after commit_cb() } bool is_dummy() const { return (flags() & F_ROLLBACK); } bool skip_event() const { return (flags() == F_ROLLBACK); } bool is_streaming() const { return !((flags() & F_BEGIN) && (flags() & F_COMMIT)); } void cert_bypass(bool const val) { assert(true == val); assert(false == cert_bypass_); cert_bypass_ = val; } bool cert_bypass() const { return cert_bypass_; } bool explicit_rollback() const { bool const ret(flags() == EXPLICIT_ROLLBACK_FLAGS); assert(ret == explicit_rollback_); return ret; } void mark_queued() { assert(!queued_); queued_ = true; } bool queued() const { return queued_; } protected: TrxHandleSlave(bool local, gu::MemPool& mp, void* buf) 
: TrxHandle (&trans_map_, local), local_seqno_ (WSREP_SEQNO_UNDEFINED), global_seqno_ (WSREP_SEQNO_UNDEFINED), last_seen_seqno_ (WSREP_SEQNO_UNDEFINED), depends_seqno_ (WSREP_SEQNO_UNDEFINED), ends_nbo_ (WSREP_SEQNO_UNDEFINED), mem_pool_ (mp), write_set_ (), buf_ (buf), action_ (static_cast(0), 0), certified_ (false), committed_ (false), exit_loop_ (false), cert_bypass_ (false), queued_ (false) #ifndef NDEBUG ,explicit_rollback_(false) #endif /* NDEBUG */ {} friend class TrxHandleMaster; friend class TransMapBuilder; friend class TrxHandleSlaveDeleter; private: static Fsm::TransMap trans_map_; wsrep_seqno_t local_seqno_; wsrep_seqno_t global_seqno_; wsrep_seqno_t last_seen_seqno_; wsrep_seqno_t depends_seqno_; wsrep_seqno_t ends_nbo_; gu::MemPool& mem_pool_; WriteSetIn write_set_; void* const buf_; std::pair action_; bool certified_; bool committed_; bool exit_loop_; bool cert_bypass_; bool queued_; #ifndef NDEBUG bool explicit_rollback_; #endif /* NDEBUG */ TrxHandleSlave(const TrxHandleSlave&); void operator=(const TrxHandleSlave& other); ~TrxHandleSlave() { #ifndef NDEBUG if (explicit_rollback_) assert (flags() == EXPLICIT_ROLLBACK_FLAGS); #endif /* NDEBUG */ } void destroy_local(void* ptr); void sanity_checks() const; void deserialize_error_log(const gu::Exception& e) const; }; /* TrxHandleSlave */ typedef gu::shared_ptr::type TrxHandleSlavePtr; class TrxHandleSlaveDeleter { public: void operator()(TrxHandleSlave* ptr) { gu::MemPool& mp(ptr->mem_pool_); ptr->~TrxHandleSlave(); mp.recycle(ptr); } }; class TrxHandleMaster : public TrxHandle { public: /* signed int here is to detect SIZE < sizeof(TrxHandle) */ static size_t LOCAL_STORAGE_SIZE() { static size_t const ret(gu_page_size_multiple(1 << 13 /* 8Kb */)); return ret; } struct Params { std::string working_dir_; int version_; KeySet::Version key_format_; gu::RecordSet::Version record_set_ver_; int max_write_set_size_; Params (const std::string& wdir, int ver, KeySet::Version kformat, gu::RecordSet::Version 
rsv = gu::RecordSet::VER2, int max_write_set_size = WriteSetNG::MAX_SIZE) : working_dir_ (wdir), version_ (ver), key_format_ (kformat), record_set_ver_ (rsv), max_write_set_size_(max_write_set_size) {} Params () : working_dir_(), version_(), key_format_(), record_set_ver_(), max_write_set_size_() {} }; static const Params Defaults; typedef gu::MemPool Pool; static TrxHandleMaster* New(Pool& pool, const Params& params, const wsrep_uuid_t& source_id, wsrep_conn_id_t conn_id, wsrep_trx_id_t trx_id) { size_t const buf_size(pool.buf_size()); assert(buf_size >= (sizeof(TrxHandleMaster) + sizeof(WriteSetOut))); void* const buf(pool.acquire()); return new(buf) TrxHandleMaster(pool, params, source_id, conn_id, trx_id, buf_size); } void lock() { mutex_.lock(); } #ifndef NDEBUG bool locked() { return mutex_.locked(); } bool owned() { return mutex_.owned(); } #endif /* NDEBUG */ void unlock() { assert(locked()); assert(owned()); mutex_.unlock(); } void set_state(TrxHandle::State const s, int const line = -1) { assert(locked()); assert(owned()); TrxHandle::set_state(s, line); } long gcs_handle() const { return gcs_handle_; } void set_gcs_handle(long gcs_handle) { gcs_handle_ = gcs_handle; } void set_flags(uint32_t const flags) // wsrep flags { TrxHandle::set_flags(flags); uint16_t ws_flags(WriteSetNG::wsrep_flags_to_ws_flags(flags)); write_set_out().set_flags(ws_flags); } void append_key(const KeyData& key) { // Current limitations with certification on trx versions 3 to 5 // impose the the following restrictions on keys // The shared key behavior for TOI operations is completely // untested, so don't allow it (and it probably does not even // make any sense) assert(is_toi() == false || key.shared() == false); /*! 
protection against protocol change during trx lifetime */ if (key.proto_ver != version()) { gu_throw_error(EINVAL) << "key version '" << key.proto_ver << "' does not match to trx version' " << version() << "'"; } gu_trace(write_set_out().append_key(key)); } void append_data(const void* data, const size_t data_len, wsrep_data_type_t type, bool store) { switch (type) { case WSREP_DATA_ORDERED: gu_trace(write_set_out().append_data(data, data_len, store)); break; case WSREP_DATA_UNORDERED: gu_trace(write_set_out().append_unordered(data, data_len,store)); break; case WSREP_DATA_ANNOTATION: gu_trace(write_set_out().append_annotation(data,data_len,store)); break; }; } bool empty() const { return write_set_out().is_empty(); } TrxHandleSlavePtr ts() { return ts_; } void reset_ts() { ts_ = TrxHandleSlavePtr(); } size_t gather(WriteSetNG::GatherVector& out) { set_ws_flags(); return write_set_out().gather(source_id(),conn_id(),trx_id(),out); } void finalize(wsrep_seqno_t const last_seen_seqno) { assert(last_seen_seqno >= 0); assert(ts_ == 0 || last_seen_seqno >= ts_->last_seen_seqno()); int pa_range(pa_range_default()); if (gu_unlikely((flags() & TrxHandle::F_BEGIN) == 0 && (flags() & TrxHandle::F_ISOLATION) == 0)) { /* make sure this fragment depends on the previous */ wsrep_seqno_t prev_seqno(last_ts_seqno_); assert(version() >= WriteSetNG::VER5); assert(prev_seqno >= 0); assert(prev_seqno <= last_seen_seqno); pa_range = std::min(wsrep_seqno_t(pa_range), last_seen_seqno - prev_seqno); } else { assert(ts_ == 0); assert(flags() & TrxHandle::F_ISOLATION || (flags() & TrxHandle::F_ROLLBACK) == 0); } write_set_out().finalize(last_seen_seqno, pa_range); } /* Serializes wiriteset into a single buffer (for unit test purposes) */ void serialize(wsrep_seqno_t const last_seen, std::vector& ret) { set_ws_flags(); write_set_out().serialize(ret, source_id(), conn_id(), trx_id(), last_seen, pa_range_default()); } void clear() { release_write_set_out(); } void 
add_replicated(TrxHandleSlavePtr ts) { assert(locked()); if ((write_set_flags_ & TrxHandle::F_ISOLATION) == 0) { write_set_flags_ &= ~TrxHandle::F_BEGIN; write_set_flags_ &= ~TrxHandle::F_PREPARE; } ts_ = ts; last_ts_seqno_ = ts_->global_seqno(); } WriteSetOut& write_set_out() { /* WriteSetOut is a temporary object needed only at the writeset * collection stage. Since it may allocate considerable resources * we dont't want it to linger as long as TrxHandle is needed and * want to destroy it ASAP. So it is constructed in the buffer * allocated by TrxHandle::New() immediately following this object */ if (gu_unlikely(!wso_)) init_write_set_out(); assert(wso_); return *static_cast(wso_buf()); } void release_write_set_out() { if (gu_likely(wso_)) { write_set_out().~WriteSetOut(); wso_ = false; } } void set_deferred_abort(bool deferred_abort) { deferred_abort_ = deferred_abort; } bool deferred_abort() const { return deferred_abort_; } private: inline int pa_range_default() const { return (version() >= WriteSetNG::VER5 ? 
WriteSetNG::MAX_PA_RANGE :0); } inline void set_ws_flags() { uint32_t const wsrep_flags(trx_flags_to_wsrep_flags(flags())); uint16_t const ws_flags (WriteSetNG::wsrep_flags_to_ws_flags(wsrep_flags)); write_set_out().set_flags(ws_flags); } void init_write_set_out() { assert(!wso_); assert(wso_buf_size_ >= sizeof(WriteSetOut)); gu::byte_t* const wso(static_cast(wso_buf())); gu::byte_t* const store(wso + sizeof(WriteSetOut)); assert(params_.version_ >= 0 && params_.version_ <= WriteSetNG::MAX_VERSION); new (wso) WriteSetOut (params_.working_dir_, trx_id(), params_.key_format_, store, wso_buf_size_ - sizeof(WriteSetOut), 0, params_.record_set_ver_, WriteSetNG::Version(params_.version_), DataSet::MAX_VERSION, DataSet::MAX_VERSION, params_.max_write_set_size_); wso_ = true; } const WriteSetOut& write_set_out() const { return const_cast(this)->write_set_out(); } TrxHandleMaster(gu::MemPool& mp, const Params& params, const wsrep_uuid_t& source_id, wsrep_conn_id_t conn_id, wsrep_trx_id_t trx_id, size_t reserved_size) : TrxHandle(&trans_map_, source_id, conn_id, trx_id, params.version_), mutex_ (), mem_pool_ (mp), params_ (params), ts_ (), wso_buf_size_ (reserved_size - sizeof(*this)), gcs_handle_ (-1), wso_ (false), last_ts_seqno_ (WSREP_SEQNO_UNDEFINED), deferred_abort_ (false) { assert(reserved_size > sizeof(*this) + 1024); } void* wso_buf() { return static_cast(this + 1); } ~TrxHandleMaster() { release_write_set_out(); } gu::Mutex mutex_; gu::MemPool& mem_pool_; static Fsm::TransMap trans_map_; Params const params_; TrxHandleSlavePtr ts_; // current fragment handle size_t const wso_buf_size_; int gcs_handle_; bool wso_; wsrep_seqno_t last_ts_seqno_; bool deferred_abort_; friend class TrxHandle; friend class TrxHandleSlave; friend class TrxHandleMasterDeleter; friend class TransMapBuilder; // overrides TrxHandleMaster(const TrxHandleMaster&); TrxHandleMaster& operator=(const TrxHandleMaster&); }; typedef gu::shared_ptr::type TrxHandleMasterPtr; class 
TrxHandleMasterDeleter { public: void operator()(TrxHandleMaster* ptr) { gu::MemPool& mp(ptr->mem_pool_); ptr->~TrxHandleMaster(); mp.recycle(ptr); } }; class TrxHandleLock { public: TrxHandleLock(TrxHandleMaster& trx) : trx_(trx) , locked_(false) { trx_.lock(); locked_ = true; } ~TrxHandleLock() { if (locked_) { trx_.unlock(); } } void lock() { trx_.lock(); locked_ = true; } void unlock() { assert(locked_ = true); locked_ = false; trx_.unlock(); } private: TrxHandleLock(const TrxHandleLock&); TrxHandleLock& operator=(const TrxHandleLock&); TrxHandleMaster& trx_; bool locked_; }; /* class TrxHnadleLock */ } /* namespace galera*/ #endif // GALERA_TRX_HANDLE_HPP galera-26.4.3/galera/src/wsdb.cpp0000664000177500017540000000745113540715002015052 0ustar dbartmy/* * Copyright (C) 2010-2014 Codership Oy */ #include "wsdb.hpp" #include "trx_handle.hpp" #include "write_set.hpp" #include "gu_lock.hpp" #include "gu_throw.hpp" void galera::Wsdb::print(std::ostream& os) const { os << "trx map:\n"; for (galera::Wsdb::TrxMap::const_iterator i = trx_map_.begin(); i != trx_map_.end(); ++i) { os << i->first << " " << *i->second << "\n"; } os << "conn query map:\n"; for (galera::Wsdb::ConnMap::const_iterator i = conn_map_.begin(); i != conn_map_.end(); ++i) { os << i->first << " "; } os << "\n"; } galera::Wsdb::Wsdb() : trx_pool_ (TrxHandleMaster::LOCAL_STORAGE_SIZE(), 512, "LocalTrxHandle"), trx_map_ (), trx_mutex_ (), conn_map_ (), conn_mutex_() {} galera::Wsdb::~Wsdb() { log_info << "wsdb trx map usage " << trx_map_.size() << " conn query map usage " << conn_map_.size(); log_info << trx_pool_; #ifndef NDEBUG log_info << *this; assert(trx_map_.size() == 0); assert(conn_map_.size() == 0); #endif // !NDEBUG } inline galera::TrxHandleMasterPtr galera::Wsdb::create_trx(const TrxHandleMaster::Params& params, const wsrep_uuid_t& source_id, wsrep_trx_id_t const trx_id) { TrxHandleMasterPtr trx(new_trx(params, source_id, trx_id)); std::pair i (trx_map_.insert(std::make_pair(trx_id, trx))); 
if (gu_unlikely(i.second == false)) gu_throw_fatal; // duplicate trx_id: must never happen

    return i.first->second;
}

/* Looks up the master trx handle for trx_id.
 * When not found and create == true a new handle is created and registered;
 * when not found and create == false an empty pointer is returned. */
galera::TrxHandleMasterPtr
galera::Wsdb::get_trx(const TrxHandleMaster::Params& params,
                      const wsrep_uuid_t&            source_id,
                      wsrep_trx_id_t const           trx_id,
                      bool const                     create)
{
    gu::Lock lock(trx_mutex_);

    TrxMap::iterator const i(trx_map_.find(trx_id));
    if (i == trx_map_.end() && create)
    {
        return create_trx(params, source_id, trx_id);
    }
    else if (i == trx_map_.end())
    {
        return TrxHandleMasterPtr(); // not found, creation not requested
    }

    return i->second;
}

/* Returns the connection context for conn_id, creating it when
 * create == true; returns 0 when not found and creation is not requested. */
galera::Wsdb::Conn*
galera::Wsdb::get_conn(wsrep_conn_id_t const conn_id, bool const create)
{
    gu::Lock lock(conn_mutex_);

    ConnMap::iterator i(conn_map_.find(conn_id));

    if (conn_map_.end() == i)
    {
        if (create == true)
        {
            std::pair p
                (conn_map_.insert(std::make_pair(conn_id, Conn(conn_id))));

            if (gu_unlikely(p.second == false)) gu_throw_fatal; // insert raced/failed: must never happen

            return &p.first->second;
        }

        return 0;
    }

    return &(i->second);
}

/* Returns the trx handle currently associated with connection conn_id
 * (e.g. for TOI queries), creating both the connection context and the
 * handle on demand when create == true.
 * Throws gu::NotFound when the connection does not exist and create == false. */
galera::TrxHandleMasterPtr
galera::Wsdb::get_conn_query(const TrxHandleMaster::Params& params,
                             const wsrep_uuid_t&            source_id,
                             wsrep_conn_id_t const          conn_id,
                             bool const                     create)
{
    Conn* const conn(get_conn(conn_id, create));

    if (0 == conn)
    {
        throw gu::NotFound();
    }

    if (conn->get_trx() == 0 && create == true)
    {
        // note: trx_id -1 marks a connection-level (query) handle
        TrxHandleMasterPtr trx
            (TrxHandleMaster::New(trx_pool_, params, source_id, conn_id, -1),
             TrxHandleMasterDeleter());
        conn->assign_trx(trx);
    }

    return conn->get_trx();
}

/* Drops the handle for trx_id from the trx map (no-op if absent). */
void galera::Wsdb::discard_trx(wsrep_trx_id_t trx_id)
{
    gu::Lock lock(trx_mutex_);

    TrxMap::iterator i;
    if ((i = trx_map_.find(trx_id)) != trx_map_.end())
    {
        trx_map_.erase(i);
    }
}

/* Releases the trx associated with conn_id and removes the connection
 * context from the map (no-op if absent). */
void galera::Wsdb::discard_conn_query(wsrep_conn_id_t conn_id)
{
    gu::Lock lock(conn_mutex_);

    ConnMap::iterator i;
    if ((i = conn_map_.find(conn_id)) != conn_map_.end())
    {
        i->second.reset_trx();
        conn_map_.erase(i);
    }
}
galera-26.4.3/galera/src/galera_common.hpp0000664000177500017540000000234213540715002016715 0ustar dbartmy/*
 * Copyright (C) 2012 Codership Oy 
 */

/*!
* @file common.hpp * * @brief Imports definitions from the global common.h */ #ifndef GALERA_COMMON_HPP #define GALERA_COMMON_HPP #if defined(HAVE_COMMON_H) #include #endif #include namespace galera { #if defined(HAVE_COMMON_H) static std::string const BASE_PORT_KEY(COMMON_BASE_PORT_KEY); static std::string const BASE_PORT_DEFAULT(COMMON_BASE_PORT_DEFAULT); static std::string const BASE_HOST_KEY(COMMON_BASE_HOST_KEY); static std::string const BASE_DIR(COMMON_BASE_DIR_KEY); static std::string const BASE_DIR_DEFAULT(COMMON_BASE_DIR_DEFAULT); static std::string const GALERA_STATE_FILE(COMMON_STATE_FILE); static std::string const VIEW_STATE_FILE(COMMON_VIEW_STAT_FILE); #else static std::string const BASE_PORT_KEY("base_port"); static std::string const BASE_PORT_DEFAULT("4567"); static std::string const BASE_HOST_KEY("base_host"); static std::string const BASE_DIR("base_dir"); static std::string const BASE_DIR_DEFAULT("."); static std::string const GALERA_STATE_FILE("grastate.dat"); static std::string const VIEW_STATE_FILE("gvwstate.dat"); #endif } #endif /* GALERA_COMMON_HPP */ galera-26.4.3/galera/src/certification.cpp0000664000177500017540000012474713540715002016746 0ustar dbartmy// // Copyright (C) 2010-2018 Codership Oy // #include "certification.hpp" #include "gu_lock.hpp" #include "gu_throw.hpp" #include #include // std::for_each using namespace galera; static const bool cert_debug_on(false); #define cert_debug \ if (cert_debug_on == false) { } \ else log_info << "cert debug: " #define CERT_PARAM_LOG_CONFLICTS galera::Certification::PARAM_LOG_CONFLICTS #define CERT_PARAM_OPTIMISTIC_PA galera::Certification::PARAM_OPTIMISTIC_PA static std::string const CERT_PARAM_PREFIX("cert."); std::string const CERT_PARAM_LOG_CONFLICTS(CERT_PARAM_PREFIX + "log_conflicts"); std::string const CERT_PARAM_OPTIMISTIC_PA(CERT_PARAM_PREFIX + "optimistic_pa"); static std::string const CERT_PARAM_MAX_LENGTH (CERT_PARAM_PREFIX + "max_length"); static std::string const 
CERT_PARAM_LENGTH_CHECK (CERT_PARAM_PREFIX + "length_check"); static std::string const CERT_PARAM_LOG_CONFLICTS_DEFAULT("no"); static std::string const CERT_PARAM_OPTIMISTIC_PA_DEFAULT("yes"); /*** It is EXTREMELY important that these constants are the same on all nodes. *** Don't change them ever!!! ***/ static std::string const CERT_PARAM_MAX_LENGTH_DEFAULT("16384"); static std::string const CERT_PARAM_LENGTH_CHECK_DEFAULT("127"); void galera::Certification::register_params(gu::Config& cnf) { cnf.add(CERT_PARAM_LOG_CONFLICTS, CERT_PARAM_LOG_CONFLICTS_DEFAULT); cnf.add(CERT_PARAM_OPTIMISTIC_PA, CERT_PARAM_OPTIMISTIC_PA_DEFAULT); /* The defaults below are deliberately not reflected in conf: people * should not know about these dangerous setting unless they read RTFM. */ cnf.add(CERT_PARAM_MAX_LENGTH); cnf.add(CERT_PARAM_LENGTH_CHECK); } /* a function to get around unset defaults in ctor initialization list */ static int max_length(const gu::Config& conf) { if (conf.is_set(CERT_PARAM_MAX_LENGTH)) return conf.get(CERT_PARAM_MAX_LENGTH); else return gu::Config::from_config(CERT_PARAM_MAX_LENGTH_DEFAULT); } /* a function to get around unset defaults in ctor initialization list */ static int length_check(const gu::Config& conf) { if (conf.is_set(CERT_PARAM_LENGTH_CHECK)) return conf.get(CERT_PARAM_LENGTH_CHECK); else return gu::Config::from_config(CERT_PARAM_LENGTH_CHECK_DEFAULT); } // Purge key set from given index static void purge_key_set(galera::Certification::CertIndexNG& cert_index, galera::TrxHandleSlave* ts, const galera::KeySetIn& key_set, const long count) { for (long i(0); i < count; ++i) { const galera::KeySet::KeyPart& kp(key_set.next()); galera::KeyEntryNG ke(kp); galera::Certification::CertIndexNG::iterator ci(cert_index.find(&ke)); assert(ci != cert_index.end()); if (ci == cert_index.end()) { log_warn << "Could not find key from index"; continue; } galera::KeyEntryNG* const kep(*ci); assert(kep->referenced() == true); wsrep_key_type_t const 
p(kp.wsrep_type(ts->version())); if (kep->ref_trx(p) == ts) { kep->unref(p, ts); if (kep->referenced() == false) { cert_index.erase(ci); delete kep; } } } } void galera::Certification::purge_for_trx(TrxHandleSlave* trx) { assert(mutex_.owned()); assert(trx->version() >= 3 || trx->version() <= 5); const KeySetIn& keys(trx->write_set().keyset()); keys.rewind(); purge_key_set(cert_index_ng_, trx, keys, keys.count()); } /* Specifically for chain use in certify_and_depend_v3to5() */ template bool check_against(const galera::KeyEntryNG* const found, const galera::KeySet::KeyPart& key, wsrep_key_type_t const key_type, galera::TrxHandleSlave* const trx, bool const log_conflict, wsrep_seqno_t& depends_seqno) { enum CheckType { CONFLICT, DEPENDENCY, NOTHING }; static CheckType const check_table [WSREP_KEY_EXCLUSIVE+1][WSREP_KEY_EXCLUSIVE+1] = { // SH RE UP EX second / first { NOTHING, NOTHING, DEPENDENCY, DEPENDENCY }, // SH { NOTHING, NOTHING, DEPENDENCY, CONFLICT }, // RE { DEPENDENCY, DEPENDENCY, CONFLICT, CONFLICT }, // UP { CONFLICT, CONFLICT, CONFLICT, CONFLICT } // EX }; const galera::TrxHandleSlave* const ref_trx(found->ref_trx(REF_KEY_TYPE)); // trx should not have any references in index at this point assert(ref_trx != trx); bool conflict(false); if (gu_likely(0 != ref_trx)) { if ((REF_KEY_TYPE == WSREP_KEY_EXCLUSIVE || REF_KEY_TYPE == WSREP_KEY_UPDATE) && ref_trx) { cert_debug << KeySet::type(REF_KEY_TYPE) << " match: " << *trx << " <---> " << *ref_trx; } if (REF_KEY_TYPE == WSREP_KEY_SHARED || REF_KEY_TYPE == WSREP_KEY_REFERENCE || REF_KEY_TYPE == WSREP_KEY_UPDATE) assert(!ref_trx->is_toi()); CheckType const check_type(check_table[REF_KEY_TYPE][key_type]); switch (check_type) { case CONFLICT: // cert conflict takes place if // 1) write sets originated from different nodes, are within cert // range // 2) ref_trx is in isolation mode, write sets are within cert range // 3) Trx has not been certified yet. Already certified trxs show up // here during index rebuild. 
conflict = (ref_trx->global_seqno() > trx->last_seen_seqno() && (ref_trx->is_toi() || trx->source_id() != ref_trx->source_id()) && trx->certified() == false); if (gu_unlikely(cert_debug_on || (conflict && log_conflict == true))) { log_info << KeySet::type(key_type) << '-' << KeySet::type(REF_KEY_TYPE) << " trx " << (conflict ? "conflict" : "match") << " for key " << key << ": " << *trx << " <---> " << *ref_trx; } /* fall through */ case DEPENDENCY: if (conflict) depends_seqno = WSREP_SEQNO_UNDEFINED; else depends_seqno = std::max(ref_trx->global_seqno(), depends_seqno); /* fall through */ case NOTHING:; } } return conflict; } /*! for convenience returns true if conflict and false if not */ static inline bool certify_and_depend_v3to5(const galera::KeyEntryNG* const found, const galera::KeySet::KeyPart& key, galera::TrxHandleSlave* const trx, bool const log_conflict) { wsrep_seqno_t depends_seqno(trx->depends_seqno()); wsrep_key_type_t const key_type(key.wsrep_type(trx->version())); /* * The following cascade implements these rules: * * | ex | up | re | sh | <- horizontal axis: trx key type * ------------------------ vertical axis: found key type * ex | C | C | C | C | * ------------------------ * up | C | C | D | D | * ------------------------ C - conflict * re | C | D | N | N | D - dependency * ------------------------ N - nothing * sh | D | D | N | N | * ------------------------ * * Note that depends_seqno is an in/out parameter and is updated on every * step. 
*/ if (check_against (found, key, key_type, trx, log_conflict, depends_seqno) || check_against (found, key, key_type, trx, log_conflict, depends_seqno) || (key_type >= WSREP_KEY_UPDATE && /* exclusive keys must be checked against shared */ (check_against (found, key, key_type, trx, log_conflict, depends_seqno) || check_against (found, key, key_type, trx, log_conflict, depends_seqno)))) { return true; } else { if (depends_seqno > trx->depends_seqno()) trx->set_depends_seqno(depends_seqno); return false; } } /* returns true on collision, false otherwise */ static bool certify_v3to5(galera::Certification::CertIndexNG& cert_index_ng, const galera::KeySet::KeyPart& key, galera::TrxHandleSlave* const trx, bool const store_keys, bool const log_conflicts) { galera::KeyEntryNG ke(key); galera::Certification::CertIndexNG::iterator ci(cert_index_ng.find(&ke)); if (cert_index_ng.end() == ci) { if (store_keys) { galera::KeyEntryNG* const kep(new galera::KeyEntryNG(ke)); ci = cert_index_ng.insert(kep).first; cert_debug << "created new entry"; } return false; } else { cert_debug << "found existing entry"; galera::KeyEntryNG* const kep(*ci); // Note: For we skip certification for isolated trxs, only // cert index and key_list is populated. return (!trx->is_toi() && certify_and_depend_v3to5(kep, key, trx, log_conflicts)); } } // Add key to trx references for trx that passed certification. 
// // @param cert_index certification index in use // @param trx certified transaction // @param key_set key_set used in certification // @param key_count number of keys in key set static void do_ref_keys(galera::Certification::CertIndexNG& cert_index, galera::TrxHandleSlave* const trx, const galera::KeySetIn& key_set, const long key_count) { for (long i(0); i < key_count; ++i) { const galera::KeySet::KeyPart& k(key_set.next()); galera::KeyEntryNG ke(k); galera::Certification::CertIndexNG::const_iterator ci(cert_index.find(&ke)); if (ci == cert_index.end()) { gu_throw_fatal << "could not find key '" << k << "' from cert index"; } (*ci)->ref(k.wsrep_type(trx->version()), k, trx); } } // Clean up keys from index that were added by trx that failed // certification. // // @param cert_index certification inde // @param key_set key_set used in certification // @param processed number of keys that were processed in certification static void do_clean_keys(galera::Certification::CertIndexNG& cert_index, const galera::TrxHandleSlave* const trx, const galera::KeySetIn& key_set, const long processed) { /* 'strictly less' comparison is essential in the following loop: * processed key failed cert and was not added to index */ for (long i(0); i < processed; ++i) { KeyEntryNG ke(key_set.next()); // Clean up cert index from entries which were added by this trx galera::Certification::CertIndexNG::iterator ci(cert_index.find(&ke)); if (gu_likely(ci != cert_index.end())) { galera::KeyEntryNG* kep(*ci); if (kep->referenced() == false) { // kel was added to cert_index_ by this trx - // remove from cert_index_ and fall through to delete cert_index.erase(ci); } else continue; assert(kep->referenced() == false); delete kep; } else if(ke.key().wsrep_type(trx->version()) == WSREP_KEY_SHARED) { assert(0); // we actually should never be here, the key should // be either added to cert_index_ or be there already log_warn << "could not find shared key '" << ke.key() << "' from cert index"; } else 
{ /* non-shared keys can duplicate shared in the key set */ } } } galera::Certification::TestResult galera::Certification::do_test_v3to5(TrxHandleSlave* trx, bool store_keys) { cert_debug << "BEGIN CERTIFICATION v" << trx->version() << ": " << *trx; #ifndef NDEBUG // to check that cleanup after cert failure returns cert_index // to original size size_t prev_cert_index_size(cert_index_ng_.size()); #endif // NDEBUG const KeySetIn& key_set(trx->write_set().keyset()); long const key_count(key_set.count()); long processed(0); key_set.rewind(); for (; processed < key_count; ++processed) { const KeySet::KeyPart& key(key_set.next()); if (certify_v3to5(cert_index_ng_, key, trx, store_keys, log_conflicts_)) { goto cert_fail; } } trx->set_depends_seqno(std::max(trx->depends_seqno(), last_pa_unsafe_)); if (store_keys == true) { assert (key_count == processed); key_set.rewind(); do_ref_keys(cert_index_ng_, trx, key_set, key_count); if (trx->pa_unsafe()) last_pa_unsafe_ = trx->global_seqno(); key_count_ += key_count; } cert_debug << "END CERTIFICATION (success): " << *trx; return TEST_OK; cert_fail: cert_debug << "END CERTIFICATION (failed): " << *trx; assert (processed < key_count); if (store_keys == true) { /* Clean up key entries allocated for this trx */ key_set.rewind(); do_clean_keys(cert_index_ng_, trx, key_set, processed); assert(cert_index_ng_.size() == prev_cert_index_size); } return TEST_FAILED; } /* Determine whether a given trx can be correctly certified under the * certification protocol currently established in the group (cert_version) * Certification protocols from 1 to 3 could only handle writesets of the same * version. 
Certification protocol 4 can handle writesets of both version 3 * and 4 */ static inline bool trx_cert_version_match(int const trx_version, int const cert_version) { if (cert_version <= 3) { return (trx_version == cert_version); } else { return (trx_version >= 3 && trx_version <= cert_version); } } galera::Certification::TestResult galera::Certification::do_test(const TrxHandleSlavePtr& trx, bool store_keys) { assert(trx->source_id() != WSREP_UUID_UNDEFINED); if (!trx_cert_version_match(trx->version(), version_)) { log_warn << "trx protocol version: " << trx->version() << " does not match certification protocol version: " << version_; return TEST_FAILED; } // trx->is_certified() == true during index rebuild from IST, do_test() // must not fail, just populate index if (gu_unlikely(trx->certified() == false && (trx->last_seen_seqno() < initial_position_ || trx->global_seqno()-trx->last_seen_seqno() > max_length_))) { if (trx->global_seqno() - trx->last_seen_seqno() > max_length_) { log_warn << "certification interval for trx " << *trx << " exceeds the limit of " << max_length_; } return TEST_FAILED; } TestResult res(TEST_FAILED); gu::Lock lock(mutex_); // why do we need that? - e.g. 
set_trx_committed() /* initialize parent seqno */ if (gu_unlikely(trx_map_.empty())) { trx->set_depends_seqno(trx->global_seqno() - 1); } else { if (optimistic_pa_ == false && trx->last_seen_seqno() > trx->depends_seqno()) trx->set_depends_seqno(trx->last_seen_seqno()); wsrep_seqno_t const ds(trx_map_.begin()->first - 1); if (ds > trx->depends_seqno()) trx->set_depends_seqno(ds); } switch (version_) { case 1: case 2: break; case 3: case 4: case 5: res = do_test_v3to5(trx.get(), store_keys); break; default: gu_throw_fatal << "certification test for version " << version_ << " not implemented"; } assert(TEST_FAILED == res || trx->depends_seqno() >= 0); if (store_keys == true && res == TEST_OK) { ++trx_count_; gu::Lock lock(stats_mutex_); ++n_certified_; deps_dist_ += (trx->global_seqno() - trx->depends_seqno()); cert_interval_ += (trx->global_seqno() - trx->last_seen_seqno() - 1); index_size_ = cert_index_ng_.size(); } // Additional NBO certification. if (trx->flags() & TrxHandle::F_ISOLATION) { res = do_test_nbo(trx); assert(TEST_FAILED == res || trx->depends_seqno() >= 0); } byte_count_ += trx->size(); return res; } galera::Certification::TestResult galera::Certification::do_test_preordered(TrxHandleSlave* trx) { /* Source ID is not always available for preordered events (e.g. event * producer didn't provide any) so for now we must accept undefined IDs. */ //assert(trx->source_id() != WSREP_UUID_UNDEFINED); assert(trx->version() >= 3); assert(trx->preordered()); /* we don't want to go any further unless the writeset checksum is ok */ trx->verify_checksum(); // throws /* if checksum failed we need to throw ASAP, let the caller catch it, * flush monitors, save state and abort. */ /* This is a primitive certification test for preordered actions: * it does not handle gaps and relies on general apply monitor for * parallel applying. Ideally there should be a certification object * per source. 
*/ if (gu_unlikely(last_preordered_id_ && (last_preordered_id_ + 1 != trx->trx_id()))) { log_warn << "Gap in preordered stream: source_id '" << trx->source_id() << "', trx_id " << trx->trx_id() << ", previous id " << last_preordered_id_; assert(0); } trx->set_depends_seqno(last_preordered_seqno_ + 1 - trx->write_set().pa_range()); // +1 compensates for subtracting from a previous seqno, rather than own. trx->mark_certified(); last_preordered_seqno_ = trx->global_seqno(); last_preordered_id_ = trx->trx_id(); return TEST_OK; } // // non-blocking operations // // Prepare a copy of TrxHandleSlave with private storage galera::NBOEntry copy_ts( galera::TrxHandleSlave* ts, galera::TrxHandleSlave::Pool& pool, gu::shared_ptr::type nbo_ctx) { // FIXME: Pass proper working directory from config to MappedBuffer ctor gu::shared_ptr::type buf( new galera::MappedBuffer("/tmp")); assert(ts->action().first && ts->action().second); if (ts->action().first == 0) { gu_throw_fatal << "Unassigned action pointer for transaction, cannot make a copy of: " << *ts; } buf->resize(ts->action().second); std::copy(static_cast(ts->action().first), static_cast(ts->action().first) + ts->action().second, buf->begin()); galera::TrxHandleSlaveDeleter d; gu::shared_ptr::type new_ts( galera::TrxHandleSlave::New(ts->local(), pool), d); if (buf->size() > size_t(std::numeric_limits::max())) gu_throw_error(ERANGE) << "Buffer size " << buf->size() << " out of range"; gcs_action act = {ts->global_seqno(), ts->local_seqno(), &(*buf)[0], static_cast(buf->size()), GCS_ACT_WRITESET}; if (ts->certified() == false) { // TrxHandleSlave is from group gu_trace(new_ts->unserialize(act)); } else { // TrxHandleSlave is from IST gu_trace(new_ts->unserialize(act)); } new_ts->set_local(ts->local()); return galera::NBOEntry(new_ts, buf, nbo_ctx); } static void purge_key_set_nbo(galera::Certification::CertIndexNBO& cert_index, bool is_nbo_index, galera::TrxHandleSlave* ts, const galera::KeySetIn& key_set, const long count) { 
using galera::Certification; using galera::KeyEntryNG; using galera::KeySet; key_set.rewind(); for (long i(0); i < count; ++i) { KeyEntryNG ke(key_set.next()); std::pair ci_range(cert_index.equal_range(&ke)); assert(std::distance(ci_range.first, ci_range.second) >= 1); wsrep_key_type_t const p(ke.key().wsrep_type(ts->version())); Certification::CertIndexNBO::iterator ci; for (ci = ci_range.first; ci != ci_range.second; ++ci) { if ((*ci)->ref_trx(p) == ts) break; } assert(ci != ci_range.second); if (ci == ci_range.second) { log_warn << "purge_key_set_nbo(): Could not find key " << ke.key() << " from NBO index, skipping"; continue; } KeyEntryNG* const kep(*ci); assert(kep->referenced() == true); kep->unref(p, ts); assert(kep->referenced() == false); cert_index.erase(ci); delete kep; } } static void end_nbo(galera::NBOMap::iterator i, galera::TrxHandleSlavePtr ts, galera::Certification::CertIndexNBO& nbo_index, galera::NBOMap& nbo_map) { NBOEntry& e(i->second); log_debug << "Ending NBO started by " << *e.ts_ptr(); // Erase entry from index const KeySetIn& key_set(e.ts_ptr()->write_set().keyset()); purge_key_set_nbo(nbo_index, true, e.ts_ptr(), key_set, key_set.count()); ts->set_ends_nbo(e.ts_ptr()->global_seqno()); nbo_map.erase(i); } gu::shared_ptr::type galera::Certification::nbo_ctx_unlocked( wsrep_seqno_t const seqno) { // This will either // * Insert a new NBOCtx shared_ptr into ctx map if one didn't exist // before, or // * Return existing entry, while newly created shared ptr gets freed // automatically when it goes out of scope return nbo_ctx_map_.insert( std::make_pair(seqno, gu::make_shared())).first->second; } gu::shared_ptr::type galera::Certification::nbo_ctx( wsrep_seqno_t const seqno) { assert(seqno > 0); gu::Lock lock(mutex_); return nbo_ctx_unlocked(seqno); } void galera::Certification::erase_nbo_ctx(wsrep_seqno_t const seqno) { assert(seqno > 0); gu::Lock lock(mutex_); size_t n_erased(nbo_ctx_map_.erase(seqno)); assert(n_erased == 1); (void)n_erased; 
}

// Return true iff the key entry is currently referenced in a mode that
// excludes other references: EXCLUSIVE or UPDATE. The assertion verifies
// that the entry is referenced in at least one mode, but not in all four
// modes at once.
static bool is_exclusive(const galera::KeyEntryNG* ke)
{
    assert(ke != 0);
    assert((ke->ref_trx(WSREP_KEY_SHARED) ||
            ke->ref_trx(WSREP_KEY_REFERENCE) ||
            ke->ref_trx(WSREP_KEY_UPDATE) ||
            ke->ref_trx(WSREP_KEY_EXCLUSIVE)) &&
           !(ke->ref_trx(WSREP_KEY_SHARED) &&
             ke->ref_trx(WSREP_KEY_REFERENCE) &&
             ke->ref_trx(WSREP_KEY_UPDATE) &&
             ke->ref_trx(WSREP_KEY_EXCLUSIVE)));
    return (ke->ref_trx(WSREP_KEY_EXCLUSIVE) != 0 ||
            ke->ref_trx(WSREP_KEY_UPDATE) != 0);
}

// Certify a single key of an NBO (non-blocking operation) writeset against
// the NBO certification index. Returns true on conflict (some other trx
// already holds an exclusive-mode reference to the same key), false if
// this key passes certification. Does not modify the index.
static bool certify_nbo(galera::Certification::CertIndexNBO& cert_index,
                        const galera::KeySet::KeyPart& key,
                        galera::TrxHandleSlave* const trx,
                        bool const log_conflicts)
{
    using galera::KeyEntryNG;
    using galera::Certification;
    using galera::TrxHandleSlave;

    KeyEntryNG ke(key);
    // NOTE(review): the template argument list of std::pair appears to have
    // been lost in extraction here; presumably a pair of
    // Certification::CertIndexNBO iterators (result of equal_range()).
    std::pair it(cert_index.equal_range(&ke));

    // Certification is done over whole index as opposed to regular
    // write set certification where only given range is used

    // If found range is non-empty, it must be either single exclusive
    // key or all shared.
    assert(std::count_if(it.first, it.second, is_exclusive) == 0 ||
           std::distance(it.first, it.second) == 1);

    Certification::CertIndexNBO::iterator i;
    if ((i = std::find_if(it.first, it.second, is_exclusive)) !=
        cert_index.end())
    {
        if (gu_unlikely(log_conflicts == true))
        {
            const TrxHandleSlave* ref_trx((*i)->ref_trx(WSREP_KEY_EXCLUSIVE));
            assert(ref_trx != 0);
            log_info << "NBO conflict for key " << key << ": "
                     << *trx << " <--X--> " << *ref_trx;
        }
        return true;
    }
    return false;
}

// Insert references to all keys of an NBO writeset into the NBO index.
// A fresh KeyEntryNG is allocated per key; ownership passes to the index
// (entries are freed on unref in purge_key_set_nbo()).
static void do_ref_keys_nbo(galera::Certification::CertIndexNBO& index,
                            TrxHandleSlave* const trx,
                            const galera::KeySetIn& key_set,
                            const long key_count)
{
    using galera::KeySet;
    using galera::KeyEntryNG;
    using galera::Certification;

    key_set.rewind();
    for (long i(0); i < key_count; ++i)
    {
        const KeySet::KeyPart& key(key_set.next());
        wsrep_key_type_t const type(key.wsrep_type(trx->version()));
        KeyEntryNG* kep (new KeyEntryNG(key));

        Certification::CertIndexNBO::iterator it;
        // The same trx must not already reference this key in this mode.
        assert((it = index.find(kep)) == index.end() ||
(*it)->ref_trx(type) != trx); it = index.insert(kep); (*it)->ref(type, key, trx); } } galera::Certification::TestResult galera::Certification::do_test_nbo( const TrxHandleSlavePtr& ts) { assert(!ts->is_dummy()); assert(ts->flags() & TrxHandle::F_ISOLATION); assert(ts->flags() & (TrxHandle::F_BEGIN | TrxHandle::F_COMMIT)); if (nbo_position_ >= ts->global_seqno()) { // This is part of cert index preload, needs to be dropped since // it is already processed by this node before partitioning. assert(ts->certified() == true); // Return TEST_OK. If the trx is in index preload, it has // passed certification on donor. log_debug << "Dropping NBO " << *ts; return TEST_OK; } nbo_position_ = ts->global_seqno(); #ifndef NDEBUG size_t prev_nbo_index_size(nbo_index_.size()); #endif // NDEBUG bool ineffective(false); galera::Certification::TestResult ret(TEST_OK); if ((ts->flags() & TrxHandle::F_BEGIN) && (ts->flags() & TrxHandle::F_COMMIT)) { // Old school atomic TOI log_debug << "TOI: " << *ts; const KeySetIn& key_set(ts->write_set().keyset()); long const key_count(key_set.count()); long processed(0); key_set.rewind(); for (; processed < key_count; ++processed) { const KeySet::KeyPart& key(key_set.next()); if (certify_nbo(nbo_index_, key, ts.get(), log_conflicts_)) { ret = TEST_FAILED; break; } } log_debug << "NBO test result " << ret << " for TOI " << *ts; // Atomic TOI should not cause change in NBO index assert(prev_nbo_index_size == nbo_index_.size()); } else if (ts->flags() & TrxHandle::F_BEGIN) { // Beginning of non-blocking operation log_debug << "NBO start: " << *ts; // We need a copy of ts since the lifetime of NBO may exceed // the lifetime of the buffer in GCache NBOEntry entry(copy_ts(ts.get(), nbo_pool_, nbo_ctx_unlocked( ts->global_seqno()))); TrxHandleSlave* new_ts(entry.ts_ptr()); const KeySetIn& key_set(new_ts->write_set().keyset()); long const key_count(key_set.count()); long processed(0); key_set.rewind(); for (; processed < key_count; ++processed) { const 
KeySet::KeyPart& key(key_set.next()); if (certify_nbo(nbo_index_, key, new_ts, log_conflicts_)) { ret = TEST_FAILED; break; } } switch (ret) { case TEST_OK: do_ref_keys_nbo(nbo_index_, new_ts, key_set, key_count); nbo_map_.insert(std::make_pair(new_ts->global_seqno(), entry)); break; case TEST_FAILED: // Clean keys not needed here since certify_nbo() // does not insert them into nbo_index_ break; } } else { assert(ts->nbo_end()); // End of non-blocking operation log_debug << "NBO end: " << *ts; ineffective = true; NBOKey key; const DataSetIn& ws(ts->write_set().dataset()); ws.rewind(); assert(ws.count() == 1); if (ws.count() != 1) gu_throw_fatal << "Invalid dataset count in " << *ts; gu::Buf buf(ws.next()); key.unserialize(static_cast(buf.ptr), buf.size, 0); NBOMap::iterator i(nbo_map_.find(key)); if (i != nbo_map_.end()) { NBOEntry& e(i->second); e.add_ended(ts->source_id()); if (ts->local() == true) { // Clear NBO context aborted flag if it is set by // intermediate view change. e.nbo_ctx()->set_aborted(false); } if (current_view_.subset_of(e.ended_set())) { // All nodes of the current primary view have // ended the operation. 
end_nbo(i, ts, nbo_index_, nbo_map_); ineffective = false; } } else { log_warn << "no corresponding NBO begin found for NBO end " << *ts; } } if (gu_likely(TEST_OK == ret)) { ts->set_depends_seqno(ts->global_seqno() - 1); if (gu_unlikely(ineffective)) { assert(ts->nbo_end()); assert(ts->ends_nbo() == WSREP_SEQNO_UNDEFINED); ret = TEST_FAILED; } } assert(TEST_FAILED == ret || ts->depends_seqno() >= 0); return ret; } galera::Certification::Certification(gu::Config& conf, ServiceThd* thd) : version_ (-1), conf_ (conf), trx_map_ (), cert_index_ng_ (), nbo_map_ (), nbo_ctx_map_ (), nbo_index_ (), nbo_pool_ (sizeof(TrxHandleSlave)), deps_set_ (), current_view_ (), service_thd_ (thd), mutex_ (), trx_size_warn_count_ (0), initial_position_ (-1), position_ (-1), nbo_position_ (-1), safe_to_discard_seqno_ (-1), last_pa_unsafe_ (-1), last_preordered_seqno_ (position_), last_preordered_id_ (0), stats_mutex_ (), n_certified_ (0), deps_dist_ (0), cert_interval_ (0), index_size_ (0), key_count_ (0), byte_count_ (0), trx_count_ (0), max_length_ (max_length(conf)), max_length_check_ (length_check(conf)), inconsistent_ (false), log_conflicts_ (conf.get(CERT_PARAM_LOG_CONFLICTS)), optimistic_pa_ (conf.get(CERT_PARAM_OPTIMISTIC_PA)) {} galera::Certification::~Certification() { log_info << "cert index usage at exit " << cert_index_ng_.size(); log_info << "cert trx map usage at exit " << trx_map_.size(); log_info << "deps set usage at exit " << deps_set_.size(); double avg_cert_interval(0); double avg_deps_dist(0); size_t index_size(0); stats_get(avg_cert_interval, avg_deps_dist, index_size); log_info << "avg deps dist " << avg_deps_dist; log_info << "avg cert interval " << avg_cert_interval; log_info << "cert index size " << index_size; gu::Lock lock(mutex_); for_each(trx_map_.begin(), trx_map_.end(), PurgeAndDiscard(*this)); trx_map_.clear(); nbo_map_.clear(); if (service_thd_) { service_thd_->release_seqno(position_); service_thd_->flush(gu::UUID()); } } void 
galera::Certification::assign_initial_position(const gu::GTID& gtid, int const version) { assert(gtid.seqno() >= 0 || gtid == gu::GTID()); switch (version) { // value -1 used in initialization when trx protocol version is not // available case -1: case 1: case 2: case 3: case 4: case 5: break; default: gu_throw_fatal << "certification/trx version " << version << " not supported"; } wsrep_seqno_t const seqno(gtid.seqno()); gu::Lock lock(mutex_); std::for_each(trx_map_.begin(), trx_map_.end(), PurgeAndDiscard(*this)); if (seqno >= position_) { assert(cert_index_ng_.size() == 0); } else { if (seqno > 0) // don't warn on index reset. { log_warn << "moving position backwards: " << position_ << " -> " << seqno; } std::for_each(cert_index_ng_.begin(), cert_index_ng_.end(), gu::DeleteObject()); cert_index_ng_.clear(); } trx_map_.clear(); assert(cert_index_ng_.empty()); if (service_thd_) { service_thd_->release_seqno(position_); service_thd_->flush(gtid.uuid()); } log_info << "####### Assign initial position for certification: " << gtid << ", protocol version: " << version; initial_position_ = seqno; position_ = seqno; safe_to_discard_seqno_ = seqno; last_pa_unsafe_ = seqno; last_preordered_seqno_ = position_; last_preordered_id_ = 0; version_ = version; } void galera::Certification::adjust_position(const View& view, const gu::GTID& gtid, int const version) { assert(gtid.uuid() != GU_UUID_NIL); assert(gtid.seqno() >= 0); gu::Lock lock(mutex_); // this assert is too strong: local ordered transactions may get canceled without // entering certification assert(position_ + 1 == seqno || 0 == position_); log_info << "####### Adjusting cert position: " << position_ << " -> " << gtid.seqno(); if (version != version_) { std::for_each(trx_map_.begin(), trx_map_.end(), PurgeAndDiscard(*this)); assert(trx_map_.empty() || trx_map_.rbegin()->first + 1 == position_); trx_map_.clear(); assert(cert_index_ng_.empty()); if (service_thd_) { service_thd_->release_seqno(position_); } } if 
(service_thd_)
    {
        service_thd_->flush(gtid.uuid());
    }

    position_       = gtid.seqno();
    last_pa_unsafe_ = position_;
    version_        = version;
    current_view_   = view;

    // Loop over NBO entries, clear state and abort waiters. NBO end waiters
    // must resend end messages.
    for (NBOMap::iterator i(nbo_map_.begin()); i != nbo_map_.end(); ++i)
    {
        NBOEntry& e(i->second);
        e.clear_ended();
        e.nbo_ctx()->set_aborted(true);
    }
}

// Atomically advance the certification position by one and return it.
wsrep_seqno_t galera::Certification::increment_position()
{
    gu::Lock lock(mutex_);
    position_++;
    return position_;
}

// Run the appropriate certification test: preordered writesets take the
// simplified do_test_preordered() path, everything else goes through
// do_test(). On failure the trx is marked as a dummy (no-op) writeset.
galera::Certification::TestResult
galera::Certification::test(const TrxHandleSlavePtr& trx, bool store_keys)
{
    assert(trx->global_seqno() >= 0 /* && trx->local_seqno() >= 0 */);

    const TestResult ret
        (trx->preordered() ?
         do_test_preordered(trx.get()) : do_test(trx, store_keys));

    assert(TEST_FAILED == ret || trx->depends_seqno() >= 0);

    if (gu_unlikely(ret != TEST_OK))
    {
        trx->mark_dummy(__LINE__);
    }

    return ret;
}

// Smallest seqno up to which certification state can be discarded:
// one less than the smallest still-referenced last-seen seqno, or the
// recorded safe-to-discard seqno when no dependencies are tracked.
// Caller must hold mutex_.
wsrep_seqno_t galera::Certification::get_safe_to_discard_seqno_() const
{
    wsrep_seqno_t retval;
    if (deps_set_.empty() == true)
    {
        retval = safe_to_discard_seqno_;
    }
    else
    {
        retval = (*deps_set_.begin()) - 1;
    }
    return retval;
}

// Purge trx map entries with seqno <= `seqno` and, if handle_gcache is
// set, let the service thread release the corresponding GCache buffers.
// Caller must hold mutex_. Returns the purge seqno.
wsrep_seqno_t
galera::Certification::purge_trxs_upto_(wsrep_seqno_t const seqno,
                                        bool const          handle_gcache)
{
    assert (seqno > 0);

    // upper_bound() makes the purge inclusive of `seqno` itself.
    TrxMap::iterator purge_bound(trx_map_.upper_bound(seqno));

    cert_debug << "purging index up to " << seqno;

    for_each(trx_map_.begin(), purge_bound, PurgeAndDiscard(*this));
    trx_map_.erase(trx_map_.begin(), purge_bound);

    if (handle_gcache && service_thd_) service_thd_->release_seqno(seqno);

    // Periodic progress logging (every ~10000 remaining entries).
    if (0 == ((trx_map_.size() + 1) % 10000))
    {
        log_debug << "trx map after purge: length: " << trx_map_.size()
                  << ", requested purge seqno: " << seqno
                  << ", real purge seqno: " << trx_map_.begin()->first - 1;
    }

    return seqno;
}

// Certify trx and, on success, record it in the trx map and the
// dependency set. May trim the certification index when the trx map
// exceeds the configured maximum length.
galera::Certification::TestResult
galera::Certification::append_trx(const TrxHandleSlavePtr& trx)
{
    // explicit ROLLBACK is dummy()
    assert(!trx->is_dummy());
    assert(trx->global_seqno() >= 0 /* &&
trx->local_seqno() >= 0 */); assert(trx->global_seqno() > position_); #ifndef NDEBUG bool const explicit_rollback(trx->explicit_rollback()); #endif /* NDEBUG */ { gu::Lock lock(mutex_); if (gu_unlikely(trx->global_seqno() != position_ + 1)) { // this is perfectly normal if trx is rolled back just after // replication, keeping the log though log_debug << "seqno gap, position: " << position_ << " trx seqno " << trx->global_seqno(); } if (gu_unlikely((trx->last_seen_seqno() + 1) < trx_map_.begin()->first)) { /* See #733 - for now it is false positive */ cert_debug << "WARNING: last_seen_seqno is below certification index: " << trx_map_.begin()->first << " > " << trx->last_seen_seqno(); } position_ = trx->global_seqno(); if (gu_unlikely(!(position_ & max_length_check_) && (trx_map_.size() > static_cast(max_length_)))) { log_debug << "trx map size: " << trx_map_.size() << " - check if status.last_committed is incrementing"; wsrep_seqno_t trim_seqno(position_ - max_length_); wsrep_seqno_t const stds (get_safe_to_discard_seqno_()); if (trim_seqno > stds) { log_warn << "Attempt to trim certification index at " << trim_seqno << ", above safe-to-discard: " << stds; trim_seqno = stds; } else { cert_debug << "purging index up to " << trim_seqno; } purge_trxs_upto_(trim_seqno, true); } } const TestResult retval(test(trx, true)); { assert(trx->global_seqno() > 0); gu::Lock lock(mutex_); if (trx_map_.insert( std::make_pair(trx->global_seqno(), trx)).second == false) gu_throw_fatal << "duplicate trx entry " << *trx; // trx with local seqno WSREP_SEQNO_UNDEFINED originates from // IST so deps set tracking should not be done if (trx->local_seqno() != WSREP_SEQNO_UNDEFINED) { assert(trx->last_seen_seqno() != WSREP_SEQNO_UNDEFINED); deps_set_.insert(trx->last_seen_seqno()); assert(deps_set_.size() <= trx_map_.size()); } } if (!trx->certified()) trx->mark_certified(); #ifndef NDEBUG if (explicit_rollback) { assert(trx->explicit_rollback()); assert(retval == TEST_OK); 
assert(trx->state() == TrxHandle::S_CERTIFYING);
    }
#endif /* NDEBUG */

    return retval;
}

// Mark trx as committed: remove its last-seen seqno from the dependency
// set and, if an index purge is due, return the new safe-to-discard
// seqno; returns WSREP_SEQNO_UNDEFINED otherwise.
wsrep_seqno_t
galera::Certification::set_trx_committed(TrxHandleSlave& trx)
{
    assert(trx.global_seqno() >= 0);
    assert(trx.is_committed() == false);

    wsrep_seqno_t ret(WSREP_SEQNO_UNDEFINED);
    {
        gu::Lock lock(mutex_);

        // certified trx with local seqno WSREP_SEQNO_UNDEFINED originates from
        // IST so deps set tracking should not be done

        // Local explicit rollback events bypassed certification
        if (trx.certified() == true &&
            trx.local_seqno() != WSREP_SEQNO_UNDEFINED &&
            !trx.cert_bypass())
        {
            assert(trx.last_seen_seqno() != WSREP_SEQNO_UNDEFINED);
            DepsSet::iterator i(deps_set_.find(trx.last_seen_seqno()));
            assert(i != deps_set_.end());

            // Last tracked dependency: everything up to it becomes
            // safe to discard.
            if (deps_set_.size() == 1) safe_to_discard_seqno_ = *i;

            deps_set_.erase(i);
        }

        if (gu_unlikely(index_purge_required()))
        {
            ret = get_safe_to_discard_seqno_();
        }
    }
    trx.mark_committed();
    return ret;
}

// Parse a boolean configuration value into `param`, logging the
// transition; throws EINVAL on unparseable input.
// NOTE(review): the template argument of gu::Config::from_config appears
// to have been lost in extraction (presumably <bool>).
void set_boolean_parameter(bool& param,
                           const std::string& value,
                           const std::string& param_name,
                           const std::string& change_msg)
{
    try
    {
        bool const old(param);
        param = gu::Config::from_config(value);
        if (old != param)
        {
            log_info << (param ?
"Enabled " : "Disabled ") << change_msg; } } catch (gu::NotFound& e) { gu_throw_error(EINVAL) << "Bad value '" << value << "' for boolean parameter '" << param_name << '\''; } } void galera::Certification::param_set(const std::string& key, const std::string& value) { if (key == Certification::PARAM_LOG_CONFLICTS) { set_boolean_parameter(log_conflicts_, value, CERT_PARAM_LOG_CONFLICTS, "logging of certification conflicts."); } else if (key == Certification::PARAM_OPTIMISTIC_PA) { set_boolean_parameter(optimistic_pa_, value, CERT_PARAM_OPTIMISTIC_PA, "\"optimistic\" parallel applying."); } else { throw gu::NotFound(); } conf_.set(key, value); } void galera::Certification::mark_inconsistent() { gu::Lock lock(mutex_); assert(!inconsistent_); inconsistent_ = true; } galera-26.4.3/galera/src/replicator_smm_stats.cpp0000664000177500017540000003173513540715002020353 0ustar dbartmy/* Copyright (C) 2010-2017 Codership Oy */ #include "replicator_smm.hpp" #include #include // @todo: should be protected static member of the parent class static wsrep_member_status_t state2stats(galera::ReplicatorSMM::State state) { switch (state) { case galera::ReplicatorSMM::S_DESTROYED : case galera::ReplicatorSMM::S_CLOSED : case galera::ReplicatorSMM::S_CONNECTED : return WSREP_MEMBER_UNDEFINED; case galera::ReplicatorSMM::S_JOINING : return WSREP_MEMBER_JOINER; case galera::ReplicatorSMM::S_JOINED : return WSREP_MEMBER_JOINED; case galera::ReplicatorSMM::S_SYNCED : return WSREP_MEMBER_SYNCED; case galera::ReplicatorSMM::S_DONOR : return WSREP_MEMBER_DONOR; } log_fatal << "Unknown state code: " << state; assert(0); return WSREP_MEMBER_ERROR; } // @todo: should be protected static member of the parent class static const char* state2stats_str(galera::ReplicatorSMM::State state, galera::ReplicatorSMM::SstState sst_state) { using galera::ReplicatorSMM; switch (state) { case galera::ReplicatorSMM::S_DESTROYED: return "Destroyed"; case galera::ReplicatorSMM::S_CLOSED: case 
galera::ReplicatorSMM::S_CONNECTED: { if (sst_state == ReplicatorSMM::SST_REQ_FAILED) return "Joining: State Transfer request failed"; else if (sst_state == ReplicatorSMM::SST_FAILED) return "Joining: State Transfer failed"; else return "Initialized"; } case galera::ReplicatorSMM::S_JOINING: { if (sst_state == ReplicatorSMM::SST_WAIT) return "Joining: receiving State Transfer"; else return "Joining"; } case galera::ReplicatorSMM::S_JOINED: return "Joined"; case galera::ReplicatorSMM::S_SYNCED: return "Synced"; case galera::ReplicatorSMM::S_DONOR: return "Donor/Desynced"; } log_fatal << "Unknown state: " << state; assert(0); return "Unknown state code: "; } typedef enum status_vars { STATS_STATE_UUID = 0, STATS_PROTOCOL_VERSION, STATS_LAST_COMMITTED, STATS_REPLICATED, STATS_REPLICATED_BYTES, STATS_KEYS_COUNT, STATS_KEYS_BYTES, STATS_DATA_BYTES, STATS_UNRD_BYTES, STATS_RECEIVED, STATS_RECEIVED_BYTES, STATS_LOCAL_COMMITS, STATS_LOCAL_CERT_FAILURES, STATS_LOCAL_REPLAYS, STATS_LOCAL_SEND_QUEUE, STATS_LOCAL_SEND_QUEUE_MAX, STATS_LOCAL_SEND_QUEUE_MIN, STATS_LOCAL_SEND_QUEUE_AVG, STATS_LOCAL_RECV_QUEUE, STATS_LOCAL_RECV_QUEUE_MAX, STATS_LOCAL_RECV_QUEUE_MIN, STATS_LOCAL_RECV_QUEUE_AVG, STATS_LOCAL_CACHED_DOWNTO, STATS_FC_PAUSED_NS, STATS_FC_PAUSED_AVG, STATS_FC_SSENT, // STATS_FC_CSENT, STATS_FC_RECEIVED, STATS_CERT_DEPS_DISTANCE, STATS_APPLY_OOOE, STATS_APPLY_OOOL, STATS_APPLY_WINDOW, STATS_COMMIT_OOOE, STATS_COMMIT_OOOL, STATS_COMMIT_WINDOW, STATS_LOCAL_STATE, STATS_LOCAL_STATE_COMMENT, STATS_CERT_INDEX_SIZE, STATS_CAUSAL_READS, STATS_CERT_INTERVAL, STATS_OPEN_TRX, STATS_OPEN_CONN, STATS_INCOMING_LIST, STATS_MAX } StatusVars; static const struct wsrep_stats_var wsrep_stats[STATS_MAX + 1] = { { "local_state_uuid", WSREP_VAR_STRING, { 0 } }, { "protocol_version", WSREP_VAR_INT64, { 0 } }, { "last_committed", WSREP_VAR_INT64, { -1 } }, { "replicated", WSREP_VAR_INT64, { 0 } }, { "replicated_bytes", WSREP_VAR_INT64, { 0 } }, { "repl_keys", WSREP_VAR_INT64, { 0 } }, { 
"repl_keys_bytes", WSREP_VAR_INT64, { 0 } }, { "repl_data_bytes", WSREP_VAR_INT64, { 0 } }, { "repl_other_bytes", WSREP_VAR_INT64, { 0 } }, { "received", WSREP_VAR_INT64, { 0 } }, { "received_bytes", WSREP_VAR_INT64, { 0 } }, { "local_commits", WSREP_VAR_INT64, { 0 } }, { "local_cert_failures", WSREP_VAR_INT64, { 0 } }, { "local_replays", WSREP_VAR_INT64, { 0 } }, { "local_send_queue", WSREP_VAR_INT64, { 0 } }, { "local_send_queue_max", WSREP_VAR_INT64, { 0 } }, { "local_send_queue_min", WSREP_VAR_INT64, { 0 } }, { "local_send_queue_avg", WSREP_VAR_DOUBLE, { 0 } }, { "local_recv_queue", WSREP_VAR_INT64, { 0 } }, { "local_recv_queue_max", WSREP_VAR_INT64, { 0 } }, { "local_recv_queue_min", WSREP_VAR_INT64, { 0 } }, { "local_recv_queue_avg", WSREP_VAR_DOUBLE, { 0 } }, { "local_cached_downto", WSREP_VAR_INT64, { 0 } }, { "flow_control_paused_ns", WSREP_VAR_INT64, { 0 } }, { "flow_control_paused", WSREP_VAR_DOUBLE, { 0 } }, { "flow_control_sent", WSREP_VAR_INT64, { 0 } }, // { "flow_control_conts_sent", WSREP_VAR_INT64, { 0 } }, { "flow_control_recv", WSREP_VAR_INT64, { 0 } }, { "cert_deps_distance", WSREP_VAR_DOUBLE, { 0 } }, { "apply_oooe", WSREP_VAR_DOUBLE, { 0 } }, { "apply_oool", WSREP_VAR_DOUBLE, { 0 } }, { "apply_window", WSREP_VAR_DOUBLE, { 0 } }, { "commit_oooe", WSREP_VAR_DOUBLE, { 0 } }, { "commit_oool", WSREP_VAR_DOUBLE, { 0 } }, { "commit_window", WSREP_VAR_DOUBLE, { 0 } }, { "local_state", WSREP_VAR_INT64, { 0 } }, { "local_state_comment", WSREP_VAR_STRING, { 0 } }, { "cert_index_size", WSREP_VAR_INT64, { 0 } }, { "causal_reads", WSREP_VAR_INT64, { 0 } }, { "cert_interval", WSREP_VAR_DOUBLE, { 0 } }, { "open_transactions", WSREP_VAR_INT64, { 0 } }, { "open_connections", WSREP_VAR_INT64, { 0 } }, { "incoming_addresses", WSREP_VAR_STRING, { 0 } }, { 0, WSREP_VAR_STRING, { 0 } } }; void galera::ReplicatorSMM::build_stats_vars ( std::vector& stats) { const struct wsrep_stats_var* ptr(wsrep_stats); do { stats.push_back(*ptr); } while (ptr++->name != 0); 
stats[STATS_STATE_UUID].value._string = state_uuid_str_; } const struct wsrep_stats_var* galera::ReplicatorSMM::stats_get() const { if (S_DESTROYED == state_()) return 0; std::vector sv(wsrep_stats_); sv[STATS_PROTOCOL_VERSION ].value._int64 = protocol_version_; wsrep_gtid last_committed; (void)last_committed_id(&last_committed); sv[STATS_LAST_COMMITTED ].value._int64 = last_committed.seqno; sv[STATS_REPLICATED ].value._int64 = replicated_(); sv[STATS_REPLICATED_BYTES ].value._int64 = replicated_bytes_(); sv[STATS_KEYS_COUNT ].value._int64 = keys_count_(); sv[STATS_KEYS_BYTES ].value._int64 = keys_bytes_(); sv[STATS_DATA_BYTES ].value._int64 = data_bytes_(); sv[STATS_UNRD_BYTES ].value._int64 = unrd_bytes_(); sv[STATS_RECEIVED ].value._int64 = as_->received(); sv[STATS_RECEIVED_BYTES ].value._int64 = as_->received_bytes(); sv[STATS_LOCAL_COMMITS ].value._int64 = local_commits_(); sv[STATS_LOCAL_CERT_FAILURES].value._int64 = local_cert_failures_(); sv[STATS_LOCAL_REPLAYS ].value._int64 = local_replays_(); struct gcs_stats stats; gcs_.get_stats (&stats); sv[STATS_LOCAL_SEND_QUEUE ].value._int64 = stats.send_q_len; sv[STATS_LOCAL_SEND_QUEUE_MAX].value._int64 = stats.send_q_len_max; sv[STATS_LOCAL_SEND_QUEUE_MIN].value._int64 = stats.send_q_len_min; sv[STATS_LOCAL_SEND_QUEUE_AVG].value._double = stats.send_q_len_avg; sv[STATS_LOCAL_RECV_QUEUE ].value._int64 = stats.recv_q_len; sv[STATS_LOCAL_RECV_QUEUE_MAX].value._int64 = stats.recv_q_len_max; sv[STATS_LOCAL_RECV_QUEUE_MIN].value._int64 = stats.recv_q_len_min; sv[STATS_LOCAL_RECV_QUEUE_AVG].value._double = stats.recv_q_len_avg; sv[STATS_LOCAL_CACHED_DOWNTO ].value._int64 = gcache_.seqno_min(); sv[STATS_FC_PAUSED_NS ].value._int64 = stats.fc_paused_ns; sv[STATS_FC_PAUSED_AVG ].value._double = stats.fc_paused_avg; sv[STATS_FC_SSENT ].value._int64 = stats.fc_ssent; // sv[STATS_FC_CSENT ].value._int64 = stats.fc_csent; sv[STATS_FC_RECEIVED ].value._int64 = stats.fc_received; double avg_cert_interval(0); double 
avg_deps_dist(0); size_t index_size(0); cert_.stats_get(avg_cert_interval, avg_deps_dist, index_size); sv[STATS_CERT_DEPS_DISTANCE ].value._double = avg_deps_dist; sv[STATS_CERT_INTERVAL ].value._double = avg_cert_interval; sv[STATS_CERT_INDEX_SIZE ].value._int64 = index_size; double oooe; double oool; double win; apply_monitor_.get_stats(&oooe, &oool, &win); sv[STATS_APPLY_OOOE ].value._double = oooe; sv[STATS_APPLY_OOOL ].value._double = oool; sv[STATS_APPLY_WINDOW ].value._double = win; commit_monitor_.get_stats(&oooe, &oool, &win); sv[STATS_COMMIT_OOOE ].value._double = oooe; sv[STATS_COMMIT_OOOL ].value._double = oool; sv[STATS_COMMIT_WINDOW ].value._double = win; if (st_.corrupt()) { sv[STATS_LOCAL_STATE ].value._int64 = WSREP_MEMBER_ERROR; sv[STATS_LOCAL_STATE_COMMENT].value._string = "Inconsistent"; } else { sv[STATS_LOCAL_STATE ].value._int64 =state2stats(state_()); sv[STATS_LOCAL_STATE_COMMENT].value._string =state2stats_str(state_(), sst_state_); } sv[STATS_CAUSAL_READS ].value._int64 = causal_reads_(); Wsdb::stats wsdb_stats(wsdb_.get_stats()); sv[STATS_OPEN_TRX].value._int64 = wsdb_stats.n_trx_; sv[STATS_OPEN_CONN].value._int64 = wsdb_stats.n_conn_; // Get gcs backend status gu::Status status; gcs_.get_status(status); #ifdef GU_DBUG_ON status.insert("debug_sync_waiters", gu_debug_sync_waiters()); #endif // GU_DBUG_ON // Dynamical strings are copied into buffer allocated after stats var array. // Compute space needed. size_t tail_size(0); for (gu::Status::const_iterator i(status.begin()); i != status.end(); ++i) { tail_size += i->first.size() + 1 + i->second.size() + 1; } gu::Lock lock_inc(incoming_mutex_); tail_size += incoming_list_.size() + 1; /* Create a buffer to be passed to the caller. 
*/ // The buffer size needed: // * Space for wsrep_stats_ array // * Space for additional elements from status map // * Trailing space for string store size_t const vec_size( (sv.size() + status.size())*sizeof(struct wsrep_stats_var)); struct wsrep_stats_var* const buf(static_cast( gu_malloc(vec_size + tail_size))); if (buf) { // Resize sv to have enough space for variables from status sv.resize(sv.size() + status.size()); // Initial tail_buf position char* tail_buf(reinterpret_cast(buf + sv.size())); // Assign incoming list strncpy(tail_buf, incoming_list_.c_str(), incoming_list_.size() + 1); sv[STATS_INCOMING_LIST].value._string = tail_buf; tail_buf += incoming_list_.size() + 1; // Iterate over dynamical status variables and assing strings size_t sv_pos(STATS_INCOMING_LIST + 1); for (gu::Status::const_iterator i(status.begin()); i != status.end(); ++i, ++sv_pos) { // Name strncpy(tail_buf, i->first.c_str(), i->first.size() + 1); sv[sv_pos].name = tail_buf; tail_buf += i->first.size() + 1; // Type sv[sv_pos].type = WSREP_VAR_STRING; // Value strncpy(tail_buf, i->second.c_str(), i->second.size() + 1); sv[sv_pos].value._string = tail_buf; tail_buf += i->second.size() + 1; } assert(sv_pos == sv.size() - 1); // NULL terminate sv[sv_pos].name = 0; sv[sv_pos].type = WSREP_VAR_STRING; sv[sv_pos].value._string = 0; assert(static_cast (tail_buf - reinterpret_cast(buf)) == vec_size + tail_size); assert(reinterpret_cast(buf)[vec_size + tail_size - 1] == '\0'); // Finally copy sv vector to buf memcpy(buf, &sv[0], vec_size); } else { log_warn << "Failed to allocate stats vars buffer to " << (vec_size + tail_size) << " bytes. 
System is running out of memory."; } return buf; } void galera::ReplicatorSMM::stats_reset() { if (S_DESTROYED == state_()) return; gcs_.flush_stats (); apply_monitor_.flush_stats(); commit_monitor_.flush_stats(); cert_.stats_reset(); } void galera::ReplicatorSMM::stats_free(struct wsrep_stats_var* arg) { gu_free(arg); } galera-26.4.3/galera/src/write_set_ng.hpp0000664000177500017540000007250013540715002016606 0ustar dbartmy// // Copyright (C) 2013-2017 Codership Oy // /* * Planned writeset composition (not to scale): * * [WS header][ key set ][ data set ][ unordered set ] * * WS header contains common info: total size, set versions etc. * Key set and data set are always present, unordered set is optional. */ #ifndef GALERA_WRITE_SET_NG_HPP #define GALERA_WRITE_SET_NG_HPP #include "wsrep_api.h" #include "key_set.hpp" #include "data_set.hpp" #include "gu_serialize.hpp" #include "gu_vector.hpp" #include #include #include #include namespace galera { class WriteSetNG { public: static int const MAX_SIZE = 0x7fffffff; static int const MAX_PA_RANGE = 0x0000ffff; enum Version { VER3 = 3, VER4, VER5 }; /* Max header version that we can understand */ static Version const MAX_VERSION = VER5; /* Parses beginning of the header to detect writeset version and * returns it as raw integer for backward compatibility * static Version version(int v) will convert it to enum */ static int version(const void* const buf, size_t const buflen) { if (gu_likely(buflen >= 4)) { const gu::byte_t* const b(static_cast(buf)); if (b[0] == Header::MAGIC_BYTE && b[1] >= ((VER3 << 4) | VER3) && b[2] >= 32 /* header size will hardly ever go below 32 */) { int const min_ver(b[1] & 0x0f); int const max_ver(b[1] >> 4); if (min_ver <= max_ver) /* sanity check */ { /* supported situations: return max supported version */ if (max_ver < MAX_VERSION) return max_ver; if (min_ver <= MAX_VERSION) return MAX_VERSION; /* unsupported situation: minimum required version is * greater than maximum known */ return 
min_ver; } } else if (0 == b[1] && 0 == b[2] && b[3] <= 2) { /* header from 2.x and before */ return b[3]; } /* unrecognized header, fall through to error */ } return -1; } static Version version(int v) { switch (v) { case VER3: return VER3; case VER4: return VER4; case VER5: return VER5; } gu_throw_error (EPROTO) << "Unrecognized writeset version: " << v; } /* These flags should be fixed to wire protocol version and so * technically can't be initialized to WSREP_FLAG_xxx macros as the * latter may arbitrarily change. */ enum Flags { F_COMMIT = 1 << 0, F_ROLLBACK = 1 << 1, F_TOI = 1 << 2, F_PA_UNSAFE = 1 << 3, F_COMMUTATIVE = 1 << 4, F_NATIVE = 1 << 5, F_BEGIN = 1 << 6, F_PREPARE = 1 << 7, /* * reserved for provider extension */ F_CERTIFIED = 1 << 14, // needed to correctly interprete pa_range // field (VER5 and up) F_PREORDERED = 1 << 15 // (VER5 and up) }; static bool const FLAGS_MATCH_API_FLAGS = (WSREP_FLAG_TRX_END == F_COMMIT && WSREP_FLAG_ROLLBACK == F_ROLLBACK && WSREP_FLAG_ISOLATION == F_TOI && WSREP_FLAG_PA_UNSAFE == F_PA_UNSAFE && WSREP_FLAG_COMMUTATIVE == F_COMMUTATIVE && WSREP_FLAG_NATIVE == F_NATIVE && WSREP_FLAG_TRX_START == F_BEGIN && WSREP_FLAG_TRX_PREPARE == F_PREPARE); static uint32_t wsrep_flags_to_ws_flags (uint32_t flags); typedef gu::RecordSet::GatherVector GatherVector; /* TODO: separate metadata access from physical representation in * future versions */ class Header { public: static unsigned char const MAGIC_BYTE = 'G'; static Version version(const gu::Buf& buf) { /* the following will throw if version is not supported */ return WriteSetNG::version (WriteSetNG::version(buf.ptr, buf.size)); } static unsigned char size(Version ver) { switch (ver) { case VER3: // fall through case VER4: // fall through case VER5: { GU_COMPILE_ASSERT(0 == (V3_SIZE % GU_MIN_ALIGNMENT), unaligned_header_size); return V3_SIZE; } } log_fatal << "Unknown writeset version: " << ver; abort(); // want to dump core right here } /* This is for WriteSetOut */ explicit 
Header (Version ver) : local_(), ptr_(local_), ver_(ver), size_(size(ver)), chksm_() { assert((uintptr_t(ptr_) % GU_WORD_BYTES) == 0); assert (size_t(size_) <= sizeof(local_)); } size_t gather (KeySet::Version kver, DataSet::Version const dver, bool unord, bool annot, uint16_t flags, const wsrep_uuid_t& source, const wsrep_conn_id_t& conn, const wsrep_trx_id_t& trx, GatherVector& out); /* records last_seen, timestamp and CRC before replication */ void finalize(wsrep_seqno_t ls, int pa_range); /* records partial seqno, pa_range, timestamp and CRC before * replication (for preordered events)*/ void finalize_preordered(uint16_t pa_range) { finalize(0, pa_range); } /* This is for WriteSetIn */ explicit Header (const gu::Buf& buf) : local_(), ptr_ (static_cast(const_cast(buf.ptr))), ver_ (version(buf)), size_ (check_size(ver_, ptr_, buf.size)), chksm_(ver_, ptr_, size_) { assert((uintptr_t(ptr_) % GU_WORD_BYTES) == 0); } Header () : local_(), ptr_(NULL), ver_(), size_(0), chksm_() {} /* for late WriteSetIn initialization */ void read_buf (const gu::Buf& buf) { ver_ = version(buf); ptr_ = static_cast(const_cast(buf.ptr)); gu_trace(size_ = check_size (ver_, ptr_, buf.size)); Checksum::verify(ver_, ptr_, size_); } Version version() const { return ver_; } unsigned char size() const { return size_; } const gu::byte_t* ptr() const { return ptr_; } KeySet::Version keyset_ver() const { return KeySet::version((ptr_[V3_SETS_OFF] & 0xf0) >> 4); } bool has_keys() const { return keyset_ver() != KeySet::EMPTY; } bool has_unrd() const { return (ptr_[V3_SETS_OFF] & V3_UNORD_FLAG); } bool has_annt() const { return (ptr_[V3_SETS_OFF] & V3_ANNOT_FLAG); } DataSet::Version dataset_ver() const { return DataSet::version((ptr_[V3_SETS_OFF] & 0x0c) >> 2); } DataSet::Version unrdset_ver() const { return has_unrd() ? dataset_ver() : DataSet::EMPTY; } DataSet::Version anntset_ver() const { return has_annt() ? 
dataset_ver() : DataSet::EMPTY; } uint16_t flags() const { uint16_t ret; gu::unserialize2(ptr_, V3_FLAGS_OFF, ret); return ret; } uint16_t pa_range() const { uint16_t ret; gu::unserialize2(ptr_, V3_PA_RANGE_OFF, ret); return ret; } wsrep_seqno_t last_seen() const { assert (pa_range() == 0 || version() >= VER5); return seqno_priv(); } wsrep_seqno_t seqno() const { return seqno_priv(); } long long timestamp() const { long long ret; gu::unserialize8(ptr_, V3_TIMESTAMP_OFF, ret); return ret; } const wsrep_uuid_t& source_id() const { /* This one is tricky. I would not like to create a copy * of 16 bytes for the sole purpose of referencing it when * alignment in the buffer is already guaranteed */ assert(uintptr_t(ptr_ + V3_SOURCE_ID_OFF)%GU_WORD_BYTES == 0); return *(reinterpret_cast (ptr_ + V3_SOURCE_ID_OFF)); } wsrep_conn_id_t conn_id() const { wsrep_conn_id_t ret; gu::unserialize8(ptr_, V3_CONN_ID_OFF, ret); return ret; } wsrep_trx_id_t trx_id() const { wsrep_trx_id_t ret; gu::unserialize8(ptr_, V3_TRX_ID_OFF, ret); return ret; } const gu::byte_t* payload() const { return ptr_ + size(); } /* to set seqno and parallel applying range after certification */ void set_seqno(wsrep_seqno_t seqno, uint16_t pa_range); gu::Buf copy(bool include_keys, bool include_unrd) const; private: static ssize_t check_size (Version const ver, const gu::byte_t* const buf, ssize_t const bufsize) { assert (bufsize > 4); ssize_t const hsize(buf[V3_HEADER_SIZE_OFF]); if (gu_unlikely(hsize > bufsize)) { gu_throw_error (EMSGSIZE) << "Input buffer size " << bufsize << " smaller than header size " << hsize; } return hsize; } static int const V3_CHECKSUM_SIZE = 8; class Checksum { public: typedef uint64_t type_t; static void compute (const void* ptr, size_t size, type_t& value) { gu::FastHash::digest (ptr, size, value); } static void verify (Version ver, const void* ptr, ssize_t size); Checksum () {} Checksum (Version ver, const void* ptr, ssize_t size) { verify (ver, ptr, size); } private: 
GU_COMPILE_ASSERT(sizeof(type_t) == V3_CHECKSUM_SIZE, uhoh); }; static unsigned char const V3_ANNOT_FLAG = 0x01; static unsigned char const V3_UNORD_FLAG = 0x02; /* Fist 8 bytes of header: 0: 'G' - "magic" byte 1: bits 4-7: header version bits 0-3: minimum compatible version 2: header size (payload offset) 3: bits 4-7: keyset version bits 2-3: dataset version bit 1: has unordered set bit 0: has annotation 4-5: flags 6-7: PA range all multibyte integers are in little-endian encoding */ static int const V3_MAGIC_OFF = 0; static int const V3_HEADER_VERS_OFF = V3_MAGIC_OFF + 1; static int const V3_HEADER_SIZE_OFF = V3_HEADER_VERS_OFF + 1; static int const V3_SETS_OFF = V3_HEADER_SIZE_OFF + 1; static int const V3_FLAGS_OFF = V3_SETS_OFF + 1; static int const V3_PA_RANGE_OFF = V3_FLAGS_OFF + 2; static int const V3_LAST_SEEN_OFF = V3_PA_RANGE_OFF + 2; static int const V3_SEQNO_OFF = V3_LAST_SEEN_OFF; // seqno takes place of last seen static int const V3_TIMESTAMP_OFF = V3_LAST_SEEN_OFF + 8; static int const V3_SOURCE_ID_OFF = V3_TIMESTAMP_OFF + 8; static int const V3_CONN_ID_OFF = V3_SOURCE_ID_OFF + 16; static int const V3_TRX_ID_OFF = V3_CONN_ID_OFF + 8; static int const V3_CRC_OFF = V3_TRX_ID_OFF + 8; static int const V3_SIZE = V3_CRC_OFF + 8; // 64 struct Offsets { int const header_ver_; int const header_size_; int const sets_; int const flags_; int const pa_range_; int const last_seen_; int const seqno_; int const timestamp_; int const source_id_; int const conn_id_; int const trx_id_; int const crc_; Offsets(int, int, int, int, int, int, int, int, int, int, int, int); }; static Offsets const V3; static int const MAX_HEADER_SIZE = V3_SIZE; mutable gu::byte_t local_[MAX_HEADER_SIZE]; gu::byte_t* ptr_; Version ver_; gu::byte_t size_; Checksum chksm_; wsrep_seqno_t seqno_priv() const { wsrep_seqno_t ret; gu::unserialize8(ptr_, V3_LAST_SEEN_OFF, ret); return ret; } static void update_checksum(gu::byte_t* const ptr, size_t const size) { Checksum::type_t cval; 
Checksum::compute (ptr, size, cval); gu::serialize(cval, ptr, size); } }; /* class Header */ private: /* this assert should be removed when wsrep API flags become * explicitly incompatible with wirteset flags */ GU_COMPILE_ASSERT(FLAGS_MATCH_API_FLAGS, flags_incompatible); template static inline uint32_t wsrep_flags_to_ws_flags_tmpl (uint32_t const flags) { assert(0); // remove when needed uint32_t ret(0); if (flags & WSREP_FLAG_TRX_END) ret |= F_COMMIT; if (flags & WSREP_FLAG_ROLLBACK) ret |= F_ROLLBACK; if (flags & WSREP_FLAG_ISOLATION) ret |= F_TOI; if (flags & WSREP_FLAG_PA_UNSAFE) ret |= F_PA_UNSAFE; if (flags & WSREP_FLAG_COMMUTATIVE) ret |= F_COMMUTATIVE; if (flags & WSREP_FLAG_NATIVE) ret |= F_NATIVE; if (flags & WSREP_FLAG_TRX_START) ret |= F_BEGIN; if (flags & WSREP_FLAG_TRX_PREPARE) ret |= F_PREPARE; return ret; } }; /* class WriteSetNG */ template <> inline uint32_t WriteSetNG::wsrep_flags_to_ws_flags_tmpl(uint32_t const flags) { return flags; } inline uint32_t WriteSetNG::wsrep_flags_to_ws_flags (uint32_t const flags) { return wsrep_flags_to_ws_flags_tmpl(flags); } class WriteSetOut { public: typedef gu::RecordSetOutBase::BaseName BaseName; WriteSetOut (const std::string& dir_name, wsrep_trx_id_t id, KeySet::Version kver, gu::byte_t* reserved, size_t reserved_size, uint16_t flags = 0, gu::RecordSet::Version rsv = gu::RecordSet::VER2, WriteSetNG::Version ver = WriteSetNG::MAX_VERSION, DataSet::Version dver = DataSet::MAX_VERSION, DataSet::Version uver = DataSet::MAX_VERSION, size_t max_size = WriteSetNG::MAX_SIZE) : header_(ver), base_name_(dir_name, id), /* 1/8 of reserved (aligned by 8) goes to key set */ kbn_ (base_name_), keys_ (reserved, (reserved_size >>= 6, reserved_size <<= 3, reserved_size), kbn_, kver, rsv, ver), /* 5/8 of reserved goes to data set */ dbn_ (base_name_), data_ (reserved + reserved_size, reserved_size*5, dbn_, dver, rsv), /* 2/8 of reserved goes to unordered set */ ubn_ (base_name_), unrd_ (reserved + reserved_size*6, 
reserved_size*2, ubn_, uver,rsv), /* annotation set is not allocated unless requested */ abn_ (base_name_), annt_ (NULL), left_ (max_size - keys_.size() - data_.size() - unrd_.size() - header_.size()), flags_ (flags) { assert ((uintptr_t(reserved) % GU_WORD_BYTES) == 0); } ~WriteSetOut() { delete annt_; } void append_key(const KeyData& k) { left_ -= keys_.append(k); } void append_data(const void* data, size_t data_len, bool store) { left_ -= data_.append(data, data_len, store); } void append_unordered(const void* data, size_t data_len, bool store) { left_ -= unrd_.append(data, data_len, store); } void append_annotation(const void* data, size_t data_len, bool store) { if (NULL == annt_) { annt_ = new DataSetOut(NULL, 0, abn_, DataSet::MAX_VERSION, // use the same version as the dataset data_.gu::RecordSet::version()); left_ -= annt_->size(); } left_ -= annt_->append(data, data_len, store); } void set_flags(uint16_t flags) { flags_ = flags; } void add_flags(uint16_t flags) { flags_ |= flags; } void mark_toi() { flags_ |= WriteSetNG::F_TOI; } void mark_pa_unsafe() { flags_ |= WriteSetNG::F_PA_UNSAFE; } bool is_empty() const { return ((data_.count() + keys_.count() + unrd_.count() + (annt_ ? annt_->count() : 0)) == 0); } /* !!! This returns header without checksum! * * Use finalize() to finalize it. 
*/ size_t gather(const wsrep_uuid_t& source, const wsrep_conn_id_t& conn, const wsrep_trx_id_t& trx, WriteSetNG::GatherVector& out) { gu_trace(check_size()); out->reserve (out->size() + keys_.page_count() + data_.page_count() + unrd_.page_count() + 1 /* global header */); size_t out_size (header_.gather (keys_.version(), data_.version(), unrd_.version() != DataSet::EMPTY, NULL != annt_, flags_, source, conn, trx, out)); out_size += keys_.gather(out); out_size += data_.gather(out); out_size += unrd_.gather(out); if (NULL != annt_) out_size += annt_->gather(out); return out_size; } void finalize(wsrep_seqno_t const ls, int const pa_range) { header_.finalize(ls, pa_range); } /* Serializes wiriteset into a single buffer (for unit test purposes) * set last_seen to -1 if ws was explicitly finalized */ void serialize(std::vector& ret, const wsrep_uuid_t& source, const wsrep_conn_id_t& conn, const wsrep_trx_id_t& trx, const wsrep_seqno_t last_seen, const int pa_range = -1) { WriteSetNG::GatherVector out; size_t const out_size(gather(source, conn, trx, out)); finalize(last_seen, pa_range); ret.clear(); ret.reserve(out_size); /* concatenate all out buffers into ret */ for (size_t i(0); i < out->size(); ++i) { const gu::byte_t* ptr (static_cast(out[i].ptr)); ret.insert (ret.end(), ptr, ptr + out[i].size); } } void finalize_preordered (ssize_t pa_range) { assert (pa_range >= 0); /* By current convention pa_range is off by 1 from wsrep API def. * 0 meaning failed certification. 
*/ pa_range++; header_.finalize_preordered(pa_range); } private: struct BaseNameCommon { const std::string& dir_name_; unsigned long long const id_; BaseNameCommon(const std::string& dir_name, unsigned long long id) : dir_name_(dir_name), id_ (id) {} }; template class BaseNameImpl : public BaseName { const BaseNameCommon& data_; public: BaseNameImpl (const BaseNameCommon& data) : data_(data) {} void print(std::ostream& os) const { os << data_.dir_name_ << "/0x" << std::hex << std::setfill('0') << std::setw(8) << data_.id_ << suffix_; } }; /* class BaseNameImpl */ static const char keys_suffix[]; static const char data_suffix[]; static const char unrd_suffix[]; static const char annt_suffix[]; WriteSetNG::Header header_; BaseNameCommon base_name_; BaseNameImpl kbn_; KeySetOut keys_; BaseNameImpl dbn_; DataSetOut data_; BaseNameImpl ubn_; DataSetOut unrd_; BaseNameImpl abn_; DataSetOut* annt_; ssize_t left_; uint16_t flags_; void check_size() { if (gu_unlikely(left_ < 0)) gu_throw_error (EMSGSIZE) << "Maximum writeset size exceeded by " << -left_; } WriteSetOut (const WriteSetOut&); WriteSetOut& operator= (const WriteSetOut); }; /* class WriteSetOut */ class WriteSetIn { public: WriteSetIn (const gu::Buf& buf, ssize_t const st = SIZE_THRESHOLD) : header_(buf), size_ (buf.size), keys_ (), data_ (), unrd_ (), annt_ (NULL), check_thr_id_(), check_thr_(false), check_ (false) { gu_trace(init(st)); } WriteSetIn () : header_(), size_ (0), keys_ (), data_ (), unrd_ (), annt_ (NULL), check_thr_id_(), check_thr_(false), check_ (false) {} void read_header (const gu::Buf& buf) { assert (0 == size_); assert (false == check_); header_.read_buf (buf); size_ = buf.size; } /* * WriteSetIn(buf) == WriteSetIn() + read_buf(buf) * * @param st threshold at which launch dedicated thread for checksumming * 0 - no checksumming */ void read_buf (const gu::Buf& buf, ssize_t const st = SIZE_THRESHOLD) { read_header (buf); gu_trace(init(st)); } void read_buf (const void* const ptr, ssize_t const 
len, ssize_t const st = SIZE_THRESHOLD) { assert (ptr != NULL); assert (len >= 0); gu::Buf tmp = { static_cast(ptr), len }; read_buf (tmp, st); } ~WriteSetIn () { if (gu_unlikely(check_thr_)) { /* checksum was performed in a parallel thread */ gu_thread_join (check_thr_id_, NULL); } delete annt_; } WriteSetNG::Version version() const { return header_.version(); } ssize_t size() const { return size_; } uint16_t flags() const { return header_.flags(); } bool is_toi() const { return flags() & WriteSetNG::F_TOI; } bool pa_unsafe() const { return flags() & WriteSetNG::F_PA_UNSAFE; } int pa_range() const { return header_.pa_range(); } bool certified() const { if (gu_likely(version() >= WriteSetNG::VER5)) return (flags() & WriteSetNG::F_CERTIFIED); else return (pa_range()); // VER3 } wsrep_seqno_t last_seen() const { return header_.last_seen(); } wsrep_seqno_t seqno() const { return header_.seqno(); } long long timestamp() const { return header_.timestamp(); } const wsrep_uuid_t& source_id() const { return header_.source_id(); } wsrep_conn_id_t conn_id() const { return header_.conn_id(); } wsrep_trx_id_t trx_id() const { return header_.trx_id(); } const KeySetIn& keyset() const { return keys_; } const DataSetIn& dataset() const { return data_; } const DataSetIn& unrdset() const { return unrd_; } bool annotated() const { return (annt_ != NULL); } void write_annotation(std::ostream& os) const; /* This should be called right after certification verdict is obtained * and before it is finalized. 
*/ void verify_checksum() const /* throws */ { if (gu_unlikely(check_thr_)) { /* checksum was performed in a parallel thread */ gu_thread_join (check_thr_id_, NULL); check_thr_ = false; gu_trace(checksum_fin()); } } uint64_t get_checksum() const { /* since data segment is the only thing that definitely stays * unchanged through WS lifetime, it is the WS signature */ return (data_.get_checksum()); } void set_seqno(wsrep_seqno_t const seqno, int pa_range) { assert (seqno > 0); assert (pa_range >= 0); /* cap PA range by maximum we can represent */ if (gu_unlikely(pa_range > WriteSetNG::MAX_PA_RANGE)) pa_range = WriteSetNG::MAX_PA_RANGE; header_.set_seqno (seqno, pa_range); } typedef gu::Vector GatherVector; /* can return pointer to internal storage: out can be used only * within object scope. */ size_t gather(GatherVector& out, bool include_keys, bool include_unrd) const; private: WriteSetNG::Header header_; ssize_t size_; KeySetIn keys_; DataSetIn data_; DataSetIn unrd_; DataSetIn* annt_; gu_thread_t check_thr_id_; bool mutable check_thr_; bool check_; static size_t const SIZE_THRESHOLD = 1 << 22; /* 4Mb */ void checksum (); /* checksums writeset, stores result in check_ */ void checksum_fin() const { if (gu_unlikely(!check_)) { gu_throw_error(EINVAL) << "Writeset checksum failed"; } } static void* checksum_thread (void* arg) { WriteSetIn* ws(reinterpret_cast(arg)); ws->checksum(); return NULL; } /* late initialization after default constructor */ void init (ssize_t size_threshold); WriteSetIn (const WriteSetIn&); WriteSetIn& operator=(WriteSetIn); }; } /* namespace galera */ #endif // GALERA_WRITE_SET_HPP galera-26.4.3/galera/src/replicator.cpp0000664000177500017540000000116013540715002016246 0ustar dbartmy// // Copyright (C) 2010-2014 Codership Oy // #include "replicator.hpp" namespace galera { std::string const Replicator::Param::debug_log = "debug"; #ifdef GU_DBUG_ON std::string const Replicator::Param::dbug = "dbug"; std::string const Replicator::Param::signal = 
"signal"; #endif /* GU_DBUG_ON */ void Replicator::register_params(gu::Config& conf) { conf.add(Param::debug_log, "no"); #ifdef GU_DBUG_ON conf.add(Param::dbug, ""); conf.add(Param::signal, ""); #endif /* GU_DBUG_ON */ } const char* const Replicator::TRIVIAL_SST(WSREP_STATE_TRANSFER_TRIVIAL); } /* namespace galera */ galera-26.4.3/galera/src/data_set.hpp0000664000177500017540000001123713540715002015701 0ustar dbartmy// // Copyright (C) 2013 Codership Oy // #ifndef GALERA_DATA_SET_HPP #define GALERA_DATA_SET_HPP #include "gu_rset.hpp" #include "gu_vlq.hpp" namespace galera { class DataSet { public: enum Version { EMPTY = 0, VER1 }; static Version const MAX_VERSION = VER1; static Version version (unsigned int ver) { if (gu_likely (ver <= MAX_VERSION)) return static_cast(ver); gu_throw_error (EINVAL) << "Unrecognized DataSet version: " << ver; } /*! Dummy class to instantiate DataSetOut */ class RecordOut {}; /*! A class to instantiate DataSetIn: provides methods necessary to * iterate over the records serialized into single input buffer */ class RecordIn { public: static size_t serial_size (const gu::byte_t* const buf, size_t const size) { /* There's a single record in a dataset */ return size; } size_t serial_size () const { return size_; } RecordIn (const gu::byte_t* buf, size_t size) : size_(size), buf_(buf) {} gu::Buf buf() { gu::Buf ret = { buf_, size_ }; return ret; } private: ssize_t size_; const gu::byte_t* buf_; }; /* class RecordIn */ }; /* class DataSet */ #if defined(__GNUG__) # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic push # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic ignored "-Weffc++" #endif class DataSetOut : public gu::RecordSetOut { public: DataSetOut () // empty ctor for slave TrxHandle : gu::RecordSetOut(), version_() {} DataSetOut (gu::byte_t* reserved, size_t reserved_size, const BaseName& base_name, DataSet::Version version, gu::RecordSet::Version rsv) : 
gu::RecordSetOut ( reserved, reserved_size, base_name, check_type(version), rsv ), version_(version) { assert((uintptr_t(reserved) % GU_WORD_BYTES) == 0); } size_t append (const void* const src, size_t const size, bool const store) { /* append data as is, don't count as a new record */ gu_trace( gu::RecordSetOut::append (src, size, store, false); ); /* this will be deserialized using DataSet::RecordIn in DataSetIn */ return size; } DataSet::Version version () const { return count() ? version_ : DataSet::EMPTY; } typedef gu::RecordSet::GatherVector GatherVector; private: // depending on version we may pack data differently DataSet::Version const version_; static gu::RecordSet::CheckType check_type (DataSet::Version ver) { switch (ver) { case DataSet::EMPTY: break; /* Can't create EMPTY DataSetOut */ case DataSet::VER1: return gu::RecordSet::CHECK_MMH128; } throw; } }; /* class DataSetOut */ class DataSetIn : public gu::RecordSetIn { public: DataSetIn (DataSet::Version ver, const gu::byte_t* buf, size_t size) : gu::RecordSetIn(buf, size, false), version_(ver) {} DataSetIn () : gu::RecordSetIn(), version_(DataSet::EMPTY) {} void init (DataSet::Version ver, const gu::byte_t* buf, size_t size) { gu::RecordSetIn::init(buf, size, false); version_ = ver; } gu::Buf next () const { return gu::RecordSetIn::next().buf(); } private: DataSet::Version version_; }; /* class DataSetIn */ #if defined(__GNUG__) # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic pop # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) #endif } /* namespace galera */ #endif // GALERA_DATA_SET_HPP galera-26.4.3/galera/src/replicator_smm.hpp0000664000177500017540000010335113540715002017134 0ustar dbartmy// // Copyright (C) 2010-2019 Codership Oy // //! 
@file replicator_smm.hpp // // @brief Galera Synchronous Multi-Master replicator // #ifndef GALERA_REPLICATOR_SMM_HPP #define GALERA_REPLICATOR_SMM_HPP #include "replicator.hpp" #include "gu_init.h" #include "GCache.hpp" #include "gcs.hpp" #include "monitor.hpp" #include "wsdb.hpp" #include "certification.hpp" #include "trx_handle.hpp" #include "write_set.hpp" #include "galera_service_thd.hpp" #include "fsm.hpp" #include "action_source.hpp" #include "ist.hpp" #include "gu_atomic.hpp" #include "saved_state.hpp" #include "gu_debug_sync.hpp" #include #include namespace galera { class ReplicatorSMM : public Replicator, public ist::EventHandler { public: typedef enum { SST_NONE, SST_WAIT, SST_REQ_FAILED, SST_FAILED } SstState; static const size_t N_STATES = S_DONOR + 1; ReplicatorSMM(const wsrep_init_args* args); ~ReplicatorSMM(); wsrep_cap_t capabilities() const { return capabilities(proto_max_); } int trx_proto_ver() const { return trx_params_.version_; } int repl_proto_ver() const{ return protocol_version_; } wsrep_status_t connect(const std::string& cluster_name, const std::string& cluster_url, const std::string& state_donor, bool bootstrap); wsrep_status_t close(); wsrep_status_t async_recv(void* recv_ctx); TrxHandleMasterPtr get_local_trx(wsrep_trx_id_t trx_id, bool create = false) { return wsdb_.get_trx(trx_params_, uuid_, trx_id, create); } TrxHandleMasterPtr new_local_trx(wsrep_trx_id_t trx_id) { return wsdb_.new_trx(trx_params_, uuid_, trx_id); } void discard_local_trx(TrxHandleMaster* trx) { wsdb_.discard_trx(trx->trx_id()); } TrxHandleMasterPtr local_conn_trx(wsrep_conn_id_t conn_id, bool create) { return wsdb_.get_conn_query(trx_params_, uuid_, conn_id, create); } void discard_local_conn_trx(wsrep_conn_id_t conn_id) { wsdb_.discard_conn_query(conn_id); } void apply_trx(void* recv_ctx, TrxHandleSlave& trx); wsrep_status_t handle_apply_error(TrxHandleSlave& trx, const wsrep_buf_t& error_buf, const std::string& custom_msg); void 
process_apply_error(TrxHandleSlave&, const wsrep_buf_t&); wsrep_status_t send(TrxHandleMaster& trx, wsrep_trx_meta_t*); wsrep_status_t replicate(TrxHandleMaster& trx, wsrep_trx_meta_t*); wsrep_status_t abort_trx(TrxHandleMaster& trx, wsrep_seqno_t bf_seqno, wsrep_seqno_t* victim_seqno); wsrep_status_t certify(TrxHandleMaster& trx, wsrep_trx_meta_t*); wsrep_status_t commit_order_enter_local(TrxHandleMaster& trx); wsrep_status_t commit_order_enter_remote(TrxHandleSlave& trx); wsrep_status_t commit_order_leave(TrxHandleSlave& trx, const wsrep_buf_t* error); wsrep_status_t release_commit(TrxHandleMaster& trx); wsrep_status_t release_rollback(TrxHandleMaster& trx); wsrep_status_t replay_trx(TrxHandleMaster& trx, TrxHandleLock& lock, void* replay_ctx); wsrep_status_t sync_wait(wsrep_gtid_t* upto, int tout, wsrep_gtid_t* gtid); wsrep_status_t last_committed_id(wsrep_gtid_t* gtid) const; wsrep_status_t to_isolation_begin(TrxHandleMaster& trx, wsrep_trx_meta_t* meta); wsrep_status_t to_isolation_end(TrxHandleMaster& trx, const wsrep_buf_t* err); wsrep_status_t preordered_collect(wsrep_po_handle_t& handle, const struct wsrep_buf* data, size_t count, bool copy); wsrep_status_t preordered_commit(wsrep_po_handle_t& handle, const wsrep_uuid_t& source, uint64_t flags, int pa_range, bool commit); wsrep_status_t sst_sent(const wsrep_gtid_t& state_id, int rcode); wsrep_status_t sst_received(const wsrep_gtid_t& state_id, const wsrep_buf_t* state, int rcode); void process_trx(void* recv_ctx, const TrxHandleSlavePtr& trx); void process_commit_cut(wsrep_seqno_t seq, wsrep_seqno_t seqno_l); void submit_view_info(void* recv_ctx, const wsrep_view_info_t* cc); void process_conf_change(void* recv_ctx, const struct gcs_action& cc); void process_state_req(void* recv_ctx, const void* req, size_t req_size, wsrep_seqno_t seqno_l, wsrep_seqno_t donor_seq); void process_join(wsrep_seqno_t seqno, wsrep_seqno_t seqno_l); void process_sync(wsrep_seqno_t seqno_l); void process_vote(wsrep_seqno_t seq, 
int64_t code,wsrep_seqno_t seqno_l); const struct wsrep_stats_var* stats_get() const; void stats_reset(); void stats_free(struct wsrep_stats_var*); /*! @throws NotFound */ void set_param (const std::string& key, const std::string& value); /*! @throws NotFound */ void param_set (const std::string& key, const std::string& value); std::string param_get (const std::string& key) const; const gu::Config& params() const { return config_; } wsrep_seqno_t pause(); void resume(); void desync(); void resync(); const wsrep_uuid_t& source_id() const { return uuid_; } // IST Action handler interface void ist_trx(const TrxHandleSlavePtr& ts, bool must_apply, bool preload); void ist_cc(const gcs_action&, bool must_apply, bool preload); void ist_end(int error); // Enter apply monitor without waiting void apply_monitor_enter_immediately(const TrxHandleSlave& ts) { assert(!ts.explicit_rollback()); assert(ts.state() == TrxHandle::S_ABORTING); ApplyOrder ao(ts.global_seqno(), 0, ts.local()); gu_trace(apply_monitor_.enter(ao)); } // Cancel local and enter apply monitors for TrxHandle void cancel_monitors_for_local(const TrxHandleSlave& ts) { log_debug << "canceling monitors on behalf of trx: " << ts; assert(ts.local()); assert(ts.global_seqno() > 0); LocalOrder lo(ts); local_monitor_.self_cancel(lo); gu_trace(apply_monitor_enter_immediately(ts)); } // Cancel all monitors for given seqnos void cancel_seqnos(wsrep_seqno_t seqno_l, wsrep_seqno_t seqno_g); // Drain apply and commit monitors up to seqno void drain_monitors(wsrep_seqno_t seqno); class ISTEvent { public: enum Type { T_NULL, // empty T_TRX, // TrxHandleSlavePtr T_VIEW // configuration change }; ISTEvent() : ts_() , view_() , type_(T_NULL) { } ISTEvent(const TrxHandleSlavePtr& ts) : ts_(ts) , view_() , type_(T_TRX) { } ISTEvent(wsrep_view_info_t* view) : ts_() , view_(view) , type_(T_VIEW) { } ISTEvent(const ISTEvent& other) : ts_(other.ts_) , view_(other.view_) , type_(other.type_) { } ISTEvent& operator=(const ISTEvent& other) 
{ ts_ = other.ts_; view_ = other.view_; type_ = other.type_; return *this; } ~ISTEvent() { } Type type() const { return type_; } TrxHandleSlavePtr ts() const { assert(T_TRX == type_); return ts_; } wsrep_view_info_t* view() const { assert(T_VIEW == type_); return view_; } private: TrxHandleSlavePtr ts_; wsrep_view_info_t* view_; Type type_; }; // Helper class to synchronize between IST receiver thread // applier threads. class ISTEventQueue { public: ISTEventQueue() : mutex_(), cond_(), eof_(false), error_(0), queue_() { } void reset() { eof_ = false; error_ = 0; } void eof(int error) { gu::Lock lock(mutex_); eof_ = true; error_ = error; cond_.broadcast(); } // Push back void push_back(const TrxHandleSlavePtr& ts) { gu::Lock lock(mutex_); queue_.push(ISTEvent(ts)); cond_.signal(); } // Push back void push_back(wsrep_view_info_t* view) { gu::Lock lock(mutex_); queue_.push(ISTEvent(view)); cond_.signal(); } // Pop front // // Throws gu::Exception() in case of error for the first // caller which will detect the error. 
// Returns null in case of EOF ISTEvent pop_front() { gu::Lock lock(mutex_); while (eof_ == false && queue_.empty() == true) { lock.wait(cond_); } ISTEvent ret; if (queue_.empty() == false) { ret = queue_.front(); queue_.pop(); } else { if (error_) { int err(error_); error_ = 0; // Make just one thread to detect the failure gu_throw_error(err) << "IST receiver reported failure"; } } return ret; } private: gu::Mutex mutex_; gu::Cond cond_; bool eof_; int error_; std::queue queue_; }; ISTEventQueue ist_event_queue_; void mark_corrupt_and_close() /* mark state as corrupt and try to leave cleanly */ { st_.mark_corrupt(); gu::Lock lock(closing_mutex_); start_closing(); } void on_inconsistency() { cert_.mark_inconsistent(); mark_corrupt_and_close(); } bool corrupt() const { return st_.corrupt(); } struct InitConfig { InitConfig(gu::Config&, const char* node_addr,const char* base_dir); }; class StateRequest { public: virtual int version () const = 0; virtual const void* req () const = 0; virtual ssize_t len () const = 0; virtual const void* sst_req () const = 0; virtual ssize_t sst_len () const = 0; virtual const void* ist_req () const = 0; virtual ssize_t ist_len () const = 0; virtual ~StateRequest() {} }; private: ReplicatorSMM(const ReplicatorSMM&); void operator=(const ReplicatorSMM&); struct Param { static const std::string base_host; static const std::string base_port; static const std::string base_dir; static const std::string proto_max; static const std::string key_format; static const std::string commit_order; static const std::string causal_read_timeout; static const std::string max_write_set_size; }; typedef std::pair Default; struct Defaults { std::map map_; Defaults (); }; static const Defaults defaults; // both a list of parameters and a list of default values static wsrep_cap_t capabilities(int protocol_version); // Return the global seqno of the last transaction which has // released commit order. 
Note that this does not mean that // the transaction with given gtid has completed the commit // on application side. wsrep_seqno_t last_committed() { return co_mode_ != CommitOrder::BYPASS ? commit_monitor_.last_left() : apply_monitor_.last_left(); } void report_last_committed(wsrep_seqno_t purge_seqno) { if (gu_unlikely(purge_seqno != -1)) { service_thd_.report_last_committed(purge_seqno); } } /* process pending queue events scheduled before seqno */ void process_pending_queue(wsrep_seqno_t seqno); wsrep_status_t cert (TrxHandleMaster*, const TrxHandleSlavePtr&); wsrep_status_t cert_and_catch (TrxHandleMaster*, const TrxHandleSlavePtr&); wsrep_status_t cert_for_aborted (const TrxHandleSlavePtr&); wsrep_status_t handle_commit_interrupt(TrxHandleMaster&, const TrxHandleSlave&); void update_state_uuid (const wsrep_uuid_t& u); void update_incoming_list (const wsrep_view_info_t& v); /* aborts/exits the program in a clean way */ void abort() GU_NORETURN; #ifdef GALERA_MONITOR_DEBUG_PRINT public: #endif /* GALERA_MONITOR_DEBUG_PRINT */ class LocalOrder { public: explicit LocalOrder(const TrxHandleSlave& ts) : seqno_(ts.local_seqno()) #if defined(GU_DBUG_ON) || !defined(NDEBUG) ,trx_(&ts) #endif //GU_DBUG_ON { } LocalOrder(wsrep_seqno_t seqno, const TrxHandleSlave* ts = NULL) : seqno_(seqno) #if defined(GU_DBUG_ON) || !defined(NDEBUG) ,trx_(ts) #endif //GU_DBUG_ON { #if defined(GU_DBUG_ON) || !defined(NDEBUG) assert((trx_ && seqno_ == trx_->local_seqno()) || !trx_); #endif //GU_DBUG_ON } wsrep_seqno_t seqno() const { return seqno_; } bool condition(wsrep_seqno_t last_entered, wsrep_seqno_t last_left) const { return (last_left + 1 == seqno_); } #ifdef GU_DBUG_ON void debug_sync(gu::Mutex& mutex) { if (trx_) { if (trx_->local()) { mutex.unlock(); GU_DBUG_SYNC_WAIT("local_monitor_master_enter_sync"); mutex.lock(); } else { mutex.unlock(); GU_DBUG_SYNC_WAIT("local_monitor_slave_enter_sync"); mutex.lock(); } } } #endif //GU_DBUG_ON #ifndef NDEBUG LocalOrder() : 
seqno_(WSREP_SEQNO_UNDEFINED) #if defined(GU_DBUG_ON) || !defined(NDEBUG) ,trx_(NULL) #endif /* GU_DBUG_ON || !NDEBUG */ {} #endif /* NDEBUG */ void print(std::ostream& os) const { os << seqno_; } private: #ifdef NDEBUG LocalOrder(const LocalOrder& o); #endif /* NDEBUG */ wsrep_seqno_t const seqno_; #if defined(GU_DBUG_ON) || !defined(NDEBUG) // this pointer is for debugging purposes only and // is not guaranteed to point at a valid location const TrxHandleSlave* const trx_; #endif /* GU_DBUG_ON || !NDEBUG */ }; class ApplyOrder { public: ApplyOrder(const TrxHandleSlave& ts) : global_seqno_ (ts.global_seqno()), depends_seqno_(ts.depends_seqno()), is_local_ (ts.local()), is_toi_ (ts.is_toi()) #ifndef NDEBUG ,trx_ (&ts) #endif { #ifndef NDEBUG (void)trx_; // to pacify clang's -Wunused-private-field #endif } ApplyOrder(wsrep_seqno_t gs, wsrep_seqno_t ds, bool l = false) : global_seqno_ (gs), depends_seqno_(ds), is_local_ (l), is_toi_ (false) #ifndef NDEBUG ,trx_ (NULL) #endif { } wsrep_seqno_t seqno() const { return global_seqno_; } bool condition(wsrep_seqno_t last_entered, wsrep_seqno_t last_left) const { return ((is_local_ == true && is_toi_ == false) || last_left >= depends_seqno_); } #ifdef GU_DBUG_ON void debug_sync(gu::Mutex& mutex) { if (is_local_) { mutex.unlock(); GU_DBUG_SYNC_WAIT("apply_monitor_master_enter_sync"); mutex.lock(); } else { mutex.unlock(); GU_DBUG_SYNC_WAIT("apply_monitor_slave_enter_sync"); mutex.lock(); } } #endif //GU_DBUG_ON #ifndef NDEBUG ApplyOrder() : global_seqno_ (WSREP_SEQNO_UNDEFINED), depends_seqno_(WSREP_SEQNO_UNDEFINED), is_local_ (false), is_toi_ (false), trx_ (NULL) {} #endif /* NDEBUG */ void print(std::ostream& os) const { os << "g:" << global_seqno_ << " d:" << depends_seqno_ << (is_local_ ? 
" L" : " R"); } private: #ifdef NDEBUG ApplyOrder(const ApplyOrder&); #endif /* NDEBUG */ const wsrep_seqno_t global_seqno_; const wsrep_seqno_t depends_seqno_; const bool is_local_; const bool is_toi_; #ifndef NDEBUG // this pointer is for debugging purposes only and // is not guaranteed to point at a valid location const TrxHandleSlave* const trx_; #endif }; class CommitOrder { public: typedef enum { BYPASS = 0, OOOC = 1, LOCAL_OOOC = 2, NO_OOOC = 3 } Mode; static Mode from_string(const std::string& str) { int ret(gu::from_string(str)); switch (ret) { case BYPASS: case OOOC: case LOCAL_OOOC: case NO_OOOC: break; default: gu_throw_error(EINVAL) << "invalid value " << str << " for commit order mode"; } return static_cast(ret); } CommitOrder(const TrxHandleSlave& ts, Mode mode) : global_seqno_(ts.global_seqno()), mode_(mode), is_local_(ts.local()) #ifndef NDEBUG ,trx_(&ts) #endif { } CommitOrder(wsrep_seqno_t gs, Mode mode, bool local = false) : global_seqno_(gs), mode_(mode), is_local_(local) #ifndef NDEBUG ,trx_(NULL) #endif { } wsrep_seqno_t seqno() const { return global_seqno_; } bool condition(wsrep_seqno_t last_entered, wsrep_seqno_t last_left) const { switch (mode_) { case BYPASS: gu_throw_fatal << "commit order condition called in bypass mode"; case OOOC: return true; case LOCAL_OOOC: return is_local_; // in case of remote trx fall through case NO_OOOC: return (last_left + 1 == global_seqno_); } gu_throw_fatal << "invalid commit mode value " << mode_; } #ifdef GU_DBUG_ON void debug_sync(gu::Mutex& mutex) { if (is_local_ == true) { mutex.unlock(); GU_DBUG_SYNC_WAIT("commit_monitor_master_enter_sync"); mutex.lock(); } else { mutex.unlock(); GU_DBUG_SYNC_WAIT("commit_monitor_slave_enter_sync"); mutex.lock(); } } #endif //GU_DBUG_ON #ifndef NDEBUG CommitOrder() : global_seqno_ (WSREP_SEQNO_UNDEFINED), mode_ (OOOC), is_local_ (false), trx_ (NULL) { (void)trx_; // to pacify clang's -Wunused-private-field } #endif /* NDEBUG */ void print(std::ostream& os) const { 
os << "g:" << global_seqno_ << " m:" << mode_ << (is_local_ ? " L" : " R"); } private: #ifdef NDEBUG CommitOrder(const CommitOrder&); #endif const wsrep_seqno_t global_seqno_; const Mode mode_; const bool is_local_; #ifndef NDEBUG // this pointer is for debugging purposes only and // is not guaranteed to point at a valid location const TrxHandleSlave* const trx_; #endif }; private: // state machine class Transition { public: Transition(State const from, State const to) : from_(from), to_(to) { } State from() const { return from_; } State to() const { return to_; } bool operator==(Transition const& other) const { return (from_ == other.from_ && to_ == other.to_); } class Hash { public: size_t operator()(Transition const& tr) const { return (gu::HashValue(static_cast(tr.from_)) ^ gu::HashValue(static_cast(tr.to_))); } }; private: State from_; State to_; }; void build_stats_vars (std::vector& stats); void cancel_seqno(wsrep_seqno_t); void set_initial_position(const wsrep_uuid_t&, wsrep_seqno_t); void establish_protocol_versions (int version); /* * Record cc_seqno_ and cc_lowest_trx_seqno_ for future IST * processing. * * @param cc_seqno Seqno of current configuration change. * @param source String describing the source of the configuration * change. 
class InitLib /* Library initialization routines */
{
public:
    // Initializes the galerautils library with the given logging
    // callback via gu_init(). Held as the first data member so that
    // the library is set up before other members are constructed —
    // NOTE(review): relies on member declaration order, confirm.
    InitLib (gu_log_cb_t cb) { gu_init(cb); }
};
| 1 | 1 | * | 4 | 2 | v2.1 1 | 1 | * | 5 | 3 | 1 | 1 | * | 6 | 3 | 2 | 1 | * | 7 | 3 | 2 | 1 | * | 8 | 3 | 2 | alignment 2 | * | 9 | SS keys 4 | 2 | 2 | * | 4.x 10 | PA range/ 5 | CC events / 3 | 2 | * | | UPD keys | idx preload | | * |--------------------------------------------------------------------| */ int str_proto_ver_;// state transfer request protocol int protocol_version_;// general repl layer proto int proto_max_; // maximum allowed proto version FSM state_; gu::Mutex closing_mutex_; // to sync close() call gu::Cond closing_cond_; bool closing_; // to indicate that the closing process // started SstState sst_state_; // configurable params const CommitOrder::Mode co_mode_; // commit order mode // persistent data location std::string state_file_; SavedState st_; // boolean telling if the node is safe to use for bootstrapping // a new primary component bool safe_to_bootstrap_; // currently installed trx parameters TrxHandleMaster::Params trx_params_; // identifiers wsrep_uuid_t uuid_; wsrep_uuid_t const state_uuid_; const char state_uuid_str_[37]; wsrep_seqno_t cc_seqno_; // seqno of last CC // Lowest trx seqno in cert index during last CC wsrep_seqno_t cc_lowest_trx_seqno_; wsrep_seqno_t pause_seqno_; // local seqno of last pause call // application callbacks void* app_ctx_; wsrep_connected_cb_t connected_cb_; wsrep_view_cb_t view_cb_; wsrep_sst_request_cb_t sst_request_cb_; wsrep_apply_cb_t apply_cb_; wsrep_unordered_cb_t unordered_cb_; wsrep_sst_donate_cb_t sst_donate_cb_; wsrep_synced_cb_t synced_cb_; // SST std::string sst_donor_; wsrep_uuid_t sst_uuid_; wsrep_seqno_t sst_seqno_; gu::Mutex sst_mutex_; gu::Cond sst_cond_; int sst_retry_sec_; bool sst_received_; // services gcache::GCache gcache_; GCS_IMPL gcs_; ServiceThd service_thd_; // action sources TrxHandleSlave::Pool slave_pool_; ActionSource* as_; ist::Receiver ist_receiver_; ist::AsyncSenderMap ist_senders_; // trx processing Wsdb wsdb_; Certification cert_; class PendingCertQueue { public: 
// Return the next queued local transaction that must be certified
// before the action with the given seqno, or an empty pointer if
// there is none.
//
// ts_queue_ is a priority queue ordered by global seqno with the
// smallest seqno on top; an entry with global_seqno() < seqno is
// popped and returned. A queued entry with a seqno equal to the
// argument is not expected (asserted below).
TrxHandleSlavePtr must_cert_next(wsrep_seqno_t seqno)
{
    gu::Lock lock(mutex_); // protects ts_queue_
    TrxHandleSlavePtr ret;
    if (!ts_queue_.empty())
    {
        const TrxHandleSlavePtr& top(ts_queue_.top());
        assert(top->global_seqno() != seqno);
        if (top->global_seqno() < seqno)
        {
            // Copy the shared pointer before pop() invalidates 'top'.
            ret = top;
            ts_queue_.pop();
        }
    }
    return ret;
}
// Abstract interface for components that feed replication actions
// into the replicator's processing loop.
class ActionSource
{
public:

    ActionSource() { }

    virtual ~ActionSource() { }

    // Process one incoming action.
    // @param ctx       opaque receiver context passed through to handlers
    // @param exit_loop set by the implementation when the caller's
    //                  receive loop should terminate
    // @return presumably the size of the processed action or a negative
    //         error code — confirm against concrete implementations
    virtual ssize_t process(void* ctx, bool& exit_loop) = 0;

    // Running count of actions received from this source.
    virtual long long received() const = 0;

    // Running count of bytes received from this source.
    virtual long long received_bytes() const = 0;
};
buffer size exceeds in-memory threshold, have to mmap if (gu_unlikely(std::numeric_limits::max() - sz < threshold_)) { sz = std::numeric_limits::max(); } else { sz = (sz/threshold_ + 1)*threshold_; } if (gu_unlikely(sz > static_cast(std::numeric_limits::max()))) { gu_throw_error(EINVAL) << "size exceeds maximum of off_t"; } if (fd_ == -1) { file_ = working_dir_ + "/gmb_XXXXXX"; fd_ = mkstemp(&file_[0]); if (fd_ == -1) { gu_throw_error(errno) << "mkstemp(" << file_ << ") failed"; } if (ftruncate(fd_, sz) == -1) { gu_throw_error(errno) << "ftruncate() failed"; } byte_t* tmp(reinterpret_cast( mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd_, 0))); if (tmp == MAP_FAILED) { free(buf_); buf_ = 0; clear(); gu_throw_error(ENOMEM) << "mmap() failed"; } copy(buf_, buf_ + buf_size_, tmp); free(buf_); buf_ = tmp; } else { if (munmap(buf_, real_buf_size_) != 0) { gu_throw_error(errno) << "munmap() failed"; } if (ftruncate(fd_, sz) == -1) { gu_throw_error(errno) << "fruncate() failed"; } byte_t* tmp(reinterpret_cast( mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd_, 0))); if (tmp == MAP_FAILED) { buf_ = 0; clear(); gu_throw_error(ENOMEM) << "mmap() failed"; } buf_ = tmp; } } else { sz = min(threshold_, sz*2); byte_t* tmp(reinterpret_cast(realloc(buf_, sz))); if (tmp == 0) { gu_throw_error(ENOMEM) << "realloc failed"; } buf_ = tmp; } real_buf_size_ = sz; } void galera::MappedBuffer::resize(size_t sz) { reserve(sz); buf_size_ = sz; } void galera::MappedBuffer::clear() { if (fd_ != -1) { if (buf_ != 0) munmap(buf_, real_buf_size_); while (close(fd_) == EINTR) { } unlink(file_.c_str()); } else { free(buf_); } fd_ = -1; buf_ = 0; buf_size_ = 0; real_buf_size_ = 0; } galera-26.4.3/galera/src/nbo.hpp0000664000177500017540000001102413540715002014665 0ustar dbartmy// // Copyright (C) 2015 Codership Oy // #ifndef GALERA_NBO_HPP #define GALERA_NBO_HPP #include "galera_view.hpp" #include "gu_buffer.hpp" #include "gu_serialize.hpp" #include "gu_logger.hpp" #include 
// Context shared between the certification NBOEntry and the
// TrxHandleSlave to signal the end of a non-blocking operation (NBO).
class NBOCtx
{
public:
    NBOCtx()
        :
        mutex_  (),
        cond_   (),
        ts_     (),
        aborted_(false)
    { }

    // Publish the NBO-end write set and wake up all waiters.
    void set_ts(const TrxHandleSlavePtr& ts)
    {
        gu::Lock lock(mutex_);
        assert(ts != 0);
        assert(ts->global_seqno() != WSREP_SEQNO_UNDEFINED);
        ts_ = ts;
        cond_.broadcast();
    }

    // Global seqno of the published NBO-end write set, or
    // WSREP_SEQNO_UNDEFINED if none has been set yet.
    wsrep_seqno_t seqno() const
    {
        gu::Lock lock(mutex_);
        return (ts_ == 0 ? WSREP_SEQNO_UNDEFINED : ts_->global_seqno());
    }

    // Wait until the NBO-end write set has been published via set_ts().
    // Waits in one-second slices; on a wait timeout an empty pointer is
    // returned so the caller can re-evaluate its state. Other wait
    // errors are propagated as exceptions.
    TrxHandleSlavePtr wait_ts()
    {
        gu::Lock lock(mutex_);
        while (ts_ == 0)
        {
            try
            {
                lock.wait(cond_, gu::datetime::Date::calendar()
                          + gu::datetime::Sec);
            }
            catch (const gu::Exception& e)
            {
                if (e.get_errno() == ETIMEDOUT)
                {
                    // give the caller a chance to check for abort etc.
                    return TrxHandleSlavePtr();
                }
                throw;
            }
        }
        return ts_;
    }

    // Mark the operation aborted (or clear the flag) and wake waiters.
    void set_aborted(bool val)
    {
        gu::Lock lock(mutex_);
        aborted_= val;
        cond_.broadcast();
    }

    bool aborted() const
    {
        gu::Lock lock(mutex_);
        return aborted_;
    }

private:
    // non-copyable
    NBOCtx(const NBOCtx&);
    NBOCtx& operator=(const NBOCtx&);

    gu::Mutex         mutex_;   // protects ts_ and aborted_
    gu::Cond          cond_;    // signaled on set_ts()/set_aborted()
    TrxHandleSlavePtr ts_;      // NBO-end write set, empty until published
    bool              aborted_; // true if the NBO has been aborted
};
nbo_ctx_(nbo_ctx) { } TrxHandleSlave* ts_ptr() { return ts_.get(); } // const TrxHandleSlave* ts_ptr() const { return ts_.get(); } void add_ended(const wsrep_uuid_t& uuid) { std::pair ret( ended_set_.insert(uuid)); if (ret.second == false) { log_warn << "duplicate entry " << uuid << " for ended set"; } } void clear_ended() { ended_set_.clear(); } const View::MembSet& ended_set() const { return ended_set_; } void end(const TrxHandleSlavePtr& ts) { assert(ts != 0); nbo_ctx_->set_ts(ts); } gu::shared_ptr::type nbo_ctx() { return nbo_ctx_; } private: gu::shared_ptr::type ts_; gu::shared_ptr::type buf_; View::MembSet ended_set_; gu::shared_ptr::type nbo_ctx_; }; typedef std::map::type> NBOCtxMap; typedef std::map NBOMap; } #endif // !GALERA_NBO_HPP galera-26.4.3/galera/src/wsrep_provider.cpp0000664000177500017540000012122713540715002017163 0ustar dbartmy// // Copyright (C) 2010-2017 Codership Oy // #include "key_data.hpp" #include "gu_serialize.hpp" #if defined(GALERA_MULTIMASTER) #include "replicator_smm.hpp" #define REPL_CLASS galera::ReplicatorSMM #else #error "Not implemented" #endif #include "wsrep_params.hpp" #include using galera::KeyOS; using galera::WriteSet; using galera::TrxHandle; using galera::TrxHandleMaster; using galera::TrxHandleSlave; using galera::TrxHandleLock; extern "C" { const char* wsrep_interface_version = (char*)WSREP_INTERFACE_VERSION; } extern "C" wsrep_status_t galera_init(wsrep_t* gh, const struct wsrep_init_args* args) { assert(gh != 0); try { gh->ctx = new REPL_CLASS (args); // Moved into galera::ReplicatorSMM::ParseOptions::ParseOptions() // wsrep_set_params(*reinterpret_cast(gh->ctx), // args->options); return WSREP_OK; } catch (gu::Exception& e) { log_error << e.what(); } #ifdef NDEBUG catch (std::exception& e) { log_error << e.what(); } catch (gu::NotFound& e) { /* Unrecognized parameter (logged by gu::Config::set()) */ } catch (...) 
{ log_fatal << "non-standard exception"; } #endif return WSREP_NODE_FAIL; } extern "C" wsrep_cap_t galera_capabilities(wsrep_t* gh) { assert(gh != 0); assert(gh->ctx != 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); return repl->capabilities(); } extern "C" void galera_tear_down(wsrep_t* gh) { assert(gh != 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); if (repl != 0) { delete repl; gh->ctx = 0; } } extern "C" wsrep_status_t galera_parameters_set (wsrep_t* gh, const char* params) { assert(gh != 0); // cppcheck-suppress nullPointer assert(gh->ctx != 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); // cppcheck-suppress nullPointer if (gh) { try { wsrep_set_params (*repl, params); return WSREP_OK; } catch (gu::NotFound&) { log_warn << "Unrecognized parameter in '" << params << "'"; return WSREP_WARNING; } catch (std::exception& e) { log_debug << e.what(); // better logged in wsrep_set_params } } else { log_error << "Attempt to set parameter(s) on uninitialized replicator."; } return WSREP_NODE_FAIL; } extern "C" char* galera_parameters_get (wsrep_t* gh) { assert(gh != 0); assert(gh->ctx != 0); try { REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); return wsrep_get_params(*repl); } catch (std::exception& e) { log_error << e.what(); return 0; } catch (...) { log_fatal << "non-standard exception"; return 0; } } extern "C" wsrep_status_t galera_enc_set_key(wsrep_t* gh, const wsrep_enc_key_t*key) { return WSREP_NOT_IMPLEMENTED; } extern "C" wsrep_status_t galera_connect (wsrep_t* gh, const char* cluster_name, const char* cluster_url, const char* state_donor, wsrep_bool_t bootstrap) { assert(gh != 0); assert(gh->ctx != 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); try { return repl->connect(cluster_name, cluster_url, state_donor ? 
state_donor : "", bootstrap); } catch (gu::Exception& e) { log_error << "Failed to connect to cluster: " << e.what(); return WSREP_NODE_FAIL; } #ifdef NDEBUG catch (std::exception& e) { log_error << e.what(); return WSREP_NODE_FAIL; } catch (...) { log_fatal << "non-standard exception"; return WSREP_FATAL; } #endif /* NDEBUG */ } extern "C" wsrep_status_t galera_disconnect(wsrep_t *gh) { assert(gh != 0); assert(gh->ctx != 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); try { return repl->close(); } catch (std::exception& e) { log_error << e.what(); return WSREP_NODE_FAIL; } catch (...) { log_fatal << "non-standard exception"; return WSREP_FATAL; } } extern "C" wsrep_status_t galera_recv(wsrep_t *gh, void *recv_ctx) { assert(gh != 0); assert(gh->ctx != 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); #ifdef NDEBUG try { #endif /* NDEBUG */ return repl->async_recv(recv_ctx); #ifdef NDEBUG } catch (gu::Exception& e) { log_error << e.what(); switch (e.get_errno()) { case ENOTRECOVERABLE: return WSREP_FATAL; default: return WSREP_NODE_FAIL; } } catch (std::exception& e) { log_error << e.what(); } catch (...) 
/* Resolve the TrxHandleMaster associated with a write set handle.
 *
 * A previously resolved trx is cached in handle->opaque and returned
 * directly. Otherwise the trx is looked up from (or, when 'create' is
 * true, created by) the replicator and cached in the handle. Returns
 * NULL when the trx does not exist and create == false. */
static TrxHandleMaster*
get_local_trx(REPL_CLASS* const repl, wsrep_ws_handle_t* const handle,
              bool const create)
{
    assert(handle != 0);

    if (handle->opaque != 0)
    {
        TrxHandleMaster* const cached(
            static_cast<TrxHandleMaster*>(handle->opaque));
        // cached trx must match the handle (or the handle carries no id)
        assert(cached->trx_id() == handle->trx_id ||
               wsrep_trx_id_t(-1) == handle->trx_id);
        return cached;
    }

    TrxHandleMaster* resolved(0);
    try
    {
        resolved = repl->get_local_trx(handle->trx_id, create).get();
        handle->opaque = resolved;
    }
    catch (gu::NotFound&)
    {
        // not found and create == false: fall through, return NULL
    }

    return resolved;
}
/* wsrep provider entry point: BF (brute force) abort the certification
 * of a local victim transaction on behalf of a higher-priority action
 * with seqno bf_seqno.
 *
 * On return *victim_seqno holds the victim's seqno if it was assigned,
 * otherwise WSREP_SEQNO_UNDEFINED. Returns WSREP_OK also when the
 * victim is not found (nothing to abort). */
extern "C"
wsrep_status_t galera_abort_certification(wsrep_t*       gh,
                                          wsrep_seqno_t  bf_seqno,
                                          wsrep_trx_id_t victim_trx,
                                          wsrep_seqno_t* victim_seqno)
{
    assert(gh != 0);
    assert(gh->ctx != 0);
    assert(victim_seqno != 0);

    *victim_seqno = WSREP_SEQNO_UNDEFINED;

    REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx));
    wsrep_status_t retval;
    galera::TrxHandleMasterPtr txp(repl->get_local_trx(victim_trx));

    if (!txp)
    {
        // victim already gone - nothing to abort
        log_debug << "trx to abort " << victim_trx
                  << " with bf seqno " << bf_seqno
                  << " not found";
        return WSREP_OK;
    }
    else
    {
        log_debug << "ABORTING trx " << victim_trx
                  << " with bf seqno " << bf_seqno;
    }

    try
    {
        TrxHandleMaster& trx(*txp);
        TrxHandleLock lock(trx); // trx must be locked for state changes
        retval = repl->abort_trx(trx, bf_seqno, victim_seqno);
    }
    catch (std::exception& e)
    {
        log_error << e.what();
        retval = WSREP_NODE_FAIL;
    }
    catch (...)
    {
        log_fatal << "non-standard exception";
        retval = WSREP_FATAL;
    }

    GU_DBUG_SYNC_WAIT("abort_trx_end"); // debug sync point for tests

    return retval;
}
trx->set_state(TrxHandle::S_ABORTING); // Victim may already be in S_ABORTING state if it was BF aborted // in pre commit. if (victim->state() != TrxHandle::S_ABORTING) { if (victim->state() != TrxHandle::S_MUST_ABORT) victim->set_state(TrxHandle::S_MUST_ABORT); victim->set_state(TrxHandle::S_ABORTING); } return repl->send(*trx, &meta); } static inline void discard_local_trx(REPL_CLASS* repl, wsrep_ws_handle_t* ws_handle, TrxHandleMaster* trx) { repl->discard_local_trx(trx); ws_handle->opaque = 0; } static inline void append_data_array (TrxHandleMaster& trx, const struct wsrep_buf* const data, size_t const count, wsrep_data_type_t const type, bool const copy) { for (size_t i(0); i < count; ++i) { gu_trace(trx.append_data(data[i].ptr, data[i].len, type, copy)); } } extern "C" wsrep_status_t galera_assign_read_view(wsrep_t* const gh, wsrep_ws_handle_t* const handle, const wsrep_gtid_t* const rv) { return WSREP_NOT_IMPLEMENTED; } extern "C" wsrep_status_t galera_certify(wsrep_t* const gh, wsrep_conn_id_t const conn_id, wsrep_ws_handle_t* const trx_handle, uint32_t const flags, wsrep_trx_meta_t* const meta) { assert(gh != 0); assert(gh->ctx != 0); REPL_CLASS * const repl(static_cast< REPL_CLASS * >(gh->ctx)); TrxHandleMaster* txp(get_local_trx(repl, trx_handle, false)); // The following combinations of flags should not be set together assert(!((flags & WSREP_FLAG_TRX_START) && (flags & WSREP_FLAG_ROLLBACK))); assert(!((flags & WSREP_FLAG_TRX_PREPARE) && (flags & WSREP_FLAG_ROLLBACK))); assert(!((flags & WSREP_FLAG_TRX_PREPARE) && (flags & WSREP_FLAG_TRX_END))); if (gu_unlikely(txp == 0)) { if (meta != 0) { meta->gtid = WSREP_GTID_UNDEFINED; meta->depends_on = WSREP_SEQNO_UNDEFINED; meta->stid.node = repl->source_id(); meta->stid.trx = -1; } // no data to replicate return WSREP_OK; } TrxHandleMaster& trx(*txp); assert(trx.trx_id() != uint64_t(-1)); if (meta != 0) { meta->gtid = WSREP_GTID_UNDEFINED; meta->depends_on = WSREP_SEQNO_UNDEFINED; meta->stid.node = 
trx.source_id(); meta->stid.trx = trx.trx_id(); } wsrep_status_t retval; try { TrxHandleLock lock(trx); trx.set_conn_id(conn_id); trx.set_flags(trx.flags() | TrxHandle::wsrep_flags_to_trx_flags(flags)); if (flags & WSREP_FLAG_ROLLBACK) { if ((trx.flags() & (TrxHandle::F_BEGIN | TrxHandle::F_ROLLBACK)) == (TrxHandle::F_BEGIN | TrxHandle::F_ROLLBACK)) { return WSREP_TRX_MISSING; } trx.set_flags(trx.flags() | TrxHandle::F_PA_UNSAFE); if (trx.state() == TrxHandle::S_ABORTING) { trx.set_state(TrxHandle::S_EXECUTING); } } retval = repl->replicate(trx, meta); if (meta) { if (trx.ts()) { assert(meta->gtid.seqno > 0); assert(meta->gtid.seqno == trx.ts()->global_seqno()); assert(meta->depends_on == trx.ts()->depends_seqno()); } else { assert(meta->gtid.seqno == WSREP_SEQNO_UNDEFINED); assert(meta->depends_on == WSREP_SEQNO_UNDEFINED); } } assert(trx.trx_id() == meta->stid.trx); assert(!(retval == WSREP_OK || retval == WSREP_BF_ABORT) || (trx.ts() && trx.ts()->global_seqno() > 0)); if (retval == WSREP_OK) { assert(trx.state() != TrxHandle::S_MUST_ABORT); if ((flags & WSREP_FLAG_ROLLBACK) == 0) { assert(trx.ts() && trx.ts()->last_seen_seqno() >= 0); retval = repl->certify(trx, meta); assert(trx.state() != TrxHandle::S_MUST_ABORT || retval != WSREP_OK); if (meta) assert(meta->depends_on >= 0 || retval != WSREP_OK); } } else { if (meta) meta->depends_on = -1; } assert(retval == WSREP_OK || // success retval == WSREP_TRX_FAIL || // cert failure retval == WSREP_BF_ABORT || // BF abort retval == WSREP_CONN_FAIL|| // not in joined/synced state retval == WSREP_NODE_FAIL); // node inconsistent } catch (gu::Exception& e) { log_error << e.what(); if (e.get_errno() == EMSGSIZE) retval = WSREP_SIZE_EXCEEDED; else retval = WSREP_NODE_FAIL; } catch (std::exception& e) { log_error << e.what(); retval = WSREP_NODE_FAIL; } catch (...) 
/* wsrep provider entry point: enter the commit order critical section
 * for the write set referred to by ws_handle.
 *
 * For a local (master) trx, handles the case where the trx has been
 * BF aborted before entering: a committing trx must be replayed
 * (WSREP_BF_ABORT), a non-committing one is failed (WSREP_TRX_FAIL).
 * For a remote (slave) trx, enters the commit monitor directly. */
extern "C"
wsrep_status_t galera_commit_order_enter(
    wsrep_t*                 const gh,
    const wsrep_ws_handle_t* const ws_handle,
    const wsrep_trx_meta_t*  const meta
    )
{
    assert(gh        != 0);
    assert(gh->ctx   != 0);
    assert(ws_handle != 0);

    REPL_CLASS * const repl(static_cast< REPL_CLASS * >(gh->ctx));
    TrxHandle* const   txp(static_cast<TrxHandle*>(ws_handle->opaque));

    assert(NULL != txp);
    if (txp == 0)
    {
        log_warn << "Trx " << ws_handle->trx_id
                 << " not found for commit order enter";
        return WSREP_TRX_MISSING;
    }

    wsrep_status_t retval;

    try
    {
        if (txp->master())
        {
            TrxHandleMaster& trx(*reinterpret_cast<TrxHandleMaster*>(txp));
            TrxHandleLock lock(trx);
            // assert(trx.state() != TrxHandle::S_REPLAYING);
            if (gu_unlikely(trx.state() == TrxHandle::S_MUST_ABORT))
            {
                // BF aborted before commit monitor entry
                if (trx.ts() && (trx.ts()->flags() & TrxHandle::F_COMMIT))
                {
                    // commit fragment: must be replayed by the caller
                    trx.set_state(TrxHandle::S_MUST_REPLAY);
                    return WSREP_BF_ABORT;
                }
                else
                {
                    // non-committing fragment: fail the trx
                    trx.set_state(TrxHandle::S_ABORTING);
                    return WSREP_TRX_FAIL;
                }
            }
            retval = repl->commit_order_enter_local(trx);
        }
        else
        {
            TrxHandleSlave& ts(*reinterpret_cast<TrxHandleSlave*>(txp));
            retval = repl->commit_order_enter_remote(ts);
        }
    }
    catch (std::exception& e)
    {
        log_error << e.what();
        retval = WSREP_NODE_FAIL;
    }
    catch (...)
    {
        log_fatal << "non-standard exception";
        retval = WSREP_FATAL;
    }

    return retval;
}
/* wsrep provider entry point: release a write set handle after commit
 * or rollback processing has finished.
 *
 * Normally discards the local trx object; it is kept alive when
 * (a) a streaming (SR) trx finished a fragment and is ready for the
 * next one, or (b) an SR trx was BF aborted with deferred abort and
 * is still needed for sending the rollback fragment (returns
 * WSREP_BF_ABORT in that case). */
extern "C"
wsrep_status_t galera_release(wsrep_t*            gh,
                              wsrep_ws_handle_t*  ws_handle)
{
    assert(gh != 0);
    assert(gh->ctx != 0);

    REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx));

    TrxHandleMaster* txp(get_local_trx(repl, ws_handle, false));

    if (txp == 0)
    {
        log_debug << "trx " << ws_handle->trx_id << " not found for release";
        return WSREP_OK;
    }

    wsrep_status_t retval;
    bool discard_trx(true); // whether to drop the trx object at the end

    try
    {
        TrxHandleMaster& trx(*txp);
        TrxHandleLock lock(trx);

        if (trx.state() == TrxHandle::S_MUST_ABORT)
        {
            // This is possible in case of ALG due to a race: BF applier BF
            // aborts trx that has already grabbed commit monitor and is
            // committing. This is possible only if aborter is ordered after
            // the victim, and since for regular committing transactions such
            // abort is unnecessary, this should be possible only for ongoing
            // streaming transactions.
            galera::TrxHandleSlavePtr ts(trx.ts());
            if (ts && ts->flags() & TrxHandle::F_COMMIT)
            {
                log_warn << "trx was BF aborted during commit: " << *ts;
                assert(0);
                // manipulate state to avoid crash
                trx.set_state(TrxHandle::S_MUST_REPLAY);
                trx.set_state(TrxHandle::S_REPLAYING);
            }
            else
            {
                // Streaming replication, not in commit phase. Must abort.
                log_debug << "SR trx was BF aborted during commit: " << trx;
                trx.set_state(TrxHandle::S_ABORTING);
            }
        }

        if (gu_likely(trx.state() == TrxHandle::S_COMMITTED))
        {
            assert(!trx.deferred_abort());
            retval = repl->release_commit(trx);
            assert(trx.state() == TrxHandle::S_COMMITTED ||
                   trx.state() == TrxHandle::S_EXECUTING);
            if (trx.state() == TrxHandle::S_EXECUTING && retval == WSREP_OK)
            {
                // SR trx ready for new fragment, keep transaction
                discard_trx = false;
            }
        }
        else if (trx.deferred_abort() == false)
        {
            retval = repl->release_rollback(trx);
            assert(trx.state() == TrxHandle::S_ROLLED_BACK);
        }
        else if (trx.state() == TrxHandle::S_ABORTING)
        {
            assert(trx.deferred_abort());
            // SR trx was BF aborted before commit_order_leave()
            // We return BF abort error code here and do not clean up
            // the transaction. The transaction is needed for sending
            // rollback fragment.
            retval = WSREP_BF_ABORT;
            discard_trx = false;
            trx.set_deferred_abort(false);
        }
        else
        {
            assert(0);
            gu_throw_fatal << "Internal program error: "
                "unexpected state in deferred abort trx: " << trx;
        }

        // sanity check: only these states are legal after release
        switch(trx.state())
        {
        case TrxHandle::S_COMMITTED:
        case TrxHandle::S_ROLLED_BACK:
        case TrxHandle::S_EXECUTING:
        case TrxHandle::S_ABORTING:
            break;
        default:
            assert(0);
            gu_throw_fatal << "Internal library error: "
                "unexpected trx release state: " << trx;
        }
    }
    catch (std::exception& e)
    {
        log_error << e.what();
        assert(0);
        retval = WSREP_NODE_FAIL;
    }
    catch (...)
    {
        log_fatal << "non-standard exception";
        assert(0);
        retval = WSREP_FATAL;
    }

    if (discard_trx)
    {
        discard_local_trx(repl, ws_handle, txp);
    }

    return retval;
}
} catch (std::exception& e) { log_warn << e.what(); retval = WSREP_CONN_FAIL; } catch (...) { log_fatal << "non-standard exception"; retval = WSREP_FATAL; } return retval; } extern "C" wsrep_status_t galera_sync_wait(wsrep_t* const wsrep, wsrep_gtid_t* const upto, int tout, wsrep_gtid_t* const gtid) { assert(wsrep != 0); assert(wsrep->ctx != 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(wsrep->ctx)); wsrep_status_t retval; try { retval = repl->sync_wait(upto, tout, gtid); } catch (std::exception& e) { log_warn << e.what(); retval = WSREP_CONN_FAIL; } catch (...) { log_fatal << "non-standard exception"; retval = WSREP_FATAL; } return retval; } extern "C" wsrep_status_t galera_last_committed_id(wsrep_t* const wsrep, wsrep_gtid_t* const gtid) { assert(wsrep != 0); assert(wsrep->ctx != 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(wsrep->ctx)); wsrep_status_t retval; try { retval = repl->last_committed_id(gtid); } catch (std::exception& e) { log_warn << e.what(); retval = WSREP_CONN_FAIL; } catch (...) { log_fatal << "non-standard exception"; retval = WSREP_FATAL; } return retval; } extern "C" wsrep_status_t galera_free_connection(wsrep_t* const gh, wsrep_conn_id_t const conn_id) { assert(gh != 0); assert(gh->ctx != 0); // This function is now no-op and can be removed from the // future versions. Connection object is allocated only from // galera_to_execute_start() and will be released either // from that function in case of failure or from // galera_to_execute_end(). return WSREP_OK; } extern "C" wsrep_status_t galera_to_execute_start(wsrep_t* const gh, wsrep_conn_id_t const conn_id, const wsrep_key_t* const keys, size_t const keys_num, const struct wsrep_buf* const data, size_t const count, uint32_t const flags, wsrep_trx_meta_t* const meta) { assert(gh != 0); assert(gh->ctx != 0); // Non-blocking operations "certification" depends on // TRX_START and TRX_END flags, not having those flags may cause // undefined behavior so check them here. 
assert(flags & (WSREP_FLAG_TRX_START | WSREP_FLAG_TRX_END)); if ((flags & (WSREP_FLAG_TRX_START | WSREP_FLAG_TRX_END)) == 0) { log_warn << "to_execute_start(): either WSREP_FLAG_TRX_START " << "or WSREP_FLAG_TRX_END flag is required"; return WSREP_CONN_FAIL; } // Simultaneous use of TRX_END AND ROLLBACK is not allowed assert(!((flags & WSREP_FLAG_TRX_END) && (flags & WSREP_FLAG_ROLLBACK))); if ((flags & WSREP_FLAG_TRX_END) && (flags & WSREP_FLAG_ROLLBACK)) { log_warn << "to_execute_start(): simultaneous use of " << "WSREP_FLAG_TRX_END and WSREP_FLAG_ROLLBACK " << "is not allowed"; return WSREP_CONN_FAIL; } REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); galera::TrxHandleMasterPtr txp(repl->local_conn_trx(conn_id, true)); assert(txp != 0); TrxHandleMaster& trx(*txp.get()); assert(trx.state() == TrxHandle::S_EXECUTING); trx.set_flags(TrxHandle::wsrep_flags_to_trx_flags( flags | WSREP_FLAG_ISOLATION)); // NBO-end event. Application should have provided the ongoing // operation start event source node id and connection id in // meta->stid.node and meta->stid.conn respectively if (trx.nbo_end() == true) { galera::NBOKey key(meta->gtid.seqno); gu::Buffer buf(galera::NBOKey::serial_size()); (void)key.serialize(&buf[0], buf.size(), 0); struct wsrep_buf data_buf = {&buf[0], buf.size()}; gu_trace(append_data_array(trx, &data_buf, 1, WSREP_DATA_ORDERED,true)); } if (meta != 0) { // Don't override trx meta gtid for NBO end yet, gtid is used in // replicator wait_nbo_end() to locate correct nbo context if (trx.nbo_end() == false) { meta->gtid = WSREP_GTID_UNDEFINED; } meta->depends_on = WSREP_SEQNO_UNDEFINED; meta->stid.node = trx.source_id(); meta->stid.trx = trx.trx_id(); meta->stid.conn = trx.conn_id(); } wsrep_status_t retval; #ifdef NDEBUG try #endif // NDEBUG { TrxHandleLock lock(trx); for (size_t i(0); i < keys_num; ++i) { galera::KeyData k(repl->trx_proto_ver(), keys[i].key_parts, keys[i].key_parts_num, WSREP_KEY_EXCLUSIVE,false); 
gu_trace(trx.append_key(k)); } gu_trace(append_data_array(trx, data, count, WSREP_DATA_ORDERED, false)); if (trx.nbo_end() == false) { retval = repl->replicate(trx, meta); assert((retval == WSREP_OK && trx.ts() != 0 && trx.ts()->global_seqno() > 0) || (retval != WSREP_OK && (trx.ts() == 0 || trx.ts()->global_seqno() < 0))); if (meta) { if (trx.ts()) { assert(meta->gtid.seqno > 0); assert(meta->gtid.seqno == trx.ts()->global_seqno()); assert(meta->depends_on == trx.ts()->depends_seqno()); } else { assert(meta->gtid.seqno == WSREP_SEQNO_UNDEFINED); assert(meta->depends_on == WSREP_SEQNO_UNDEFINED); } } } else { // NBO-end events are broadcasted separately in to_isolation_begin() retval = WSREP_OK; } if (retval == WSREP_OK) { retval = repl->to_isolation_begin(trx, meta); } } #ifdef NDEBUG catch (gu::Exception& e) { log_error << e.what(); if (e.get_errno() == EMSGSIZE) retval = WSREP_SIZE_EXCEEDED; else retval = WSREP_CONN_FAIL; } catch (std::exception& e) { log_warn << e.what(); retval = WSREP_CONN_FAIL; } catch (...) { log_fatal << "non-standard exception"; retval = WSREP_FATAL; } #endif // NDEBUG if (trx.ts() == NULL || trx.ts()->global_seqno() < 0) { // galera_to_execute_end() won't be called repl->discard_local_conn_trx(conn_id); // trx is not needed anymore } return retval; } extern "C" wsrep_status_t galera_to_execute_end(wsrep_t* const gh, wsrep_conn_id_t const conn_id, const wsrep_buf_t* const err) { assert(gh != 0); assert(gh->ctx != 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); galera::TrxHandleMasterPtr trx(repl->local_conn_trx(conn_id, false)); assert(trx != 0); if (trx == 0) { log_warn << "No trx handle for connection " << conn_id << " in galera_to_execute_end()"; return WSREP_CONN_FAIL; } wsrep_status_t ret(WSREP_OK); try { TrxHandleLock lock(*trx); repl->to_isolation_end(*trx, err); } catch (std::exception& e) { log_error << "to_execute_end(): " << e.what(); ret = WSREP_NODE_FAIL; } catch (...) 
{ log_fatal << "to_execute_end(): non-standard exception"; ret = WSREP_FATAL; } gu_trace(repl->discard_local_conn_trx(conn_id)); // trx will be unreferenced (destructed) during purge repl->discard_local_conn_trx(conn_id); return ret; } extern "C" wsrep_status_t galera_preordered_collect (wsrep_t* const gh, wsrep_po_handle_t* const handle, const struct wsrep_buf* const data, size_t const count, wsrep_bool_t const copy) { assert(gh != 0); assert(gh->ctx != 0); assert(handle != 0); assert(data != 0); assert(count > 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); try { return repl->preordered_collect(*handle, data, count, copy); } catch (std::exception& e) { log_warn << e.what(); return WSREP_TRX_FAIL; } catch (...) { log_fatal << "non-standard exception"; return WSREP_FATAL; } } extern "C" wsrep_status_t galera_preordered_commit (wsrep_t* const gh, wsrep_po_handle_t* const handle, const wsrep_uuid_t* const source_id, uint32_t const flags, int const pa_range, wsrep_bool_t const commit) { assert(gh != 0); assert(gh->ctx != 0); assert(handle != 0); assert(source_id != 0 || false == commit); assert(pa_range >= 0 || false == commit); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); try { return repl->preordered_commit(*handle, *source_id, flags, pa_range, commit); } catch (std::exception& e) { log_warn << e.what(); return WSREP_TRX_FAIL; } catch (...) 
{ log_fatal << "non-standard exception"; return WSREP_FATAL; } } extern "C" wsrep_status_t galera_sst_sent (wsrep_t* const gh, const wsrep_gtid_t* const state_id, int const rcode) { assert(gh != 0); assert(gh->ctx != 0); assert(state_id != 0); assert(rcode <= 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); return repl->sst_sent(*state_id, rcode); } extern "C" wsrep_status_t galera_sst_received (wsrep_t* const gh, const wsrep_gtid_t* const state_id, const wsrep_buf_t* const state, int const rcode) { assert(gh != 0); assert(gh->ctx != 0); assert(state_id != 0); assert(rcode <= 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); if (rcode < 0) { assert(state_id->seqno == WSREP_SEQNO_UNDEFINED); } return repl->sst_received(*state_id, state, rcode); } extern "C" wsrep_status_t galera_snapshot(wsrep_t* const wsrep, const wsrep_buf_t* const msg, const char* const donor_spec) { return WSREP_NOT_IMPLEMENTED; } extern "C" struct wsrep_stats_var* galera_stats_get (wsrep_t* gh) { assert(gh != 0); assert(gh->ctx != 0); REPL_CLASS* repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); return const_cast(repl->stats_get()); } extern "C" void galera_stats_free (wsrep_t* gh, struct wsrep_stats_var* s) { assert(gh != 0); assert(gh->ctx != 0); REPL_CLASS* repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); return repl->stats_free(s); //REPL_CLASS::stats_free(s); } extern "C" void galera_stats_reset (wsrep_t* gh) { assert(gh != 0); assert(gh->ctx != 0); REPL_CLASS* repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); repl->stats_reset(); } extern "C" wsrep_seqno_t galera_pause (wsrep_t* gh) { assert(gh != 0); assert(gh->ctx != 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); try { return repl->pause(); } catch (gu::Exception& e) { log_error << e.what(); return -e.get_errno(); } } extern "C" wsrep_status_t galera_resume (wsrep_t* gh) { assert(gh != 0); assert(gh->ctx != 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); try { 
repl->resume(); return WSREP_OK; } catch (gu::Exception& e) { log_error << e.what(); return WSREP_NODE_FAIL; } } extern "C" wsrep_status_t galera_desync (wsrep_t* gh) { assert(gh != 0); assert(gh->ctx != 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); try { repl->desync(); return WSREP_OK; } catch (gu::Exception& e) { log_error << e.what(); return WSREP_TRX_FAIL; } } extern "C" wsrep_status_t galera_resync (wsrep_t* gh) { assert(gh != 0); assert(gh->ctx != 0); REPL_CLASS * repl(reinterpret_cast< REPL_CLASS * >(gh->ctx)); try { repl->resync(); return WSREP_OK; } catch (gu::Exception& e) { log_error << e.what(); return WSREP_NODE_FAIL; } } extern "C" wsrep_status_t galera_lock (wsrep_t* gh, const char* name, wsrep_bool_t shared, uint64_t owner, int64_t timeout) { assert(gh != 0); assert(gh->ctx != 0); return WSREP_NOT_IMPLEMENTED; } extern "C" wsrep_status_t galera_unlock (wsrep_t* gh, const char* name, uint64_t owner) { assert(gh != 0); assert(gh->ctx != 0); return WSREP_OK; } extern "C" bool galera_is_locked (wsrep_t* gh, const char* name, uint64_t* owner, wsrep_uuid_t* node) { assert(gh != 0); assert(gh->ctx != 0); return false; } static wsrep_t galera_str = { WSREP_INTERFACE_VERSION, &galera_init, &galera_capabilities, &galera_parameters_set, &galera_parameters_get, &galera_enc_set_key, &galera_connect, &galera_disconnect, &galera_recv, &galera_assign_read_view, &galera_certify, &galera_commit_order_enter, &galera_commit_order_leave, &galera_release, &galera_replay_trx, &galera_abort_certification, &galera_rollback, &galera_append_key, &galera_append_data, &galera_sync_wait, &galera_last_committed_id, &galera_free_connection, &galera_to_execute_start, &galera_to_execute_end, &galera_preordered_collect, &galera_preordered_commit, &galera_sst_sent, &galera_sst_received, &galera_snapshot, &galera_stats_get, &galera_stats_free, &galera_stats_reset, &galera_pause, &galera_resume, &galera_desync, &galera_resync, &galera_lock, &galera_unlock, 
&galera_is_locked, "Galera", GALERA_VER "(r" GALERA_REV ")", "Codership Oy ", &galera_tear_down, NULL, NULL }; /* Prototype to make compiler happy */ extern "C" int wsrep_loader(wsrep_t *hptr); extern "C" int wsrep_loader(wsrep_t *hptr) { if (!hptr) return EINVAL; try { *hptr = galera_str; } catch (...) { return ENOTRECOVERABLE; } return WSREP_OK; } galera-26.4.3/galera/src/monitor.hpp0000664000177500017540000003775713540715002015623 0ustar dbartmy// // Copyright (C) 2010-2013 Codership Oy // #ifndef GALERA_MONITOR_HPP #define GALERA_MONITOR_HPP #include "trx_handle.hpp" #include // for gu::Mutex and gu::Cond #include #include namespace galera { template class Monitor { private: struct Process { Process() : obj_(0), cond_(), wait_cond_(), state_(S_IDLE) #ifndef NDEBUG ,dobj_() #endif /* NDEBUG */ { } const C* obj_; gu::Cond cond_; gu::Cond wait_cond_; enum State { S_IDLE, // Slot is free S_WAITING, // Waiting to enter applying critical section S_CANCELED, S_APPLYING, // Applying S_FINISHED // Finished } state_; #ifndef NDEBUG C dobj_; #endif /* NDEBUG */ private: // non-copyable Process(const Process& other); void operator=(const Process&); }; static const ssize_t process_size_ = (1ULL << 16); static const size_t process_mask_ = process_size_ - 1; public: Monitor() : mutex_(), cond_(), uuid_(WSREP_UUID_UNDEFINED), last_entered_(-1), last_left_(-1), drain_seqno_(GU_LLONG_MAX), process_(new Process[process_size_]), entered_(0), oooe_(0), oool_(0), win_size_(0) { } ~Monitor() { delete[] process_; if (entered_ > 0) { log_info << "mon: entered " << entered_ << " oooe fraction " << double(oooe_)/entered_ << " oool fraction " << double(oool_)/entered_; } else { log_info << "apply mon: entered 0"; } } /* * For ordered CC events this had to be changed: * - it either resets position to -1 or * - merely advances it to seqno if current position is behind. * Assumes that monitor has been drained. 
*/
        void set_initial_position(const wsrep_uuid_t& uuid,
                                  wsrep_seqno_t const seqno)
        {
            gu::Lock lock(mutex_);

            state_debug_print("set_initial_position", seqno);

            uuid_ = uuid;

            if (last_entered_ == -1 || seqno == -1)
            {
                // first call or reset
                last_entered_ = last_left_ = seqno;
            }
            else
#if 1 // now
            {
                // only advance positions, never move them backwards
                if (last_left_ < seqno) last_left_ = seqno;
                if (last_entered_ < last_left_) last_entered_ = last_left_;
            }
            // some drainers may wait for us here
            cond_.broadcast();
#else // before
            {
                // drain monitor up to seqno but don't reset last_entered_
                // or last_left_
                drain_common(seqno, lock);
                drain_seqno_ = GU_LLONG_MAX;
            }
#endif

            if (seqno != -1)
            {
                // wake up threads wait()ing on this seqno
                const size_t idx(indexof(seqno));
                process_[idx].wait_cond_.broadcast();
            }
        }

        // Blocks until obj may enter the critical section, as decided by
        // obj.condition(last_entered_, last_left_) (see may_enter()), then
        // marks obj's slot S_APPLYING and updates entry statistics.
        // If the slot was canceled (see interrupt()), releases the slot and
        // throws via gu_throw_error(EINTR).
        void enter(C& obj)
        {
            const wsrep_seqno_t obj_seqno(obj.seqno());
            const size_t idx(indexof(obj_seqno));
            gu::Lock lock(mutex_);

            state_debug_print("enter", obj);

            assert(obj_seqno > last_left_);

            // wait for a free slot in the window, update last_entered_
            pre_enter(obj, lock);

            if (gu_likely(process_[idx].state_ != Process::S_CANCELED))
            {
                assert(process_[idx].state_ == Process::S_IDLE);

                process_[idx].state_ = Process::S_WAITING;
                process_[idx].obj_   = &obj;
#ifndef NDEBUG
                // keep a debug copy of the object in the slot
                process_[idx].dobj_.~C();
                new (&process_[idx].dobj_) C(obj);
#endif /* NDEBUG */
#ifdef GU_DBUG_ON
                obj.debug_sync(mutex_);
#endif // GU_DBUG_ON
                while (may_enter(obj) == false &&
                       process_[idx].state_ == Process::S_WAITING)
                {
                    lock.wait(process_[idx].cond_);
                }

                if (process_[idx].state_ != Process::S_CANCELED)
                {
                    assert(process_[idx].state_ == Process::S_WAITING ||
                           process_[idx].state_ == Process::S_APPLYING);

                    process_[idx].state_ = Process::S_APPLYING;

                    // stats: out-of-order entry and current window size
                    ++entered_;
                    oooe_     += ((last_left_ + 1) < obj_seqno);
                    win_size_ += (last_entered_ - last_left_);

                    return;
                }
            }

            // slot was canceled while (or before) waiting: free it and
            // report interruption to the caller
            assert(process_[idx].state_ == Process::S_CANCELED);

            process_[idx].state_ = Process::S_IDLE;

            state_debug_print("enter canceled", obj);

            gu_throw_error(EINTR);
        }

        // True if obj is currently inside the critical section.
        bool entered(const C& obj) const
        {
            return state(obj) == Process::S_APPLYING;
        }

        // True if obj has already left the critical section.
        bool finished(const C& obj) const
        {
            return state(obj) == Process::S_FINISHED;
        }
bool canceled(const C& obj) const { return state(obj) == Process::S_CANCELED; } void leave(const C& obj) { #ifndef NDEBUG size_t idx(indexof(obj.seqno())); #endif /* NDEBUG */ gu::Lock lock(mutex_); state_debug_print("leave", obj); assert(process_[idx].state_ == Process::S_APPLYING || process_[idx].state_ == Process::S_CANCELED); assert(process_[indexof(last_left_)].state_ == Process::S_IDLE); post_leave(obj.seqno(), lock); } void self_cancel(C& obj) { wsrep_seqno_t const obj_seqno(obj.seqno()); size_t idx(indexof(obj_seqno)); gu::Lock lock(mutex_); state_debug_print("self_cancel", obj); assert(obj_seqno > last_left_); while (obj_seqno - last_left_ >= process_size_) // TODO: exit on error { log_warn << "Trying to self-cancel seqno out of process " << "space: obj_seqno - last_left_ = " << obj_seqno << " - " << last_left_ << " = " << (obj_seqno - last_left_) << ", process_size_: " << process_size_ << ". Deadlock is very likely."; lock.wait(cond_); } assert(process_[idx].state_ == Process::S_IDLE || process_[idx].state_ == Process::S_CANCELED); #ifndef NDEBUG process_[idx].dobj_.~C(); new (&process_[idx].dobj_) C(obj); #endif /* NDEBUG */ if (obj_seqno > last_entered_) last_entered_ = obj_seqno; if (obj_seqno <= drain_seqno_) { post_leave(obj.seqno(), lock); } else { process_[idx].state_ = Process::S_FINISHED; } } bool interrupt(const C& obj) { size_t idx (indexof(obj.seqno())); gu::Lock lock(mutex_); while (obj.seqno() - last_left_ >= process_size_) // TODO: exit on error { lock.wait(cond_); } state_debug_print("interrupt", obj); if ((process_[idx].state_ == Process::S_IDLE && obj.seqno() > last_left_ ) || process_[idx].state_ == Process::S_WAITING ) { process_[idx].state_ = Process::S_CANCELED; process_[idx].cond_.signal(); // since last_left + 1 cannot be <= S_WAITING we're not // modifying a window here. No broadcasting. 
return true; } else { log_debug << "interrupting " << obj.seqno() << " state " << process_[idx].state_ << " le " << last_entered_ << " ll " << last_left_; } return false; } wsrep_seqno_t last_left() const { gu::Lock lock(mutex_); return last_left_; } wsrep_seqno_t last_entered() const { gu::Lock lock(mutex_); return last_entered_; } void last_left_gtid(wsrep_gtid_t& gtid) const { gu::Lock lock(mutex_); gtid.uuid = uuid_; gtid.seqno = last_left_; } ssize_t size() const { return process_size_; } bool would_block (wsrep_seqno_t seqno) const { return (seqno - last_left_ >= process_size_ || seqno > drain_seqno_); } void drain(wsrep_seqno_t seqno) { gu::Lock lock(mutex_); state_debug_print("drain", seqno); while (drain_seqno_ != GU_LLONG_MAX) { lock.wait(cond_); } drain_common(seqno, lock); // there can be some stale canceled entries update_last_left(); drain_seqno_ = GU_LLONG_MAX; cond_.broadcast(); } void wait(wsrep_seqno_t seqno) { gu::Lock lock(mutex_); while (last_left_ < seqno) { size_t idx(indexof(seqno)); lock.wait(process_[idx].wait_cond_); } } void wait(gu::GTID& gtid, const gu::datetime::Date& wait_until) { gu::Lock lock(mutex_); if (gtid.uuid() != uuid_) { throw gu::NotFound(); } while (last_left_ < gtid.seqno()) { size_t idx(indexof(gtid.seqno())); lock.wait(process_[idx].wait_cond_, wait_until); } } void get_stats(double* oooe, double* oool, double* win_size) const { gu::Lock lock(mutex_); if (entered_ > 0) { *oooe = (oooe_ > 0 ? double(oooe_)/entered_ : .0); *oool = (oool_ > 0 ? double(oool_)/entered_ : .0); *win_size = (win_size_ > 0 ? 
double(win_size_)/entered_ : .0); } else { *oooe = .0; *oool = .0; *win_size = .0; } } void flush_stats() { gu::Lock lock(mutex_); oooe_ = 0; oool_ = 0; win_size_ = 0; entered_ = 0; } private: template void state_debug_print(const std::string& method, const T& x) { // #define GALERA_MONITOR_DEBUG_PRINT #ifdef GALERA_MONITOR_DEBUG_PRINT log_info << typeid(C).name() << ": " << method << "(" << x << "): le " << last_entered_ << ", ll " << last_left_; #endif /* GALERA_MONITOR_DEBUG_PRINT */ } size_t indexof(wsrep_seqno_t seqno) const { return (seqno & process_mask_); } bool may_enter(const C& obj) const { return obj.condition(last_entered_, last_left_); } // wait until it is possible to grab slot in monitor, // update last entered void pre_enter(C& obj, gu::Lock& lock) { assert(last_left_ <= last_entered_); const wsrep_seqno_t obj_seqno(obj.seqno()); while (would_block (obj_seqno)) // TODO: exit on error { lock.wait(cond_); } if (last_entered_ < obj_seqno) last_entered_ = obj_seqno; } void update_last_left() { for (wsrep_seqno_t i = last_left_ + 1; i <= last_entered_; ++i) { Process& a(process_[indexof(i)]); if (Process::S_FINISHED == a.state_) { a.state_ = Process::S_IDLE; last_left_ = i; a.wait_cond_.broadcast(); } else { break; } } assert(last_left_ <= last_entered_); } void wake_up_next() { for (wsrep_seqno_t i = last_left_ + 1; i <= last_entered_; ++i) { Process& a(process_[indexof(i)]); if (a.state_ == Process::S_WAITING && may_enter(*a.obj_) == true) { // We need to set state to APPLYING here because if // it is the last_left_ + 1 and it gets canceled in // the race that follows exit from this function, // there will be nobody to clean up and advance // last_left_. 
a.state_ = Process::S_APPLYING; a.cond_.signal(); } } } void post_leave(wsrep_seqno_t const obj_seqno, gu::Lock& lock) { const size_t idx(indexof(obj_seqno)); if (last_left_ + 1 == obj_seqno) // we're shrinking window { process_[idx].state_ = Process::S_IDLE; last_left_ = obj_seqno; process_[idx].wait_cond_.broadcast(); update_last_left(); oool_ += (last_left_ > obj_seqno); // wake up waiters that may remain above us (last_left_ // now is max) wake_up_next(); } else { process_[idx].state_ = Process::S_FINISHED; } process_[idx].obj_ = 0; assert((last_left_ >= obj_seqno && process_[idx].state_ == Process::S_IDLE) || process_[idx].state_ == Process::S_FINISHED); assert(last_left_ != last_entered_ || process_[indexof(last_left_)].state_ == Process::S_IDLE); if ((last_left_ >= obj_seqno) || // - occupied window shrinked (last_left_ >= drain_seqno_)) // - this is to notify drain that // we reached drain_seqno_ { cond_.broadcast(); } } void drain_common(wsrep_seqno_t seqno, gu::Lock& lock) { log_debug << "draining up to " << seqno; drain_seqno_ = seqno; if (last_left_ > drain_seqno_) { log_warn << "last left " << last_left_ << " greater than drain seqno " << drain_seqno_; #ifndef NDEBUG for (wsrep_seqno_t i = drain_seqno_; i <= last_left_; ++i) { const Process& a(process_[indexof(i)]); log_info << "applier " << i << " in state " << a.state_; } #endif } while (last_left_ < drain_seqno_) lock.wait(cond_); } typename Process::State state(const C& obj) const { const wsrep_seqno_t obj_seqno(obj.seqno()); const size_t idx(indexof(obj_seqno)); gu::Lock lock(mutex_); while (would_block (obj_seqno)) { lock.wait(cond_); } return process_[idx].state_; } Monitor(const Monitor&); void operator=(const Monitor&); mutable gu::Mutex mutex_; gu::Cond cond_; wsrep_uuid_t uuid_; wsrep_seqno_t last_entered_; wsrep_seqno_t last_left_; wsrep_seqno_t drain_seqno_; Process* process_; long entered_; // entered long oooe_; // out of order entered long oool_; // out of order left long win_size_; // 
window between last_left_ and last_entered_ }; } #endif // GALERA_APPLY_MONITOR_HPP galera-26.4.3/galera/src/key_set.hpp0000664000177500017540000005445213540715002015566 0ustar dbartmy// // Copyright (C) 2013-2018 Codership Oy // #ifndef GALERA_KEY_SET_HPP #define GALERA_KEY_SET_HPP #include "gu_rset.hpp" #include "gu_unordered.hpp" #include "gu_logger.hpp" #include "gu_hexdump.hpp" #include "key_data.hpp" namespace galera { /* forward declarations for KeySet::KeyPart */ class KeySetOut; class KeySet { public: enum Version { EMPTY = 0, FLAT8, /* 8-byte hash (flat) */ FLAT8A, /* 8-byte hash (flat), annotated */ FLAT16, /* 16-byte hash (flat) */ FLAT16A, /* 16-byte hash (flat), annotated */ // TREE8, /* 8-byte hash + full serialized key */ MAX_VERSION = FLAT16A }; static Version version (unsigned int ver) { if (gu_likely (ver <= MAX_VERSION)) return static_cast(ver); throw_version(ver); } static Version version (const std::string& ver); static const char* type(wsrep_key_type_t const t); class Key { public: enum Prefix // this stays for backward compatibility { P_SHARED = 0, P_EXCLUSIVE }; static int const TYPE_MAX = WSREP_KEY_EXCLUSIVE; }; /* class Key */ /* This class describes what commonly would be referred to as a "key". * It is called KeyPart because it does not fully represent a multi-part * key, but only nth part out of N total. * To fully represent a 3-part key p1:p2:p3 one would need 3 such objects: * for parts p1, p1:p2, p1:p2:p3 */ class KeyPart { public: static size_t const TMP_STORE_SIZE = 4096; static size_t const MAX_HASH_SIZE = 16; union TmpStore { gu::byte_t buf[TMP_STORE_SIZE]; gu_word_t align; }; union HashData { gu::byte_t buf[MAX_HASH_SIZE]; gu_word_t align; }; /* This ctor creates a serialized representation of a key in tmp store * from a key hash and optional annotation. 
*/ KeyPart (TmpStore& tmp, const HashData& hash, const wsrep_buf_t* parts, /* for annotation */ Version const ver, int const prefix, int const part_num, int const alignment ) : data_(tmp.buf) { assert(ver > EMPTY && ver <= MAX_VERSION); /* 16 if ver in { FLAT16, FLAT16A }, 8 otherwise */ int const key_size (8 << (static_cast(ver - FLAT16) <= 1)); assert((key_size % alignment) == 0); assert((uintptr_t(tmp.buf) % GU_WORD_BYTES) == 0); assert((uintptr_t(hash.buf) % GU_WORD_BYTES) == 0); ::memcpy (tmp.buf, hash.buf, key_size); /* use lower bits for header: */ /* clear header bits */ gu::byte_t b = tmp.buf[0] & (~HEADER_MASK); /* set prefix */ assert(prefix <= PREFIX_MASK); b |= (prefix & PREFIX_MASK); /* set version */ b |= (ver & VERSION_MASK) << PREFIX_BITS; tmp.buf[0] = b; if (annotated(ver)) { store_annotation(parts, part_num, tmp.buf + key_size, sizeof(tmp.buf) - key_size, alignment); } } /* This ctor uses pointer to a permanently stored serialized key part */ KeyPart (const gu::byte_t* const buf, size_t const size) : data_(buf) { if (gu_likely(size >= 8 && serial_size() <= size)) return; throw_buffer_too_short (serial_size(), size); } explicit KeyPart (const gu::byte_t* ptr = NULL) : data_(ptr) {} /* converts wsrep key type to KeyPart "prefix" depending on writeset * version */ static int prefix(wsrep_key_type_t const ws_type, int const ws_ver) { if (ws_ver >= 0 && ws_ver <= 5) { switch (ws_type) { case WSREP_KEY_SHARED: return 0; case WSREP_KEY_REFERENCE: return ws_ver < 4 ? KeySet::Key::P_EXCLUSIVE : 1; case WSREP_KEY_UPDATE: return ws_ver < 4 ? KeySet::Key::P_EXCLUSIVE : (ws_ver < 5 ? 1 : 2); case WSREP_KEY_EXCLUSIVE: return ws_ver < 4 ? KeySet::Key::P_EXCLUSIVE : (ws_ver < 5 ? 
2 : 3); } } assert(0); throw_bad_type_version(ws_type, ws_ver); } /* The return value is subject to interpretation based on the * writeset version which is done in wsrep_type(int) method */ int prefix() const { return (data_[0] & PREFIX_MASK); } wsrep_key_type_t wsrep_type(int const ws_ver) const { assert(ws_ver >= 0 && ws_ver <= 5); wsrep_key_type_t ret; switch (prefix()) { case 0: ret = WSREP_KEY_SHARED; break; case 1: ret = ws_ver < 4 ? WSREP_KEY_EXCLUSIVE : WSREP_KEY_REFERENCE; break; case 2: assert(ws_ver >= 4); ret = ws_ver < 5 ? WSREP_KEY_EXCLUSIVE : WSREP_KEY_UPDATE; break; case 3: assert(ws_ver >= 5); ret = WSREP_KEY_EXCLUSIVE; break; default: throw_bad_prefix(prefix()); } assert(prefix() == prefix(ret, ws_ver)); return ret; } static Version version(const gu::byte_t* const buf) { return Version( buf ? (buf[0] >> PREFIX_BITS) & VERSION_MASK : EMPTY); } Version version() const { return KeyPart::version(data_); } KeyPart (const KeyPart& k) : data_(k.data_) {} KeyPart& operator= (const KeyPart& k) { data_ = k.data_; return *this; } /* for hash table */ bool matches (const KeyPart& kp) const { assert (NULL != this->data_); assert (NULL != kp.data_); bool ret(true); // collision by default #if GU_WORDSIZE == 64 const uint64_t* lhs(reinterpret_cast(data_)); const uint64_t* rhs(reinterpret_cast(kp.data_)); #else const uint32_t* lhs(reinterpret_cast(data_)); const uint32_t* rhs(reinterpret_cast(kp.data_)); #endif /* WORDSIZE */ switch (std::min(version(), kp.version())) { case EMPTY: assert(0); throw_match_empty_key(version(), kp.version()); case FLAT16: case FLAT16A: #if GU_WORDSIZE == 64 ret = (lhs[1] == rhs[1]); #else ret = (lhs[2] == rhs[2] && lhs[3] == rhs[3]); #endif /* WORDSIZE */ /* fall through */ case FLAT8: case FLAT8A: /* shift is to clear up the header */ #if GU_WORDSIZE == 64 ret = ret && ((gtoh64(lhs[0]) >> HEADER_BITS) == (gtoh64(rhs[0]) >> HEADER_BITS)); #else ret = ret && (lhs[1] == rhs[1] && (gtoh32(lhs[0]) >> HEADER_BITS) == (gtoh32(rhs[0]) >> 
HEADER_BITS)); #endif /* WORDSIZE */ } return ret; } size_t hash () const { /* Now this leaves uppermost bits always 0. * How bad is it in practice? Is it reasonable to assume that only * lower bits are used in unordered set? */ size_t ret(gu::gtoh(reinterpret_cast(data_)[0]) >> HEADER_BITS); return ret; // (ret ^ (ret << HEADER_BITS)) to cover 0 bits } static size_t serial_size (const gu::byte_t* const buf, size_t const size) { Version const ver(version(buf)); return serial_size (ver, buf, size); } size_t serial_size () const { return KeyPart::serial_size(data_, -1U); } void print (std::ostream& os) const; void swap (KeyPart& other) { using std::swap; swap(data_, other.data_); } const gu::byte_t* ptr() const { return data_; } protected: friend class KeySetOut; /* update data pointer */ void update_ptr(const gu::byte_t* ptr) const { data_ = ptr; } /* update storage of KeyPart already inserted in unordered set */ void store(gu::RecordSetOut& rs) const { data_ = rs.append(data_, serial_size(), true, true).first; // log_info << "Stored key of size: " << serial_size(); } private: static unsigned int const PREFIX_BITS = 2; static gu::byte_t const PREFIX_MASK = (1 << PREFIX_BITS) - 1; static unsigned int const VERSION_BITS = 3; static gu::byte_t const VERSION_MASK = (1 << VERSION_BITS) - 1; static unsigned int const HEADER_BITS = PREFIX_BITS + VERSION_BITS; static gu::byte_t const HEADER_MASK = (1 << HEADER_BITS) - 1; mutable /* to be able to store const object */ const gu::byte_t* data_; // it never owns the buffer static size_t base_size (Version const ver, const gu::byte_t* const buf, size_t const size) { switch (ver) { case FLAT16: case FLAT16A: return 16; case FLAT8: case FLAT8A: return 8; case EMPTY: assert(0); } abort(); } static bool annotated (Version const ver) { return (ver == FLAT16A || ver == FLAT8A); } typedef uint16_t ann_size_t; static size_t serial_size (Version const ver, const gu::byte_t* const buf, size_t const size = -1U) { size_t ret(base_size(ver, 
buf, size)); assert (ret <= size); if (annotated(ver)) { assert (ret + 2 <= size); ret +=gu::gtoh(*reinterpret_cast(buf + ret)); assert (ret <= size); } return ret; } static size_t store_annotation (const wsrep_buf_t* parts, int part_num, gu::byte_t* buf, int size, int alignment); static void print_annotation (std::ostream& os, const gu::byte_t* buf); static void throw_buffer_too_short (size_t expected, size_t got) GU_NORETURN; static void throw_bad_type_version (wsrep_key_type_t t, int v) GU_NORETURN; static void throw_bad_prefix (gu::byte_t p) GU_NORETURN; static void throw_match_empty_key (Version my, Version other) GU_NORETURN; }; /* class KeyPart */ class KeyPartHash { public: size_t operator() (const KeyPart& k) const { return k.hash(); } }; class KeyPartEqual { public: bool operator() (const KeyPart& l, const KeyPart& r) const { return (l.matches(r)); } }; /* functor KeyPartEqual */ static void throw_version(int) GU_NORETURN; }; /* class KeySet */ inline void swap (KeySet::KeyPart& a, KeySet::KeyPart& b) { a.swap(b); } inline std::ostream& operator << (std::ostream& os, const KeySet::KeyPart& kp) { kp.print (os); return os; } #if defined(__GNUG__) # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic push # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic ignored "-Weffc++" #endif class KeySetOut : public gu::RecordSetOut { public: typedef gu::UnorderedSet < KeySet::KeyPart, KeySet::KeyPartHash, KeySet::KeyPartEqual > /* This #if decides whether we use straight gu::UnorderedSet for appended * key parts (0), or go for an optimized version (1). Don't remove it. */ #if 0 KeyParts; #else KeyPartSet; /* This is a naive mock up of an "unordered set" that first tries to use * preallocated set of buckets and falls back to a "real" heap-based * unordered set from STL/TR1 when preallocated one is exhausted. 
* The goal is to make sure that at least 3 keys can be inserted without * the need for dynamic allocation. * In practice, with 64 "buckets" and search depth of 3, the average * number of inserted keys before there is a need to go for heap is 25. * 128 buckets will give you 45 and 256 - around 80. */ class KeyParts { public: KeyParts() : first_(), second_(NULL), first_size_(0) { ::memset(first_, 0, sizeof(first_)); } ~KeyParts() { delete second_; } /* This iterator class is declared for compatibility with * unordered_set. We may actually use a more simple interface here. */ class iterator { public: iterator(const KeySet::KeyPart* kp) : kp_(kp) {} /* This is sort-of a dirty hack to ensure that first_ array * of KeyParts class can be treated like a POD array. * It uses the fact that the only non-static member of * KeySet::KeyPart is gu::byte_t* and so does direct casts between * pointers. I wish someone could make it cleaner. */ iterator(const gu::byte_t** kp) : kp_(reinterpret_cast(kp)) {} const KeySet::KeyPart* operator -> () const { return kp_; } const KeySet::KeyPart& operator * () const { return *kp_; } bool operator == (const iterator& i) const { return (kp_ == i.kp_); } bool operator != (const iterator& i) const { return (kp_ != i.kp_); } private: const KeySet::KeyPart* kp_; }; const iterator end() { return iterator(static_cast(NULL)); } const iterator find(const KeySet::KeyPart& kp) { unsigned int idx(kp.hash()); for (unsigned int i(0); i < FIRST_DEPTH; ++i, ++idx) { idx &= FIRST_MASK; if (0 !=first_[idx] && KeySet::KeyPart(first_[idx]).matches(kp)) { return iterator(&first_[idx]); } } if (second_ && second_->size() > 0) { KeyPartSet::iterator i2(second_->find(kp)); if (i2 != second_->end()) return iterator(&(*i2)); } return end(); } std::pair insert(const KeySet::KeyPart& kp) { unsigned int idx(kp.hash()); for (unsigned int i(0); i < FIRST_DEPTH; ++i, ++idx) { idx &= FIRST_MASK; if (0 == first_[idx]) { first_[idx] = kp.ptr(); ++first_size_; return 
std::pair(iterator(&first_[idx]), true); } if (KeySet::KeyPart(first_[idx]).matches(kp)) { return std::pair(iterator(&first_[idx]),false); } } if (!second_) { second_ = new KeyPartSet(); // log_info << "Requesting heap at load factor " // << first_size_ << '/' << FIRST_SIZE << " = " // << (double(first_size_)/FIRST_SIZE); } std::pair res = second_->insert(kp); return std::pair(iterator(&(*res.first)), res.second); } iterator erase(iterator it) { unsigned int idx(it->hash()); for (unsigned int i(0); i < FIRST_DEPTH; ++i, ++idx) { idx &= FIRST_MASK; if (first_[idx] && KeySet::KeyPart(first_[idx]).matches(*it)) { first_[idx] = 0; --first_size_; return iterator(&first_[(idx + 1) & FIRST_MASK]); } } if (second_ && second_->size() > 0) { KeyPartSet::iterator it2(second_->erase(second_->find(*it))); if (it2 != second_->end()) return iterator(&(*it2)); } return end(); } size_t size() const { return (first_size_ + second_->size()); } private: static unsigned int const FIRST_MASK = 0x3f; // 63 static unsigned int const FIRST_SIZE = FIRST_MASK + 1; static unsigned int const FIRST_DEPTH = 3; const gu::byte_t* first_[FIRST_SIZE]; KeyPartSet* second_; unsigned int first_size_; }; #endif /* 1 */ class KeyPart { public: KeyPart (KeySet::Version const ver = KeySet::FLAT16) : hash_ (), part_ (0), value_(0), size_ (0), ver_ (ver), own_ (false) { assert (ver_); } /* to throw in KeyPart() ctor in case it is a duplicate */ class DUPLICATE {}; KeyPart (KeyParts& added, KeySetOut& store, const KeyPart* parent, const KeyData& kd, int const part_num, int const ws_ver, int const alignment); KeyPart (const KeyPart& k) : hash_ (k.hash_), part_ (k.part_), value_(k.value_), size_ (k.size_), ver_ (k.ver_), own_ (k.own_) { assert (ver_); k.own_ = false; } friend void swap (KeyPart& l, KeyPart& r) { using std::swap; swap (l.hash_, r.hash_ ); swap (l.part_, r.part_ ); swap (l.value_, r.value_); swap (l.size_, r.size_ ); swap (l.ver_, r.ver_ ); swap (l.own_, r.own_ ); } KeyPart& operator= (KeyPart k) 
{ swap(*this, k); return *this; } bool match (const void* const v, size_t const s) const { return (size_ == s && !(::memcmp (value_, v, size_))); } int prefix() const { return (part_ ? part_->prefix() : 0); } void acquire() { gu::byte_t* tmp = new gu::byte_t[size_]; std::copy(value_, value_ + size_, tmp); value_ = tmp; own_ = true; } void release() { if (own_) { // log_debug << "released: " << gu::Hexdump(value_, size_, true); delete[] value_; value_ = 0; } own_ = false; } ~KeyPart() { release(); } void print (std::ostream& os) const; typedef gu::RecordSet::GatherVector GatherVector; private: gu::Hash hash_; const KeySet::KeyPart* part_; mutable const gu::byte_t* value_; unsigned int size_; KeySet::Version ver_; mutable bool own_; }; /* class KeySetOut::KeyPart */ KeySetOut () // empty ctor for slave TrxHandle : gu::RecordSetOut(), added_(), prev_ (), new_ (), version_() {} KeySetOut (gu::byte_t* reserved, size_t reserved_size, const BaseName& base_name, KeySet::Version const version, gu::RecordSet::Version const rsv, int const ws_ver) : gu::RecordSetOut ( reserved, reserved_size, base_name, check_type(version), rsv ), added_(), prev_ (), new_ (), version_(version), ws_ver_(ws_ver) { assert (version_ != KeySet::EMPTY); assert ((uintptr_t(reserved) % GU_WORD_BYTES) == 0); assert (ws_ver <= 5); KeyPart zero(version_); prev_().push_back(zero); } ~KeySetOut () {} size_t append (const KeyData& kd); KeySet::Version version () { return count() ? 
version_ : KeySet::EMPTY; } private: // depending on version we may pack data differently KeyParts added_; gu::Vector prev_; gu::Vector new_; KeySet::Version version_; int ws_ver_; static gu::RecordSet::CheckType check_type (KeySet::Version ver) { switch (ver) { case KeySet::EMPTY: break; /* Can't create EMPTY KeySetOut */ default: return gu::RecordSet::CHECK_MMH128; } KeySet::throw_version(ver); } }; /* class KeySetOut */ inline std::ostream& operator << (std::ostream& os, const KeySetOut::KeyPart& kp) { kp.print (os); return os; } class KeySetIn : public gu::RecordSetIn { public: KeySetIn (KeySet::Version ver, const gu::byte_t* buf, size_t size) : gu::RecordSetIn(buf, size, false), version_(ver) {} KeySetIn () : gu::RecordSetIn(), version_(KeySet::EMPTY) {} void init (KeySet::Version ver, const gu::byte_t* buf, size_t size) { gu::RecordSetIn::init(buf, size, false); version_ = ver; } KeySet::KeyPart const next () const { return gu::RecordSetIn::next(); } private: KeySet::Version version_; }; /* class KeySetIn */ #if defined(__GNUG__) # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic pop # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) #endif } /* namespace galera */ #endif // GALERA_KEY_SET_HPP galera-26.4.3/galera/src/saved_state.cpp0000664000177500017540000001744513540715002016421 0ustar dbartmy// // Copyright (C) 2012-2018 Codership Oy // #include "saved_state.hpp" #include #include #include #define __STDC_FORMAT_MACROS #include #include #include namespace galera { #define VERSION "2.1" #define MAX_SIZE 256 SavedState::SavedState (const std::string& file) : fs_ (0), filename_ (file), uuid_ (WSREP_UUID_UNDEFINED), seqno_ (WSREP_SEQNO_UNDEFINED), safe_to_bootstrap_(true), unsafe_ (0), corrupt_ (false), mtx_ (), written_uuid_ (uuid_), current_len_ (0), total_marks_ (0), total_locks_ (0), total_writes_ (0) { GU_DBUG_EXECUTE("galera_init_invalidate_state", unlink(file.c_str());); std::ifstream 
ifs(file.c_str()); if (ifs.fail()) { log_warn << "Could not open state file for reading: '" << file << '\''; } fs_ = fopen(file.c_str(), "a"); if (!fs_) { gu_throw_error(errno) << "Could not open state file for writing: '" << file << "'. Check permissions and/or disk space."; } // We take exclusive lock on state file in order to avoid possibility // of two Galera replicators sharing the same state file. struct flock flck; flck.l_start = 0; flck.l_len = 0; flck.l_type = F_WRLCK; flck.l_whence = SEEK_SET; if (::fcntl(fileno(fs_), F_SETLK, &flck)) { log_warn << "Could not get exclusive lock on state file: " << file << ": " << ::strerror(errno); return; } std::string version("0.8"); std::string line; while (getline(ifs, line), ifs.good()) { std::istringstream istr(line); std::string param; istr >> param; if (param[0] == '#') { log_debug << "read comment: " << line; } else if (param == "version:") { istr >> version; // nothing to do with this yet log_debug << "read version: " << version; } else if (param == "uuid:") { try { istr >> uuid_; log_debug << "read saved state uuid: " << uuid_; } catch (gu::Exception& e) { log_error << e.what(); uuid_ = WSREP_UUID_UNDEFINED; } } else if (param == "seqno:") { istr >> seqno_; log_debug << "read saved state seqno: " << seqno_; } else if (param == "safe_to_bootstrap:") { istr >> safe_to_bootstrap_; log_debug << "read safe_to_bootstrap: " << safe_to_bootstrap_; } } log_info << "Found saved state: " << uuid_ << ':' << seqno_ << ", safe_to_bootstrap: " << safe_to_bootstrap_; #if 0 // we'll probably have it legal if (seqno_ < 0 && uuid_ != WSREP_UUID_UNDEFINED) { log_warn << "Negative seqno with valid UUID: " << uuid_ << ':' << seqno_ << ". 
Discarding UUID."; uuid_ = WSREP_UUID_UNDEFINED; } #endif written_uuid_ = uuid_; current_len_ = ftell (fs_); log_debug << "Initialized current_len_ to " << current_len_; if (current_len_ <= MAX_SIZE) { fs_ = freopen (file.c_str(), "r+", fs_); } else // normalize file contents { fs_ = freopen (file.c_str(), "w+", fs_); // truncate current_len_ = 0; set (uuid_, seqno_, safe_to_bootstrap_); } } SavedState::~SavedState () { if (fs_) { // Closing file descriptor should release the lock, but still... struct flock flck; flck.l_start = 0; flck.l_len = 0; flck.l_type = F_UNLCK; flck.l_whence = SEEK_SET; if (::fcntl(fileno(fs_), F_SETLK, &flck)) { log_warn << "Could not unlock state file: " << ::strerror(errno); } fclose(fs_); } } void SavedState::get (wsrep_uuid_t& u, wsrep_seqno_t& s, bool& safe_to_bootstrap) { gu::Lock lock(mtx_); u = uuid_; s = seqno_; safe_to_bootstrap = safe_to_bootstrap_; } void SavedState::set (const wsrep_uuid_t& u, wsrep_seqno_t s, bool safe_to_bootstrap) { gu::Lock lock(mtx_); ++total_locks_; if (corrupt_) return; uuid_ = u; seqno_ = s; safe_to_bootstrap_ = safe_to_bootstrap; if (0 == unsafe_()) write_file (u, s, safe_to_bootstrap); else log_debug << "Not writing state: unsafe counter is " << unsafe_(); } /* the goal of unsafe_, written_uuid_, current_len_ below is * 1. avoid unnecessary mutex locks * 2. if locked - avoid unnecessary file writes * 3. 
if writing - avoid metadata operations, write over existing space */ void SavedState::mark_unsafe() { ++total_marks_; if (1 == unsafe_.add_and_fetch (1)) { gu::Lock lock(mtx_); ++total_locks_; assert (unsafe_() > 0); if (written_uuid_ != WSREP_UUID_UNDEFINED) { write_file (WSREP_UUID_UNDEFINED, WSREP_SEQNO_UNDEFINED, safe_to_bootstrap_); } } } void SavedState::mark_safe() { ++total_marks_; long count = unsafe_.sub_and_fetch (1); assert (count >= 0); if (0 == count) { gu::Lock lock(mtx_); ++total_locks_; if (0 == unsafe_() && (written_uuid_ != uuid_ || seqno_ >= 0) && !corrupt_) { /* this will write down proper seqno if set() was called too early * (in unsafe state) */ write_file (uuid_, seqno_, safe_to_bootstrap_); } } } void SavedState::mark_corrupt() { gu::Lock lock(mtx_); ++total_locks_; if (corrupt_) return; uuid_ = WSREP_UUID_UNDEFINED; seqno_ = WSREP_SEQNO_UNDEFINED; corrupt_ = true; write_file (WSREP_UUID_UNDEFINED, WSREP_SEQNO_UNDEFINED, safe_to_bootstrap_); } void SavedState::mark_uncorrupt(const wsrep_uuid_t& u, wsrep_seqno_t s) { gu::Lock lock(mtx_); ++total_locks_; if (!corrupt_) return; uuid_ = u; seqno_ = s; unsafe_ = 0; corrupt_ = false; write_file (u, s, safe_to_bootstrap_); } void SavedState::write_file(const wsrep_uuid_t& u, const wsrep_seqno_t s, bool safe_to_bootstrap) { assert (current_len_ <= MAX_SIZE); if (fs_) { if (s >= 0) { log_debug << "Saving state: " << u << ':' << s; } char buf[MAX_SIZE]; int state_len = snprintf (buf, MAX_SIZE - 1, "# GALERA saved state" "\nversion: " VERSION "\nuuid: " GU_UUID_FORMAT "\nseqno: %" PRId64 "\nsafe_to_bootstrap: %d\n", GU_UUID_ARGS(&u), s, safe_to_bootstrap); int write_size; for (write_size = state_len; write_size < current_len_; ++write_size) buf[write_size] = ' '; // overwrite whatever is there currently rewind(fs_); if (fwrite(buf, write_size, 1, fs_) == 0) { log_warn << "write file(" << filename_ << ") failed(" << strerror(errno) << ")"; return; } if (fflush(fs_) != 0) { log_warn << "fflush file(" << 
filename_ << ") failed(" << strerror(errno) << ")"; return; } if (fsync(fileno(fs_)) < 0) { log_warn << "fsync file(" << filename_ << ") failed(" << strerror(errno) << ")"; return; } current_len_ = state_len; written_uuid_ = u; ++total_writes_; } else { log_debug << "Can't save state: output stream is not open."; } } } /* namespace galera */ galera-26.4.3/galera/src/key_entry_os.hpp0000664000177500017540000001073513540715002016631 0ustar dbartmy// // Copyright (C) 2012 Codership Oy // #ifndef GALERA_KEY_ENTRY_OS_HPP #define GALERA_KEY_ENTRY_OS_HPP #include "key_os.hpp" namespace galera { class TrxHandleSlave; class KeyEntryOS { public: KeyEntryOS(const KeyOS& row_key) : key_(row_key), ref_trx_(0), ref_full_trx_(0), ref_shared_trx_(0), ref_full_shared_trx_(0) {} template KeyEntryOS(int version, Ci begin, Ci end, uint8_t flags) : key_(version, begin, end, flags), ref_trx_(0), ref_full_trx_(0), ref_shared_trx_(0), ref_full_shared_trx_(0) {} KeyEntryOS(const KeyEntryOS& other) : key_(other.key_), ref_trx_(other.ref_trx_), ref_full_trx_(other.ref_full_trx_), ref_shared_trx_(other.ref_shared_trx_), ref_full_shared_trx_(other.ref_full_shared_trx_) {} ~KeyEntryOS() { assert(ref_trx_ == 0); assert(ref_full_trx_ == 0); assert(ref_shared_trx_ == 0); assert(ref_full_shared_trx_ == 0); } const KeyOS& get_key() const { return key_; } const KeyOS& get_key(int version) const { return key_; } void ref(TrxHandleSlave* trx, bool full_key) { #ifndef NDEBUG assert_ref(trx, full_key); #endif /* NDEBUG */ ref_trx_ = trx; if (full_key == true) { ref_full_trx_ = trx; } } void unref(TrxHandleSlave* trx, bool full_key) { assert(ref_trx_ != 0); if (ref_trx_ == trx) ref_trx_ = 0; if (full_key == true && ref_full_trx_ == trx) { ref_full_trx_ = 0; } else { #ifndef NDEBUG assert_unref(trx); #endif /* NDEBUG */ } } void ref_shared(TrxHandleSlave* trx, bool full_key) { #ifndef NDEBUG assert_ref_shared(trx, full_key); #endif /* NDEBUG */ ref_shared_trx_ = trx; if (full_key == true) { 
ref_full_shared_trx_ = trx; } } void unref_shared(TrxHandleSlave* trx, bool full_key) { assert(ref_shared_trx_ != 0); if (ref_shared_trx_ == trx) ref_shared_trx_ = 0; if (full_key == true && ref_full_shared_trx_ == trx) { ref_full_shared_trx_ = 0; } else { #ifndef NDEBUG assert_unref_shared(trx); #endif /* NDEBUG */ } } const TrxHandleSlave* ref_trx() const { return ref_trx_; } const TrxHandleSlave* ref_full_trx() const { return ref_full_trx_; } const TrxHandleSlave* ref_shared_trx() const { return ref_shared_trx_; } const TrxHandleSlave* ref_full_shared_trx() const { return ref_full_shared_trx_; } size_t size() const { return key_.size() + sizeof(*this); } private: void operator=(const KeyEntryOS&); KeyOS key_; TrxHandleSlave* ref_trx_; TrxHandleSlave* ref_full_trx_; TrxHandleSlave* ref_shared_trx_; TrxHandleSlave* ref_full_shared_trx_; #ifndef NDEBUG void assert_ref(TrxHandleSlave*, bool) const; void assert_unref(TrxHandleSlave*) const; void assert_ref_shared(TrxHandleSlave*, bool) const; void assert_unref_shared(TrxHandleSlave*) const; #endif /* NDEBUG */ }; class KeyEntryPtrHash { public: size_t operator()(const KeyEntryOS* const ke) const { return ke->get_key().hash(); } }; class KeyEntryPtrHashAll { public: size_t operator()(const KeyEntryOS* const ke) const { return ke->get_key().hash_with_flags(); } }; class KeyEntryPtrEqual { public: bool operator()(const KeyEntryOS* const left, const KeyEntryOS* const right) const { return left->get_key() == right->get_key(); } }; class KeyEntryPtrEqualAll { public: bool operator()(const KeyEntryOS* const left, const KeyEntryOS* const right) const { return left->get_key().equal_all(right->get_key()); } }; } #endif // GALERA_KEY_ENTRY_HPP galera-26.4.3/galera/SConscript0000664000177500017540000000006413540715002014623 0ustar dbartmy SConscript(['src/SConscript', 'tests/SConscript']) galera-26.4.3/galera/tests/0000775000177500017540000000000013540715002013753 5ustar 
dbartmygalera-26.4.3/galera/tests/write_set_ng_check.cpp0000664000177500017540000002616113540715002020313 0ustar dbartmy/* Copyright (C) 2013-2018 Codership Oy * * $Id$ */ #undef NDEBUG #include "test_key.hpp" #include "../src/write_set_ng.hpp" #include "gu_uuid.h" #include "gu_logger.hpp" #include "gu_hexdump.hpp" #include using namespace galera; static void ver3_basic(gu::RecordSet::Version const rsv, WriteSetNG::Version const wsv) { int const alignment(rsv >= gu::RecordSet::VER2 ? GU_MIN_ALIGNMENT : 1); uint16_t const flag1(0xabcd); wsrep_uuid_t source; gu_uuid_generate (reinterpret_cast(&source), NULL, 0); wsrep_conn_id_t const conn(652653); wsrep_trx_id_t const trx(99994952); std::string const dir("."); wsrep_trx_id_t trx_id(1); WriteSetOut wso (dir, trx_id, KeySet::FLAT8A, 0, 0, flag1, rsv, wsv); fail_unless (wso.is_empty()); // keep WSREP_KEY_SHARED here, see loop below TestKey tk0(KeySet::MAX_VERSION, WSREP_KEY_SHARED, true, "a0"); wso.append_key(tk0()); fail_if (wso.is_empty()); uint64_t const data_out_volatile(0xaabbccdd); uint32_t const data_out_persistent(0xffeeddcc); uint16_t const flag2(0x1234); { uint64_t const d(data_out_volatile); wso.append_data (&d, sizeof(d), true); } wso.append_data (&data_out_persistent, sizeof(data_out_persistent), false); wso.add_flags (flag2); uint16_t const flags(flag1 | flag2); WriteSetNG::GatherVector out; size_t const out_size(wso.gather(source, conn, trx, out)); log_info << "Gather size: " << out_size << ", buf count: " << out->size(); fail_if((out_size % alignment) != 0); wsrep_seqno_t const last_seen(1); wsrep_seqno_t const seqno(2); int const pa_range(seqno - last_seen); wso.finalize(last_seen, 0); /* concatenate all out buffers */ std::vector in; in.reserve(out_size); for (size_t i(0); i < out->size(); ++i) { const gu::byte_t* ptr(static_cast(out[i].ptr)); in.insert (in.end(), ptr, ptr + out[i].size); } fail_if (in.size() != out_size); gu::Buf const in_buf = { in.data(), static_cast(in.size()) }; int const 
P_SHARED(KeySet::KeyPart::prefix(WSREP_KEY_SHARED, wsv)); /* read ws buffer and "certify" */ { mark_point(); WriteSetIn wsi(in_buf); mark_point(); wsi.verify_checksum(); wsrep_seqno_t const ls(wsi.last_seen()); fail_if (ls != last_seen, "Found last seen: %lld, expected: %lld", ls, last_seen); fail_if (wsi.flags() != flags); fail_if (0 == wsi.timestamp()); fail_if (wsi.annotated()); mark_point(); const KeySetIn& ksi(wsi.keyset()); fail_if (ksi.count() != 1); mark_point(); int shared(0); for (int i(0); i < ksi.count(); ++i) { KeySet::KeyPart kp(ksi.next()); shared += (kp.prefix() == P_SHARED); } fail_unless(shared > 0); wsi.verify_checksum(); wsi.set_seqno (seqno, pa_range); fail_unless(wsi.pa_range() == pa_range, "wsi.pa_range = %lld\n pa_range = %lld", wsi.pa_range(), pa_range); fail_unless(wsi.certified()); } /* repeat reading buffer after "certification" */ { WriteSetIn wsi(in_buf); mark_point(); wsi.verify_checksum(); fail_unless(wsi.certified()); fail_if (wsi.seqno() != seqno); fail_if (wsi.flags() != (flags | WriteSetNG::F_CERTIFIED)); fail_if (0 == wsi.timestamp()); mark_point(); const KeySetIn& ksi(wsi.keyset()); fail_if (ksi.count() != 1); mark_point(); int shared(0); for (int i(0); i < ksi.count(); ++i) { KeySet::KeyPart kp(ksi.next()); shared += (kp.prefix() == P_SHARED); } fail_unless(shared > 0); wsi.verify_checksum(); mark_point(); const DataSetIn& dsi(wsi.dataset()); fail_if (dsi.count() != 1); mark_point(); gu::Buf const d(dsi.next()); fail_if (d.size != sizeof(data_out_volatile) + sizeof(data_out_persistent)); const char* dptr = static_cast(d.ptr); fail_if (*(reinterpret_cast(dptr)) != data_out_volatile); fail_if (*(reinterpret_cast (dptr + sizeof(data_out_volatile))) != data_out_persistent); mark_point(); const DataSetIn& usi(wsi.unrdset()); fail_if (usi.count() != 0); fail_if (usi.size() != 0); } mark_point(); try /* this is to test checksum after set_seqno() */ { WriteSetIn wsi(in_buf); mark_point(); wsi.verify_checksum(); 
fail_unless(wsi.certified()); fail_if (wsi.pa_range() != pa_range); fail_if (wsi.seqno() != seqno); fail_if (memcmp(&wsi.source_id(), &source, sizeof(source))); fail_if (wsi.conn_id() != conn); fail_if (wsi.trx_id() != trx); } catch (gu::Exception& e) { fail_if (e.get_errno() != EINVAL); } mark_point(); /* this is to test reassembly without keys and unordered data after gather() * + late initialization */ try { WriteSetIn tmp_wsi(in_buf); WriteSetIn::GatherVector out; mark_point(); tmp_wsi.verify_checksum(); gu_trace(tmp_wsi.gather(out, false, false)); // no keys or unrd /* concatenate all out buffers */ std::vector in; in.reserve(out_size); for (size_t i(0); i < out->size(); ++i) { const gu::byte_t* ptr (static_cast(out[i].ptr)); in.insert (in.end(), ptr, ptr + out[i].size); } mark_point(); gu::Buf tmp_buf = { in.data(), static_cast(in.size()) }; WriteSetIn wsi; // first - create an empty writeset wsi.read_buf(tmp_buf); // next - initialize from buffer mark_point(); wsi.verify_checksum(); fail_unless(wsi.certified()); fail_if (wsi.pa_range() != pa_range); fail_if (wsi.seqno() != seqno); fail_if (wsi.keyset().count() != 0); fail_if (wsi.dataset().count() == 0); fail_if (wsi.unrdset().count() != 0); } catch (gu::Exception& e) { fail_if (e.get_errno() != EINVAL); } in[in.size() - 1] ^= 1; // corrupted the last byte (payload) mark_point(); try /* this is to test payload corruption */ { WriteSetIn wsi(in_buf); mark_point(); wsi.verify_checksum(); fail("payload corruption slipped through 1"); } catch (gu::Exception& e) { fail_if (e.get_errno() != EINVAL); } try /* this is to test background checksumming + corruption */ { WriteSetIn wsi(in_buf, 2); mark_point(); try { wsi.verify_checksum(); fail("payload corruption slipped through 2"); } catch (gu::Exception& e) { fail_if (e.get_errno() != EINVAL); } } catch (std::exception& e) { fail("%s", e.what()); } in[2] ^= 1; // corrupted 3rd byte of header try /* this is to test header corruption */ { WriteSetIn wsi(in_buf, 2 /* 
this should postpone payload checksum */); wsi.verify_checksum(); fail("header corruption slipped through"); } catch (gu::Exception& e) { fail_if (e.get_errno() != EINVAL); } } START_TEST (ver3_basic_rsv1) { ver3_basic(gu::RecordSet::VER1, WriteSetNG::VER3); } END_TEST START_TEST (ver3_basic_rsv2_wsv3) { ver3_basic(gu::RecordSet::VER2, WriteSetNG::VER3); } END_TEST START_TEST (ver3_basic_rsv2_wsv4) { ver3_basic(gu::RecordSet::VER2, WriteSetNG::VER4); } END_TEST static void ver3_annotation(gu::RecordSet::Version const rsv) { int const alignment(rsv >= gu::RecordSet::VER2 ? GU_MIN_ALIGNMENT : 1); uint16_t const flag1(0xabcd); wsrep_uuid_t source; gu_uuid_generate (reinterpret_cast(&source), NULL, 0); wsrep_conn_id_t const conn(652653); wsrep_trx_id_t const trx(99994952); std::string const dir("."); wsrep_trx_id_t trx_id(1); WriteSetOut wso (dir, trx_id, KeySet::FLAT16, 0, 0, flag1, rsv, WriteSetNG::VER3); fail_unless (wso.is_empty()); TestKey tk0(KeySet::MAX_VERSION, WSREP_KEY_SHARED, true, "key0"); wso.append_key(tk0()); fail_if (wso.is_empty()); uint64_t const data(0xaabbccdd); std::string const annotation("0xaabbccdd"); uint16_t const flag2(0x1234); wso.append_data (&data, sizeof(data), true); wso.append_annotation (annotation.c_str(), annotation.size(), true); wso.add_flags (flag2); uint16_t const flags(flag1 | flag2); WriteSetNG::GatherVector out; size_t const out_size(wso.gather(source, conn, trx, out)); log_info << "Gather size: " << out_size << ", buf count: " << out->size(); fail_if((out_size % alignment) != 0); fail_if(out_size < (sizeof(data) + annotation.size())); wsrep_seqno_t const last_seen(1); wso.finalize(last_seen, 0); /* concatenate all out buffers */ std::vector in; in.reserve(out_size); for (size_t i(0); i < out->size(); ++i) { const gu::byte_t* ptr(static_cast(out[i].ptr)); in.insert (in.end(), ptr, ptr + out[i].size); } fail_if (in.size() != out_size); gu::Buf const in_buf = { in.data(), static_cast(in.size()) }; /* read buffer into WriteSetIn 
*/ mark_point(); WriteSetIn wsi(in_buf); mark_point(); wsi.verify_checksum(); wsrep_seqno_t const ls(wsi.last_seen()); fail_if (ls != last_seen, "Found last seen: %lld, expected: %lld", ls, last_seen); fail_if (wsi.flags() != flags); fail_if (0 == wsi.timestamp()); fail_if (!wsi.annotated()); /* check that annotation has survived */ std::ostringstream os; wsi.write_annotation(os); std::string const res(os.str().c_str()); fail_if(annotation.length() != res.length(), "Initial ann. length: %zu, resulting ann.length: %zu", annotation.length(), res.length()); fail_if(annotation != res, "Initial annotation: '%s', resulting annotation: '%s'", annotation.c_str(), res.c_str()); } START_TEST (ver3_annotation_rsv1) { ver3_annotation(gu::RecordSet::VER1); } END_TEST START_TEST (ver3_annotation_rsv2) { ver3_annotation(gu::RecordSet::VER2); } END_TEST Suite* write_set_ng_suite () { Suite* s = suite_create ("WriteSet"); TCase* t = tcase_create ("WriteSet basic"); tcase_add_test (t, ver3_basic_rsv1); tcase_add_test (t, ver3_basic_rsv2_wsv3); tcase_add_test (t, ver3_basic_rsv2_wsv4); tcase_set_timeout(t, 60); suite_add_tcase (s, t); t = tcase_create ("WriteSet annotation"); tcase_add_test (t, ver3_annotation_rsv1); tcase_add_test (t, ver3_annotation_rsv2); tcase_set_timeout(t, 60); suite_add_tcase (s, t); return s; } galera-26.4.3/galera/tests/test_key.hpp0000664000177500017540000000434713540715002016323 0ustar dbartmy/* Copyright (C) 2013-2018 Codership Oy * * $Id$ */ #ifndef _TEST_KEY_HPP_ #define _TEST_KEY_HPP_ #include "../src/key_data.hpp" #include "../src/key_set.hpp" // for version_to_hash_size #include #include using namespace galera; class TestKey { public: TestKey (int a, int ver, wsrep_key_type_t type, std::vector parts, bool copy = true) : parts_ (), ver_ (ver), type_ (type), copy_ (copy) { parts_.reserve(parts.size()); for (size_t i = 0; i < parts.size(); ++i) { size_t p_len(parts[i] ? 
strlen(parts[i]) + 1 : 0); wsrep_buf_t b = { parts[i], p_len }; parts_.push_back(b); } } TestKey (int ver, wsrep_key_type_t type, bool copy, const char* part0, const char* part1 = 0, const char* part2 = 0, const char* part3 = 0, const char* part4 = 0, const char* part5 = 0, const char* part6 = 0, const char* part7 = 0, const char* part8 = 0, const char* part9 = 0 ) : parts_ (), ver_ (ver), type_ (type), copy_ (copy) { parts_.reserve(10); (push_back(part0) && push_back(part1) && push_back(part2) && push_back(part3) && push_back(part4) && push_back(part5) && push_back(part6) && push_back(part7) && push_back(part8) && push_back(part9)); } KeyData operator() () { return KeyData (ver_, parts_.data(), parts_.size(), type_, copy_); } private: std::vector parts_; int const ver_; wsrep_key_type_t const type_; bool const copy_; bool push_back (const char* const p) { size_t p_len(-1); if (p && (p_len = strlen(p) + 1) > 0) { wsrep_buf_t b = { p, p_len }; parts_.push_back(b); return true; } return false; } }; #endif /* _TEST_KEY_HPP_ */ galera-26.4.3/galera/tests/defaults_check.cpp0000664000177500017540000002517613540715002017436 0ustar dbartmy// // Copyright (C) 2018 Codership Oy // #include extern "C" int wsrep_loader(wsrep_t*); #include #include #include #include #include // GU_WORDSIZE #include #include #include #include #include #include // unlink() /* "magic" value for parameters which defaults a variable */ static const char* const VARIABLE = "variable"; static const char* Defaults[] = { "base_dir", ".", "base_port", "4567", "cert.log_conflicts", "no", "cert.optimistic_pa", "yes", "debug", "no", #ifndef NDEBUG "dbug", "", #endif "evs.auto_evict", "0", "evs.causal_keepalive_period", "PT1S", "evs.debug_log_mask", "0x1", "evs.delay_margin", "PT1S", "evs.delayed_keep_period", "PT30S", "evs.inactive_check_period", "PT0.5S", "evs.inactive_timeout", "PT15S", "evs.info_log_mask", "0", "evs.install_timeout", "PT7.5S", "evs.join_retrans_period", "PT1S", "evs.keepalive_period", 
"PT1S", "evs.max_install_timeouts", "3", "evs.send_window", "4", "evs.stats_report_period", "PT1M", "evs.suspect_timeout", "PT5S", "evs.use_aggregate", "true", "evs.user_send_window", "2", "evs.version", "1", "evs.view_forget_timeout", "P1D", #ifndef NDEBUG "gcache.debug", "0", #endif "gcache.dir", ".", "gcache.keep_pages_size", "0", "gcache.mem_size", "0", "gcache.name", "galera.cache", "gcache.page_size", "128M", "gcache.recover", "yes", "gcache.size", "128M", "gcomm.thread_prio", "", "gcs.fc_debug", "0", "gcs.fc_factor", "1.0", "gcs.fc_limit", "16", "gcs.fc_master_slave", "no", "gcs.max_packet_size", "64500", "gcs.max_throttle", "0.25", #if (GU_WORDSIZE == 32) "gcs.recv_q_hard_limit", "2147483647", #elif (GU_WORDSIZE == 64) "gcs.recv_q_hard_limit", "9223372036854775807", #endif "gcs.recv_q_soft_limit", "0.25", "gcs.sync_donor", "no", "gmcast.listen_addr", "tcp://0.0.0.0:4567", "gmcast.mcast_addr", "", "gmcast.mcast_ttl", "1", "gmcast.peer_timeout", "PT3S", "gmcast.segment", "0", "gmcast.time_wait", "PT5S", "gmcast.version", "0", // "ist.recv_addr", no default, "pc.announce_timeout", "PT3S", "pc.checksum", "false", "pc.ignore_quorum", "false", "pc.ignore_sb", "false", "pc.linger", "PT20S", "pc.npvo", "false", "pc.recovery", "true", "pc.version", "0", "pc.wait_prim", "true", "pc.wait_prim_timeout", "PT30S", "pc.weight", "1", "protonet.backend", "asio", "protonet.version", "0", "repl.causal_read_timeout", "PT30S", "repl.commit_order", "3", "repl.key_format", "FLAT8", "repl.max_ws_size", "2147483647", "repl.proto_max", "10", #ifndef NDEBUG "signal", "", #endif "socket.checksum", "2", "socket.recv_buf_size", "212992", // "socket.ssl", no default, // "socket.ssl_cert", no default, // "socket.ssl_cipher", no default, // "socket.ssl_compression", no default, // "socket.ssl_key", no default, NULL }; typedef std::map DefaultsMap; static void fill_in_expected(DefaultsMap& map, const char* def_list[]) { for (int i(0); def_list[i] != NULL; i += 2) { std::pair 
param(def_list[i], def_list[i+1]); DefaultsMap::iterator it(map.insert(param).first); fail_if(it == map.end(), "Failed to insert KV pair: %s = %s", param.first.c_str(), param.second.c_str()); } } static void fill_in_real(DefaultsMap& map, wsrep_t& provider) { std::vector > kv_pairs; char* const opt_string(provider.options_get(&provider)); gu::Config::parse(kv_pairs, opt_string); ::free(opt_string); for (unsigned int i(0); i < kv_pairs.size(); ++i) { std::pair const trimmed(kv_pairs[i].first, kv_pairs[i].second); DefaultsMap::iterator it(map.insert(trimmed).first); fail_if(it == map.end(), "Failed to insert KV pair: %s = %s", trimmed.first.c_str(), trimmed.second.c_str()); } } static void log_cb(wsrep_log_level_t l, const char* c) { if (l <= WSREP_LOG_ERROR) // only log errors to avoid output clutter { std::cerr << c << '\n'; } } struct app_ctx { gu::Mutex mtx_; gu::Cond cond_; wsrep_t provider_; bool connected_; app_ctx() : mtx_(), cond_(), provider_(), connected_(false) {} }; static enum wsrep_cb_status conn_cb(void* ctx, const wsrep_view_info_t* view) { (void)view; app_ctx* c(static_cast(ctx)); gu::Lock lock(c->mtx_); if (!c->connected_) { c->connected_ = true; c->cond_.broadcast(); } else { assert(0); } return WSREP_CB_SUCCESS; } static enum wsrep_cb_status view_cb(void* app_ctx, void* recv_ctx, const wsrep_view_info_t* view, const char* state, size_t state_len) { /* make compilers happy about unused arguments */ (void)app_ctx; (void)recv_ctx; (void)view; (void)state; (void)state_len; return WSREP_CB_SUCCESS; } static enum wsrep_cb_status synced_cb(void* app_ctx) { (void)app_ctx; return WSREP_CB_SUCCESS; } static void* recv_func(void* ctx) { app_ctx* c(static_cast(ctx)); wsrep_t& provider(c->provider_); wsrep_status_t const ret(provider.recv(&provider, NULL)); fail_if(WSREP_OK != ret, "recv() returned %d", ret); return NULL; } START_TEST(defaults) { DefaultsMap expected_defaults, real_defaults; fill_in_expected(expected_defaults, Defaults); app_ctx ctx; wsrep_t& 
provider(ctx.provider_); int ret = wsrep_status_t(wsrep_loader(&provider)); fail_if(WSREP_OK != ret); struct wsrep_init_args init_args = { &ctx, // void* app_ctx /* Configuration parameters */ NULL, // const char* node_name NULL, // const char* node_address NULL, // const char* node_incoming NULL, // const char* data_dir NULL, // const char* options 0, // int proto_ver /* Application initial state information. */ NULL, // const wsrep_gtid_t* state_id NULL, // const wsrep_buf_t* state /* Application callbacks */ log_cb, // wsrep_log_cb_t logger_cb conn_cb,// wsrep_connected_cb_t connected_cb view_cb,// wsrep_view_cb_t view_handler_cb NULL, // wsrep_sst_request_cb_t sst_request_cb NULL, // wsrep_encrypt_cb_t encrypt_cb /* Applier callbacks */ NULL, // wsrep_apply_cb_t apply_cb NULL, // wsrep_unordered_cb_t unordered_cb /* State Snapshot Transfer callbacks */ NULL, // wsrep_sst_donate_cb_t sst_donate_cb synced_cb,// wsrep_synced_cb_t synced_cb }; ret = provider.init(&provider, &init_args); fail_if(WSREP_OK != ret); /* some defaults are set only on connection attmept */ ret = provider.connect(&provider, "cluster_name", "gcomm://", "", false); fail_if(WSREP_OK != ret, "connect() returned %d", ret); fill_in_real(real_defaults, provider); mark_point(); if (WSREP_OK == ret) /* if connect() was a success, need to disconnect() */ { /* some configuration change events need to be received */ gu_thread_t recv_thd; gu_thread_create(&recv_thd, NULL, recv_func, &ctx); mark_point(); /* @todo:there is a race condition in the library when disconnect() is * called right after connect() */ { /* sync with connect callback */ gu::Lock lock(ctx.mtx_); while(!ctx.connected_) lock.wait(ctx.cond_); } mark_point(); ret = provider.disconnect(&provider); fail_if(WSREP_OK != ret, "disconnect() returned %d", ret); ret = gu_thread_join(recv_thd, NULL); fail_if(0 != ret, "Could not join thread: %d (%s)", ret, strerror(ret)); } provider.free(&provider); mark_point(); /* cleanup files */ 
::unlink(real_defaults.find("gcache.name")->second.c_str()); ::unlink("grastate.dat"); /* now compare expected and real maps */ std::ostringstream err; DefaultsMap::iterator expected(expected_defaults.begin()); while (expected != expected_defaults.end()) { DefaultsMap::iterator real(real_defaults.find(expected->first)); if (real != real_defaults.end()) { if (expected->second != VARIABLE && expected->second != real->second) { err << "Provider default for " << real->first << ": " << real->second << " differs from expected " << expected->second << '\n'; } real_defaults.erase(real); } else { err << "Provider missing " << expected->first <<" parameter\n"; } expected_defaults.erase(expected++); } mark_point(); DefaultsMap::iterator real(real_defaults.begin()); while (real != real_defaults.end()) { err << "Provider has extra parameter: " << real->first << " = " << real->second << '\n'; real_defaults.erase(real++); } fail_if (!err.str().empty(), "Defaults discrepancies detected:\n%s", err.str().c_str()); } END_TEST Suite* defaults_suite() { Suite* s = suite_create("Defaults"); TCase* tc; tc = tcase_create("defaults"); tcase_add_test(tc, defaults); tcase_set_timeout(tc, 120); suite_add_tcase(s, tc); return s; } galera-26.4.3/galera/tests/certification_check.cpp0000664000177500017540000004176613540715002020455 0ustar dbartmy// // Copyright (C) 2015-2019 Codership Oy // #include "replicator_smm.hpp" // ReplicatorSMM::InitConfig #include "certification.hpp" #include "trx_handle.hpp" #include "key_os.hpp" #include "GCache.hpp" #include "gu_config.hpp" #include namespace { class TestEnv { public: TestEnv() : conf_ (), init_ (conf_), gcache_ (conf_, ".") { } ~TestEnv() { ::unlink(GCACHE_NAME.c_str()); } gu::Config& conf() { return conf_ ; } gcache::GCache& gcache() { return gcache_; } private: static std::string const GCACHE_NAME; gu::Config conf_; struct Init { galera::ReplicatorSMM::InitConfig init_; Init(gu::Config& conf) : init_(conf, NULL, NULL) { conf.set("gcache.name", 
GCACHE_NAME); conf.set("gcache.size", "1M"); } } init_; gcache::GCache gcache_; }; struct WSInfo { wsrep_uuid_t uuid; wsrep_conn_id_t conn_id; wsrep_trx_id_t trx_id; wsrep_buf_t key[3]; size_t iov_len; bool shared; wsrep_seqno_t local_seqno; wsrep_seqno_t global_seqno; wsrep_seqno_t last_seen_seqno; wsrep_seqno_t expected_depends_seqno; int flags; galera::Certification::TestResult result; const char data_ptr[24]; size_t data_len; }; } std::string const TestEnv::GCACHE_NAME = "cert.cache"; static void run_wsinfo(const WSInfo* const wsi, size_t const nws, int const version) { galera::TrxHandleMaster::Pool mp( sizeof(galera::TrxHandleMaster) + sizeof(galera::WriteSetOut), 16, "certification_mp"); galera::TrxHandleSlave::Pool sp( sizeof(galera::TrxHandleSlave), 16, "certification_sp"); TestEnv env; galera::Certification cert(env.conf(), 0); cert.assign_initial_position(gu::GTID(), version); galera::TrxHandleMaster::Params const trx_params( "", version, galera::KeySet::MAX_VERSION); mark_point(); for (size_t i(0); i < nws; ++i) { galera::TrxHandleMasterPtr trx(galera::TrxHandleMaster::New( mp, trx_params, wsi[i].uuid, wsi[i].conn_id, wsi[i].trx_id), galera::TrxHandleMasterDeleter()); trx->set_flags(wsi[i].flags); trx->append_key( galera::KeyData(version, wsi[i].key, wsi[i].iov_len, (wsi[i].shared ? 
WSREP_KEY_SHARED : WSREP_KEY_EXCLUSIVE), true)); if (wsi[i].data_len) { trx->append_data(wsi[i].data_ptr, wsi[i].data_len, WSREP_DATA_ORDERED, false); } galera::WriteSetNG::GatherVector out; size_t size(trx->write_set_out().gather(trx->source_id(), trx->conn_id(), trx->trx_id(), out)); trx->finalize(wsi[i].last_seen_seqno); // serialize write set into gcache buffer gu::byte_t* buf(static_cast(env.gcache().malloc(size))); fail_unless(out.serialize(buf, size) == size); gcs_action act = {wsi[i].global_seqno, wsi[i].local_seqno, buf, static_cast(size), GCS_ACT_WRITESET}; galera::TrxHandleSlavePtr ts(galera::TrxHandleSlave::New(false, sp), galera::TrxHandleSlaveDeleter()); fail_unless(ts->unserialize(act) == size); galera::Certification::TestResult result(cert.append_trx(ts)); fail_unless(result == wsi[i].result, "g: %lld res: %d exp: %d", ts->global_seqno(), result, wsi[i].result); fail_unless(ts->depends_seqno() == wsi[i].expected_depends_seqno, "wsi: %zu g: %lld ld: %lld eld: %lld", i, ts->global_seqno(), ts->depends_seqno(), wsi[i].expected_depends_seqno); cert.set_trx_committed(*ts); if (ts->nbo_end() && ts->ends_nbo() != WSREP_SEQNO_UNDEFINED) { cert.erase_nbo_ctx(ts->ends_nbo()); } } } START_TEST(test_certification_trx_v3) { const int version(3); using galera::Certification; using galera::TrxHandle; using galera::void_cast; // TRX certification rules: // * WSInfo wsi[] = { // 1 - 4: shared - shared // First four cases are shared keys, they should not collide or // generate dependency // 1: no dependencies { { {1, } }, 1, 1, { {void_cast("1"), 1}, {void_cast("1"), 1}, {void_cast("1"), 1} }, 3, true, 1, 1, 0, 0, TrxHandle::F_BEGIN | TrxHandle::F_COMMIT, Certification::TEST_OK, {0}, 0}, // 2: no dependencies { { {1, } }, 1, 2, { {void_cast("1"), 1}, {void_cast("1"), 1}, {void_cast("1"), 1} }, 3, true, 2, 2, 0, 0, TrxHandle::F_BEGIN | TrxHandle::F_COMMIT, Certification::TEST_OK , {0}, 0}, // 3: no dependencies { { {2, } }, 1, 3, { {void_cast("1"), 1}, 
{void_cast("1"), 1}, {void_cast("1"), 1} }, 3, true, 3, 3, 0, 0, TrxHandle::F_BEGIN | TrxHandle::F_COMMIT, Certification::TEST_OK, {0}, 0}, // 4: no dependencies { { {3, } }, 1, 4, { {void_cast("1"), 1}, {void_cast("1"), 1}, {void_cast("1"), 1} }, 3, true, 4, 4, 0, 0, TrxHandle::F_BEGIN | TrxHandle::F_COMMIT, Certification::TEST_OK, {0}, 0}, // 5: shared - exclusive // 5: depends on 4 { { {2, } }, 1, 5, { {void_cast("1"), 1}, {void_cast("1"), 1}, {void_cast("1"), 1} }, 3, false, 5, 5, 0, 4, TrxHandle::F_BEGIN | TrxHandle::F_COMMIT, Certification::TEST_OK, {0}, 0}, // 6 - 8: exclusive - shared // 6: collides with 5 { { {1, } }, 1, 6, { {void_cast("1"), 1}, {void_cast("1"), 1}, {void_cast("1"), 1} }, 3, true, 6, 6, 4, -1, TrxHandle::F_BEGIN | TrxHandle::F_COMMIT, Certification::TEST_FAILED, {0}, 0}, // 7: depends on 5 { { {2, } }, 1, 7, { {void_cast("1"), 1}, {void_cast("1"), 1}, {void_cast("1"), 1} }, 3, true, 7, 7, 4, 5, TrxHandle::F_BEGIN | TrxHandle::F_COMMIT, Certification::TEST_OK, {0}, 0}, // 8: collides with 5 { { {1, } }, 1, 8, { {void_cast("1"), 1}, {void_cast("1"), 1}, {void_cast("1"), 1}}, 3, true, 8, 8, 4, -1, TrxHandle::F_BEGIN | TrxHandle::F_COMMIT, Certification::TEST_FAILED, {0}, 0}, // 9 - 10: shared key shadows dependency to 5 // 9: depends on 5 { { {2, } }, 1, 9, { {void_cast("1"), 1}, {void_cast("1"), 1}, {void_cast("1"), 1} }, 3, true, 9, 9, 0, 5, TrxHandle::F_BEGIN | TrxHandle::F_COMMIT, Certification::TEST_OK, {0}, 0}, // 10: depends on 5 { { {2, } }, 1, 10, { {void_cast("1"), 1}, {void_cast("1"), 1}, {void_cast("1"), 1} }, 3, true, 10, 10, 6, 5, TrxHandle::F_BEGIN | TrxHandle::F_COMMIT, Certification::TEST_OK, {0}, 0}, // 11 - 13: exclusive - shared - exclusive dependency { { {2, } }, 1, 11, { {void_cast("1"), 1}, {void_cast("1"), 1}, {void_cast("1"), 1} }, 3, false, 11, 11, 10, 10, TrxHandle::F_BEGIN | TrxHandle::F_COMMIT, Certification::TEST_OK, {0}, 0}, { { {2, } }, 1, 12, { {void_cast("1"), 1}, {void_cast("1"), 1}, {void_cast("1"), 1} }, 
3, true, 12, 12, 10, 11, TrxHandle::F_BEGIN | TrxHandle::F_COMMIT, Certification::TEST_OK, {0}, 0}, { { {2, } }, 1, 13, { {void_cast("1"), 1}, {void_cast("1"), 1}, {void_cast("1"), 1} }, 3, false, 13, 13, 10, 12, TrxHandle::F_BEGIN | TrxHandle::F_COMMIT, Certification::TEST_OK, {0}, 0}, // 14: conflicts with 13 { { {1, } }, 1, 14, { {void_cast("1"), 1}, {void_cast("1"), 1}, {void_cast("1"), 1}}, 3, false, 14, 14, 12, -1, TrxHandle::F_BEGIN | TrxHandle::F_COMMIT, Certification::TEST_FAILED, {0}, 0} }; size_t nws(sizeof(wsi)/sizeof(wsi[0])); run_wsinfo(wsi, nws, version); } END_TEST START_TEST(test_certification_trx_different_level_v3) { const int version(3); using galera::Certification; using galera::TrxHandle; using galera::void_cast; // // Test the following cases: // 1) exclusive (k1, k2, k3) <-> exclusive (k1, k2) -> dependency // 2) exclusive (k1, k2) <-> exclusive (k1, k2, k3) -> conflict // WSInfo wsi[] = { // 1) { { {1, } }, 1, 1, { {void_cast("1"), 1}, {void_cast("1"), 1}, {void_cast("1"), 1} }, 3, false, 1, 1, 0, 0, TrxHandle::F_BEGIN | TrxHandle::F_COMMIT, Certification::TEST_OK, {0}, 0}, { { {2, } }, 2, 2, { {void_cast("1"), 1}, {void_cast("1"), 1} }, 2, false, 2, 2, 0, 1, TrxHandle::F_BEGIN | TrxHandle::F_COMMIT, Certification::TEST_OK, {0}, 0}, // 2) { { {2, } }, 2, 2, { {void_cast("1"), 1}, {void_cast("1"), 1} }, 2, false, 3, 3, 2, 2, TrxHandle::F_BEGIN | TrxHandle::F_COMMIT, Certification::TEST_OK, {0}, 0}, { { {1, } }, 1, 1, { {void_cast("1"), 1}, {void_cast("1"), 1}, {void_cast("1"), 1} }, 3, false, 4, 4, 2, -1, TrxHandle::F_BEGIN | TrxHandle::F_COMMIT, Certification::TEST_FAILED, {0}, 0} }; size_t nws(sizeof(wsi)/sizeof(wsi[0])); run_wsinfo(wsi, nws, version); } END_TEST START_TEST(test_certification_toi_v3) { const int version(3); using galera::Certification; using galera::TrxHandle; using galera::void_cast; // Note that only exclusive keys are used for TOI. 
// TRX - TOI and TOI - TOI matches: // * TOI should always depend on preceding write set // TOI - TRX matches: // * if coming from the same source, dependency // * if coming from different sources, conflict // TOI - TOI matches: // * always dependency WSInfo wsi[] = { // TOI { { {1, } }, 1, 1, { {void_cast("1"), 1}, }, 1, false, 1, 1, 0, 0, TrxHandle::F_ISOLATION | TrxHandle::F_BEGIN | TrxHandle::F_COMMIT, Certification::TEST_OK, {0}, 0}, // TOI 2 Depends on TOI 1 { { {2, } }, 2, 2, { {void_cast("1"), 1}, }, 1, false, 2, 2, 0, 1, TrxHandle::F_ISOLATION | TrxHandle::F_BEGIN | TrxHandle::F_COMMIT, Certification::TEST_OK, {0}, 0}, // Trx 3 from the same source depends on TOI 2 { { {2, } }, 3, 3, { {void_cast("1"), 1}, {void_cast("1"), 1}, {void_cast("1"), 1}}, 3, false, 3, 3, 2, 2, TrxHandle::F_BEGIN | TrxHandle::F_COMMIT, Certification::TEST_OK, {0}, 0}, // Trx 4 from different source conflicts with 3 { { {3, } }, 3, 3, { {void_cast("1"), 1}, {void_cast("1"), 1}, {void_cast("1"), 1}}, 3, false, 4, 4, 2, -1, TrxHandle::F_BEGIN | TrxHandle::F_COMMIT, Certification::TEST_FAILED, {0}, 0}, // Non conflicting TOI 5 depends on 4 { { {1, } }, 2, 2, { {void_cast("2"), 1}, }, 1, false, 5, 5, 0, 4, TrxHandle::F_ISOLATION | TrxHandle::F_BEGIN | TrxHandle::F_COMMIT, Certification::TEST_OK, {0}, 0}, // Trx 6 from different source conflicts with TOI 5 { { {3, } }, 3, 3, { {void_cast("2"), 1}, {void_cast("1"), 1}, {void_cast("1"), 1}}, 3, false, 6, 6, 4, -1, TrxHandle::F_BEGIN | TrxHandle::F_COMMIT, Certification::TEST_FAILED, {0}, 0} }; size_t nws(sizeof(wsi)/sizeof(wsi[0])); run_wsinfo(wsi, nws, version); } END_TEST START_TEST(test_certification_nbo) { log_info << "START: test_certification_nbo"; const int version(4); using galera::Certification; using galera::TrxHandle; using galera::void_cast; // Non blocking operations with respect to TOI // NBO - TOI: Always conflict // TOI - NBO: Always dependency WSInfo wsi[] = { // 1 - 2: NBO(1) - TOI(2) // 1 - 3: NBO(1) - NBO(3) { { {1, } 
}, 1, 1, { {void_cast("1"), 1}, }, 1, false, 1, 1, 0, 0, TrxHandle::F_ISOLATION | TrxHandle::F_BEGIN, Certification::TEST_OK, {0}, 0}, { { {1, } }, 2, 2, { {void_cast("1"), 1}, }, 1, false, 2, 2, 0, -1, TrxHandle::F_ISOLATION | TrxHandle::F_BEGIN | TrxHandle::F_COMMIT, Certification::TEST_FAILED, {0}, 0}, { { {1, } }, 3, 3, { {void_cast("1"), 1}, }, 1, false, 3, 3, 0, -1, TrxHandle::F_ISOLATION | TrxHandle::F_BEGIN, Certification::TEST_FAILED, {0}, 0}, // 4 - 5 no conflict, different key { { {1, } }, 4, 4, { {void_cast("2"), 1}, }, 1, false, 4, 4, 0, 3, TrxHandle::F_ISOLATION | TrxHandle::F_BEGIN | TrxHandle::F_COMMIT, Certification::TEST_OK, {0}, 0}, { { {2, } }, 5, 5, { {void_cast("2"), 1}, }, 1, false, 5, 5, 0, 4, TrxHandle::F_ISOLATION | TrxHandle::F_BEGIN, Certification::TEST_OK, {0}, 0}, // 6 ends the NBO with key 1 // notice the same uuid, conn_id/trx_id as the first entry { { {1, } }, 1, 1, { {void_cast("1"), 1}, }, 1, false, 6, 6, 0, 5, TrxHandle::F_ISOLATION | TrxHandle::F_COMMIT, Certification::TEST_OK, {1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0}, 24 }, // 7 should now succeed { { {1, } }, 7, 7, { {void_cast("1"), 1}, }, 1, false, 7, 7, 0, 6, TrxHandle::F_ISOLATION | TrxHandle::F_BEGIN | TrxHandle::F_COMMIT, Certification::TEST_OK, {0}, 0} }; size_t nws(sizeof(wsi)/sizeof(wsi[0])); run_wsinfo(wsi, nws, version); log_info << "END: test_certification_nbo"; } END_TEST START_TEST(test_certification_commit_fragment) { const int version(3); using galera::Certification; using galera::TrxHandle; using galera::void_cast; WSInfo wsi[] = { // commit fragment vs commit fragment { { {1, } }, 1, 1, { {void_cast("1"), 1}, {void_cast("1"), 1} }, 2, true, 1, 1, 0, 0, TrxHandle::F_BEGIN | TrxHandle::F_COMMIT | TrxHandle::F_PA_UNSAFE, Certification::TEST_OK, {0}, 0}, { { {2, } }, 2, 2, { {void_cast("1"), 1}, {void_cast("1"), 1} }, 2, true, 2, 2, 0, 1, TrxHandle::F_BEGIN | TrxHandle::F_COMMIT | TrxHandle::F_PA_UNSAFE, Certification::TEST_OK, {0}, 
0}, // TOI vs commit fragment { { {2, } }, 2, 2, { {void_cast("1"), 1}, {void_cast("1"), 1} }, 2, false, 3, 3, 2, 2, TrxHandle::F_ISOLATION | TrxHandle::F_BEGIN | TrxHandle::F_COMMIT, Certification::TEST_OK, {0}, 0}, { { {1, } }, 1, 1, { {void_cast("1"), 1}, {void_cast("1"), 1} }, 2, true, 4, 4, 2, -1, TrxHandle::F_BEGIN | TrxHandle::F_COMMIT | TrxHandle::F_PA_UNSAFE, Certification::TEST_FAILED, {0}, 0}, // commit fragment vs TOI { { {2, } }, 2, 2, { {void_cast("1"), 1}, {void_cast("1"), 1} }, 2, true, 5, 5, 3, 4, TrxHandle::F_BEGIN | TrxHandle::F_COMMIT | TrxHandle::F_PA_UNSAFE, Certification::TEST_OK, {0}, 0}, { { {1, } }, 1, 1, { {void_cast("1"), 1}, {void_cast("1"), 1} }, 2, false, 6, 6, 4, 5, TrxHandle::F_ISOLATION | TrxHandle::F_BEGIN | TrxHandle::F_COMMIT, Certification::TEST_OK, {0}, 0} }; size_t nws(sizeof(wsi)/sizeof(wsi[0])); run_wsinfo(wsi, nws, version); } END_TEST Suite* certification_suite() { Suite* s(suite_create("certification")); TCase* t; t = tcase_create("certification_trx_v3"); tcase_add_test(t, test_certification_trx_v3); suite_add_tcase(s, t); t = tcase_create("certification_toi_v3"); tcase_add_test(t, test_certification_toi_v3); suite_add_tcase(s, t); t = tcase_create("certification_trx_different_level_v3"); tcase_add_test(t, test_certification_trx_different_level_v3); suite_add_tcase(s, t); t = tcase_create("certification_toi_v3"); tcase_add_test(t, test_certification_toi_v3); suite_add_tcase(s, t); t = tcase_create("certification_nbo"); tcase_add_test(t, test_certification_nbo); t = tcase_create("certification_commit_fragment"); tcase_add_test(t, test_certification_commit_fragment); suite_add_tcase(s, t); return s; } galera-26.4.3/galera/tests/SConscript0000664000177500017540000000277613540715002016001 0ustar dbartmy Import('check_env') env = check_env.Clone() # Include paths env.Append(CPPPATH = Split(''' # #/common #/galerautils/src #/gcache/src #/gcs/src #/galera/src ''')) env.Prepend(LIBS=File('#/galerautils/src/libgalerautils.a')) 
env.Prepend(LIBS=File('#/galerautils/src/libgalerautils++.a')) env.Prepend(LIBS=File('#/gcomm/src/libgcomm.a')) env.Prepend(LIBS=File('#/gcs/src/libgcs.a')) env.Prepend(LIBS=File('#/gcache/src/libgcache.a')) env.Prepend(LIBS=File('#/galera/src/libgalera++.a')) galera_check = env.Program(target='galera_check', source=Split(''' galera_check.cpp data_set_check.cpp key_set_check.cpp write_set_ng_check.cpp certification_check.cpp trx_handle_check.cpp service_thd_check.cpp ist_check.cpp saved_state_check.cpp defaults_check.cpp ''')) # write_set_check.cpp stamp = "galera_check.passed" env.Test(stamp, galera_check) env.Alias("test", stamp) Clean(galera_check, ['#/galera_check.log', 'ist_check.cache']) galera-26.4.3/galera/tests/data_set_check.cpp0000664000177500017540000001731113540715002017403 0ustar dbartmy/* Copyright (C) 2013 Codership Oy * * $Id$ */ #undef NDEBUG #include "../src/data_set.hpp" #include "gu_logger.hpp" #include "gu_hexdump.hpp" #include using namespace galera; class TestBaseName : public gu::Allocator::BaseName { std::string str_; public: TestBaseName(const char* name) : str_(name) {} void print(std::ostream& os) const { os << str_; } }; class TestRecord { public: TestRecord (size_t size, const char* str) : size_(size), buf_(reinterpret_cast(::malloc(size_))), str_(reinterpret_cast(buf_) + sizeof(uint32_t)), own_(true) { if (0 == buf_) throw std::runtime_error("failed to allocate record"); void* tmp = const_cast(buf_); *reinterpret_cast(tmp) = htog32(size_); ::strncpy (const_cast(str_), str, size_ - 4); } TestRecord (const void* const buf, ssize_t const size) : size_(TestRecord::serial_size(buf, size)), buf_(buf), str_(reinterpret_cast(buf_) + sizeof(uint32_t)), own_(false) {} TestRecord (const TestRecord& t) : size_(t.size_), buf_(t.buf_), str_(t.str_), own_(false) {} virtual ~TestRecord () { if (own_) free (const_cast(buf_)); } const void* buf() const { return buf_; } const char* c_str() const { return str_; } ssize_t serial_size() const { return 
my_serial_size(); } static ssize_t serial_size(const void* const buf, ssize_t const size) { check_buf (buf, size, 1); return gtoh32 (*reinterpret_cast(buf)); } bool operator!= (const TestRecord& t) const { return (size_ != t.size_ || ::memcmp(buf_, t.buf_, size_)); } bool operator== (const TestRecord& t) const { return (!(*this != t)); } private: size_t const size_; const void* const buf_; const char* const str_; bool const own_; ssize_t my_serial_size () const { return size_; }; ssize_t my_serialize_to (void* buf, ssize_t size) const { check_buf (buf, size, size_); ::memcpy (buf, buf_, size_); return size_; } static void check_buf (const void* const buf, ssize_t const size, ssize_t min_size) { if (gu_unlikely (buf == 0 || size < min_size)) throw std::length_error("buffer too short"); } TestRecord& operator= (const TestRecord&); }; static void test_ver(gu::RecordSet::Version const rsv) { int const alignment (rsv >= gu::RecordSet::VER2 ? gu::RecordSet::VER2_ALIGNMENT : 1); size_t const MB = 1 << 20; TestRecord rout0(128, "abc0"); TestRecord rout1(127, "abc1"); TestRecord rout2(126, "012345"); TestRecord rout3(125, "defghij"); TestRecord rout4(3*MB, "klm"); TestRecord rout5(1*MB, "qpr"); std::vector records; records.push_back (&rout0); records.push_back (&rout1); records.push_back (&rout2); records.push_back (&rout3); records.push_back (&rout4); records.push_back (&rout5); union { gu::byte_t buf[1024]; gu_word_t align; } reserved; TestBaseName str("data_set_test"); DataSetOut dset_out(reserved.buf, sizeof(reserved.buf), str, DataSet::VER1, rsv); size_t offset(dset_out.size()); // this should be allocated inside current page offset += dset_out.append (rout0.buf(), rout0.serial_size(), true); fail_if (dset_out.size() != offset, "expected: %zu, got %zu", offset, dset_out.size()); // this should trigger new page since not stored offset += dset_out.append (rout1.buf(), rout1.serial_size(), false); fail_if (dset_out.size() != offset); // this should trigger new page since 
previous one was not stored offset += dset_out.append (rout2.buf(), rout2.serial_size(), true); fail_if (dset_out.size() != offset); // this should trigger a new page, since not stored offset += dset_out.append (rout3.buf(), rout3.serial_size(), false); fail_if (dset_out.size() != offset); // this should trigger new page, because won't fit in the current page offset += dset_out.append (rout4.buf(), rout4.serial_size(), true); fail_if (dset_out.size() != offset); // this should trigger new page, because 4MB RAM limit exceeded offset += dset_out.append (rout5.buf(), rout5.serial_size(), false); fail_if (dset_out.size() != offset); fail_if (1 != size_t(dset_out.count())); DataSetOut::GatherVector out_bufs; out_bufs().reserve (dset_out.page_count()); bool const padding_page(offset % alignment); size_t min_out_size(0); for (size_t i = 0; i < records.size(); ++i) { min_out_size += records[i]->serial_size(); } size_t const out_size (dset_out.gather (out_bufs)); fail_if(out_size % alignment, "out size %zu not aligned by %d", out_size, alignment); fail_if (out_size <= min_out_size || out_size > offset); fail_if (out_bufs->size() > size_t(dset_out.page_count()) || out_bufs->size() < size_t(dset_out.page_count() - padding_page), "Expected %zu buffers, got: %zd", dset_out.page_count(), out_bufs->size()); /* concatenate all buffers into one */ std::vector in_buf; in_buf.reserve(out_size); mark_point(); for (size_t i = 0; i < out_bufs->size(); ++i) { fail_if (0 == out_bufs[i].ptr); log_info << "\nadding buf " << i << ": " << gu::Hexdump(out_bufs[i].ptr, std::min(out_bufs[i].size, 24), true); size_t old_size = in_buf.size(); const gu::byte_t* ptr (reinterpret_cast(out_bufs[i].ptr)); in_buf.insert (in_buf.end(), ptr, ptr + out_bufs[i].size); fail_if (old_size + out_bufs[i].size != in_buf.size()); } fail_if (in_buf.size() != out_size, "Sent buf size: %zu, recvd buf size: %zu", out_size, in_buf.size()); log_info << "Resulting DataSet buffer:\n" << gu::Hexdump(in_buf.data(), 32, 
false) << '\n' << gu::Hexdump(in_buf.data(), 32, true); galera::DataSetIn const dset_in(dset_out.version(), in_buf.data(), in_buf.size()); fail_if (dset_in.size() != dset_out.size()); fail_if (dset_in.count() != dset_out.count()); try { dset_in.checksum(); } catch(gu::Exception& e) { fail(e.what()); } for (ssize_t i = 0; i < dset_in.count(); ++i) { gu::Buf data = dset_in.next(); TestRecord const rin(data.ptr, data.size); fail_if (rin != *records[i], "Record %d failed: expected %s, found %s", i, records[i]->c_str(), rin.c_str()); } galera::DataSetIn dset_in_empty; dset_in_empty.init(dset_out.version(), in_buf.data(), in_buf.size()); fail_if (dset_in_empty.size() != dset_out.size()); fail_if (dset_in_empty.count() != dset_out.count()); for (ssize_t i = 0; i < dset_in_empty.count(); ++i) { gu::Buf data = dset_in_empty.next(); TestRecord const rin(data.ptr, data.size); fail_if (rin != *records[i], "Record %d failed: expected %s, found %s", i, records[i]->c_str(), rin.c_str()); } } START_TEST (ver1) { test_ver(gu::RecordSet::VER1); } END_TEST START_TEST (ver2) { test_ver(gu::RecordSet::VER2); } END_TEST Suite* data_set_suite () { TCase* t = tcase_create ("DataSet"); tcase_add_test (t, ver1); tcase_add_test (t, ver2); tcase_set_timeout(t, 60); Suite* s = suite_create ("DataSet"); suite_add_tcase (s, t); return s; } galera-26.4.3/galera/tests/key_set_check.cpp0000664000177500017540000003010013540715002017251 0ustar dbartmy/* copyright (C) 2013-2018 Codership Oy * * $Id$ */ #undef NDEBUG #include "test_key.hpp" #include "../src/key_set.hpp" #include "gu_logger.hpp" #include "gu_hexdump.hpp" #include using namespace galera; class TestBaseName : public gu::Allocator::BaseName { std::string str_; public: TestBaseName(const char* name) : str_(name) {} void print(std::ostream& os) const { os << str_; } }; static size_t version_to_hash_size (KeySet::Version const ver) { switch (ver) { case KeySet::FLAT16: fail("FLAT16 is not supported by test"); case KeySet::FLAT16A: return 16; 
case KeySet::FLAT8: fail ("FLAT8 is not supported by test"); case KeySet::FLAT8A: return 8; default: fail ("Unsupported KeySet verison: %d", ver); } abort(); } static void test_ver(gu::RecordSet::Version const rsv, int const ws_ver) { int const alignment (rsv >= gu::RecordSet::VER2 ? gu::RecordSet::VER2_ALIGNMENT : 1); KeySet::Version const tk_ver(KeySet::FLAT16A); size_t const base_size(version_to_hash_size(tk_ver)); union { gu::byte_t buf[1024]; gu_word_t align; } reserved; assert((uintptr_t(reserved.buf) % GU_WORD_BYTES) == 0); TestBaseName const str("key_set_test"); KeySetOut kso (reserved.buf, sizeof(reserved.buf), str, tk_ver, rsv, ws_ver); fail_if (kso.count() != 0); size_t total_size(kso.size()); log_info << "Start size: " << total_size; TestKey tk0(tk_ver, WSREP_KEY_SHARED, false, "a0"); kso.append(tk0()); fail_if (kso.count() != 1); total_size += base_size + 2 + 1*4; total_size = GU_ALIGN(total_size, alignment); fail_if (total_size != kso.size(), "Size: %zu, expected: %zu", kso.size(), total_size); kso.append(tk0()); fail_if (kso.count() != 1); fail_if (total_size != kso.size(), "Size: %zu, expected: %zu", kso.size(), total_size); TestKey tk1(tk_ver, WSREP_KEY_SHARED, true, "a0", "a1", "a2"); mark_point(); kso.append(tk1()); fail_if (kso.count() != 3, "key count: expected 3, got %d", kso.count()); total_size += base_size + 2 + 2*4; total_size = GU_ALIGN(total_size, alignment); total_size += base_size + 2 + 3*4; total_size = GU_ALIGN(total_size, alignment); fail_if (total_size != kso.size(), "Size: %zu, expected: %zu", kso.size(), total_size); TestKey tk2(tk_ver, WSREP_KEY_EXCLUSIVE, false, "a0", "a1", "b2"); kso.append(tk2()); fail_if (kso.count() != 4, "key count: expected 4, got %d", kso.count()); total_size += base_size + 2 + 3*4; total_size = GU_ALIGN(total_size, alignment); fail_if (total_size != kso.size(), "Size: %zu, expected: %zu", kso.size(), total_size); /* this should update a sronger version of "a2" */ TestKey tk2_(tk_ver, 
WSREP_KEY_REFERENCE, false, "a0", "a1", "a2"); kso.append(tk2_()); fail_if (kso.count() != 5, "key count: expected 5, got %d", kso.count()); total_size += base_size + 2 + 3*4; total_size = GU_ALIGN(total_size, alignment); fail_if (total_size != kso.size(), "Size: %zu, expected: %zu", kso.size(), total_size); /* it is a duplicate, but it should add an exclusive verision of the key */ TestKey tk3(tk_ver, WSREP_KEY_EXCLUSIVE, true, "a0", "a1"); log_info << "######## Appending exclusive duplicate tk3: begin"; kso.append(tk3()); log_info << "######## Appending exclusive duplicate tk3: end"; fail_if (kso.count() != 6, "key count: expected 6, got %d", kso.count()); total_size += base_size + 2 + 2*4; total_size = GU_ALIGN(total_size, alignment); fail_if (total_size != kso.size(), "Size: %zu, expected: %zu", kso.size(), total_size); /* tk3 should make it impossible to add anything past a0:a1 */ TestKey tk4(tk_ver, WSREP_KEY_EXCLUSIVE, false, "a0", "a1", "c2"); log_info << "######## Appending exclusive duplicate tk4: begin"; kso.append(tk4()); log_info << "######## Appending exclusive duplicate tk4: end"; fail_if (kso.count() != 6, "key count: expected 6, got %d", kso.count()); fail_if (total_size != kso.size(), "Size: %zu, expected: %zu", kso.size(), total_size); /* adding shared key should have no effect */ TestKey tk5(tk_ver, WSREP_KEY_SHARED, true, "a0", "a1"); kso.append(tk5()); fail_if (kso.count() != 6, "key count: expected 6, got %d", kso.count()); fail_if (total_size != kso.size(), "Size: %zu, expected: %zu", kso.size(), total_size); /* adding REFERENCE key should have no effect */ TestKey tk5_1(tk_ver, WSREP_KEY_REFERENCE, true, "a0", "a1"); kso.append(tk5_1()); fail_if (kso.count() != 6, "key count: expected 6, got %d", kso.count()); fail_if (total_size != kso.size(), "Size: %zu, expected: %zu", kso.size(), total_size); /* adding UPDATE key should have no effect */ TestKey tk5_2(tk_ver, WSREP_KEY_UPDATE, true, "a0", "a1"); kso.append(tk5_2()); fail_if (kso.count() 
!= 6, "key count: expected 6, got %d", kso.count()); fail_if (total_size != kso.size(), "Size: %zu, expected: %zu", kso.size(), total_size); /* tk5 should not make any changes */ TestKey tk6(tk_ver, WSREP_KEY_EXCLUSIVE, false, "a0", "a1", "c2"); kso.append(tk6()); fail_if (kso.count() != 6, "key count: expected 6, got %d", kso.count()); fail_if (total_size != kso.size(), "Size: %zu, expected: %zu", kso.size(), total_size); /* a0:b1:... should still be possible, should add 2 keys: b1 and c2 */ TestKey tk7(tk_ver, WSREP_KEY_REFERENCE, true, "a0", "b1", "c2"); kso.append(tk7()); fail_if (kso.count() != 8, "key count: expected 8, got %d", kso.count()); total_size += base_size + 2 + 2*4; total_size = GU_ALIGN(total_size, alignment); total_size += base_size + 2 + 3*4; total_size = GU_ALIGN(total_size, alignment); fail_if (total_size != kso.size(), "Size: %zu, expected: %zu", kso.size(), total_size); /* make sure a0:b1:b2 is possible despite we have a0:a1:b2 already * (should be no collision on b2) */ TestKey tk8(tk_ver, WSREP_KEY_REFERENCE, false, "a0", "b1", "b2"); kso.append(tk8()); fail_if (kso.count() != 9, "key count: expected 9, got %d", kso.count()); total_size += base_size + 2 + 3*4; total_size = GU_ALIGN(total_size, alignment); fail_if (total_size != kso.size(), "Size: %zu, expected: %zu", kso.size(), total_size); int expected_count(kso.count()); TestKey tk8_1(tk_ver, WSREP_KEY_UPDATE, false, "a0", "b1", "b2"); kso.append(tk8_1()); if (3 == ws_ver || 4 == ws_ver) { /* versions 3, 4 do not distinguish REEFERENCE and UPDATE, the key should be ignored */ } else if (5 <= ws_ver) { /* in version 5 UPDATE is a stronger key than REFERENCE - should be * added to the set */ expected_count++; total_size += base_size + 2 + 3*4; total_size = GU_ALIGN(total_size, alignment); } else abort(); fail_if (kso.count() != expected_count, "key count: expected %d, got %d", expected_count, kso.count()); fail_if (total_size != kso.size(), "Size: %zu, expected: %zu", kso.size(), 
total_size); TestKey tk8_2(tk_ver, WSREP_KEY_EXCLUSIVE, false, "a0", "b1", "b2"); kso.append(tk8_2()); if (3 == ws_ver) { /* version 3 does not distinguish REFERENCE, UPDATE and EXCLUSIVE, the key should be ignored */ } else if (4 <= ws_ver) { /* in version 4 EXCLUSIVE is a stronger key than REFERENCE and * in version 5 EXCLUSIVE is a stronger key than UPDATE - should be * added to the set */ expected_count++; total_size += base_size + 2 + 3*4; total_size = GU_ALIGN(total_size, alignment); } else abort(); fail_if (kso.count() != expected_count, "key count: expected %d, got %d", expected_count, kso.count()); fail_if (total_size != kso.size(), "Size: %zu, expected: %zu", kso.size(), total_size); TestKey tk8_3(tk_ver, WSREP_KEY_UPDATE, false, "a0", "b1", "b2"); kso.append(tk8_3()); /* UPDATE key is weaker than EXCLUSIVE, should be ignored */ fail_if (kso.count() != expected_count, "key count: expected %d, got %d", expected_count, kso.count()); fail_if (total_size != kso.size(), "Size: %zu, expected: %zu", kso.size(), total_size); log_info << "size before huge key: " << total_size; char huge_key[2048]; memset (huge_key, 'x', sizeof(huge_key)); huge_key[ sizeof(huge_key) - 1 ] = 0; TestKey tk9(tk_ver, WSREP_KEY_EXCLUSIVE, false, huge_key, huge_key,huge_key); kso.append(tk9()); expected_count += 3; fail_if (kso.count() != expected_count, "key count: expected %d, got %d", expected_count, kso.count()); total_size += base_size + 2 + 1*256; total_size = GU_ALIGN(total_size, alignment); total_size += base_size + 2 + 2*256; total_size = GU_ALIGN(total_size, alignment); total_size += base_size + 2 + 3*256; total_size = GU_ALIGN(total_size, alignment); fail_if (total_size != kso.size(), "Size: %zu, expected: %zu", kso.size(), total_size); log_info << "End size: " << kso.size(); KeySetOut::GatherVector out; out->reserve(kso.page_count()); size_t const out_size(kso.gather(out)); log_info << "Gather size: " << out_size << ", buf count: " << out->size(); fail_if(out_size % 
alignment, "out size not aligned by %d", out_size % alignment); std::vector in; in.reserve(out_size); for (size_t i(0); i < out->size(); ++i) { const gu::byte_t* ptr(reinterpret_cast(out[i].ptr)); in.insert (in.end(), ptr, ptr + out[i].size); } fail_if (in.size() != out_size); KeySetIn ksi (kso.version(), in.data(), in.size()); fail_if (ksi.count() != kso.count(), "Received keys: %zu, expected: %zu", ksi.count(), kso.count()); fail_if (ksi.size() != kso.size(), "Received size: %zu, expected: %zu", ksi.size(), kso.size()); try { ksi.checksum(); } catch (std::exception& e) { fail("%s", e.what()); } int shared(0); // to stiffle clang complaints about unused variables int const P_SHARED(KeySet::KeyPart::prefix(WSREP_KEY_SHARED, ws_ver)); for (int i(0); i < ksi.count(); ++i) { KeySet::KeyPart kp(ksi.next()); shared += (kp.prefix() == P_SHARED); } KeySetIn ksi_empty; fail_if (ksi_empty.count() != 0, "Received keys: %zu, expected: %zu", ksi_empty.count(), 0); fail_if (ksi_empty.size() != 0, "Received size: %zu, expected: %zu", ksi_empty.size(), 0); ksi_empty.init (kso.version(), in.data(), in.size()); fail_if (ksi_empty.count() != kso.count(), "Received keys: %zu, expected: %zu", ksi_empty.count(),kso.count()); fail_if (ksi_empty.size() != kso.size(), "Received size: %zu, expected: %zu", ksi_empty.size(), kso.size()); try { ksi_empty.checksum(); } catch (std::exception& e) { fail("%s", e.what()); } for (int i(0); i < ksi_empty.count(); ++i) { KeySet::KeyPart kp(ksi_empty.next()); shared += (kp.prefix() == P_SHARED); } ksi_empty.rewind(); for (int i(0); i < ksi_empty.count(); ++i) { KeySet::KeyPart kp(ksi_empty.next()); shared += (kp.prefix() == P_SHARED); } fail_if(0 == shared); } START_TEST (ver1_3) { test_ver(gu::RecordSet::VER1, 3); } END_TEST START_TEST (ver2_3) { test_ver(gu::RecordSet::VER2, 3); } END_TEST START_TEST (ver2_4) { test_ver(gu::RecordSet::VER2, 4); } END_TEST START_TEST (ver2_5) { test_ver(gu::RecordSet::VER2, 5); } END_TEST Suite* key_set_suite () { 
TCase* t = tcase_create ("KeySet"); tcase_add_test (t, ver1_3); tcase_add_test (t, ver2_3); tcase_add_test (t, ver2_4); tcase_add_test (t, ver2_5); tcase_set_timeout(t, 60); Suite* s = suite_create ("KeySet"); suite_add_tcase (s, t); return s; } galera-26.4.3/galera/tests/trx_handle_check.cpp0000664000177500017540000002531213540715002017747 0ustar dbartmy// // Copyright (C) 2010-2017 Codership Oy // #include "trx_handle.hpp" #include #include #include using namespace std; using namespace galera; template void check_states_graph( int graph[TrxHandle::num_states_][TrxHandle::num_states_], T* trx, const std::vector& visits) { // Check that no allowed state transition causes an abort std::vector visited(TrxHandle::num_states_); std::fill(visited.begin(), visited.end(), 0); for (int i(0); i < TrxHandle::num_states_; ++i) { trx->force_state(TrxHandle::State(i)); for (int j(0); j < TrxHandle::num_states_; ++j) { if (graph[i][j]){ log_info << "Checking transition " << trx->state() << " -> " << TrxHandle::State(j); trx->set_state(TrxHandle::State(j)); visited[i] = 1; visited[j] = 1; } else { // TODO: Currently FSM transition calls abort on // unknown transition, figure out how to fix it // to verify also that incorrect transitions cause // proper error. 
} trx->force_state(TrxHandle::State(i)); } } for (int i(0); i < TrxHandle::num_states_; ++i) { fail_unless(visited[i] == visits[i], "i = %i visited = %i visits = %i", i, visited[i], visits[i]); } } START_TEST(test_states_master) { log_info << "START test_states_master"; TrxHandleMaster::Pool tp(TrxHandleMaster::LOCAL_STORAGE_SIZE(), 16, "test_states_master"); wsrep_uuid_t uuid = {{1, }}; // first check basic stuff // 1) initial state is executing // 2) invalid state changes are caught // 3) valid state changes change state TrxHandleMasterPtr trx(TrxHandleMaster::New(tp, TrxHandleMaster::Defaults, uuid, -1, 1), TrxHandleMasterDeleter()); galera::TrxHandleLock lock(*trx); fail_unless(trx->state() == TrxHandle::S_EXECUTING); // Matrix representing directed graph of TrxHandleMaster transitions, // see galera/src/trx_handle.cpp // EXECUTING 0 // MUST_ABORT 1 // ABORTING 2 // REPLICATING 3 // CERTIFYING 4 // MUST_REPLAY 5 // REPLAYING 6 // APPLYING 7 // COMMITTING 8 // ROLLING_BACK 9 // COMMITTED 10 // ROLLED_BACK 11 int state_trans_master[TrxHandle::num_states_][TrxHandle::num_states_] = { // 0 1 2 3 4 5 6 7 8 9 10 11 To / From { 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1 }, // 0 { 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // 1 { 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1 }, // 2 { 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 }, // 3 { 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0 }, // 4 { 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0 }, // 5 { 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0 }, // 6 { 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0 }, // 7 { 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0 }, // 8 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 }, // 9 { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // 10 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } // 11 }; // Visits all states std::vector visits(TrxHandle::num_states_); std::fill(visits.begin(), visits.end(), 1); check_states_graph(state_trans_master, trx.get(), visits); } END_TEST START_TEST(test_states_slave) { log_info << "START test_states_slave"; TrxHandleSlave::Pool sp(sizeof(TrxHandleSlave), 16, 
"test_states_slave"); int state_trans_slave[TrxHandle::num_states_][TrxHandle::num_states_] = { // 0 1 2 3 4 5 6 7 8 9 10 11 To / From { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // 0 EXECUTING { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // 1 MUST_ABORT { 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0 }, // 2 ABORTING { 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0 }, // 3 REPLICATING { 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0 }, // 4 CERTIFYING { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // 5 MUST_REPLAY { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // 6 REPLAYING { 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0 }, // 7 APPLYING { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1 }, // 8 COMMITTNG { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 }, // 9 ROLLING_BACK { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // 10 COMMITTED { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } // 11 ROLLED_BACK }; TrxHandleSlavePtr ts(TrxHandleSlave::New(false, sp), TrxHandleSlaveDeleter()); fail_unless(ts->state() == TrxHandle::S_REPLICATING); // Visits only REPLICATING, CERTIFYING, APPLYING, COMMITTING, COMMITTED, // ROLLED_BACK std::vector visits(TrxHandle::num_states_); std::fill(visits.begin(), visits.end(), 0); visits[TrxHandle::S_ABORTING] = 1; visits[TrxHandle::S_REPLICATING] = 1; visits[TrxHandle::S_CERTIFYING] = 1; visits[TrxHandle::S_APPLYING] = 1; visits[TrxHandle::S_COMMITTING] = 1; visits[TrxHandle::S_COMMITTED] = 1; visits[TrxHandle::S_ROLLING_BACK] = 1; visits[TrxHandle::S_ROLLED_BACK] = 1; check_states_graph(state_trans_slave, ts.get(), visits); } END_TEST START_TEST(test_serialization) { TrxHandleMaster::Pool lp(4096, 16, "serialization_lp"); TrxHandleSlave::Pool sp(sizeof(TrxHandleSlave), 16, "serialization_sp"); for (int version = 3; version <= 5; ++version) { galera::TrxHandleMaster::Params const trx_params("", version, KeySet::MAX_VERSION); wsrep_uuid_t uuid; gu_uuid_generate(&uuid, 0, 0); TrxHandleMasterPtr trx (TrxHandleMaster::New(lp, trx_params, uuid, 4567, 8910), TrxHandleMasterDeleter()); std::vector buf; trx->serialize(0, buf); fail_unless(buf.size() > 
0); TrxHandleSlavePtr txs1(TrxHandleSlave::New(false, sp), TrxHandleSlaveDeleter()); gcs_action const act = { 1, 2, buf.data(), int(buf.size()), GCS_ACT_WRITESET}; fail_unless(txs1->unserialize(act) > 0); fail_if(txs1->global_seqno() != act.seqno_g); fail_if(txs1->local_seqno() != act.seqno_l); } } END_TEST static enum wsrep_cb_status apply_cb( void* ctx, const wsrep_ws_handle_t* wh, uint32_t flags, const wsrep_buf_t* data, const wsrep_trx_meta_t* meta, wsrep_bool_t* exit_loop ) { std::vector* const res(static_cast* >(ctx)); fail_if(NULL == res); const char* const c(static_cast(data->ptr)); fail_if(NULL == c); fail_if(1 != data->len); res->push_back(*c); return WSREP_CB_SUCCESS; } START_TEST(test_streaming) { TrxHandleMaster::Pool lp(4096, 16, "streaming_lp"); TrxHandleSlave::Pool sp(sizeof(TrxHandleSlave), 16, "streaming_sp"); int const version(galera::WriteSetNG::VER5); galera::TrxHandleMaster::Params const trx_params("", version, KeySet::MAX_VERSION); wsrep_uuid_t uuid; gu_uuid_generate(&uuid, 0, 0); TrxHandleMasterPtr trx(TrxHandleMaster::New(lp, trx_params, uuid, 4567,8910), TrxHandleMasterDeleter()); galera::TrxHandleLock lock(*trx); std::vector src(3); // initial wirteset src[0] = 'a'; src[1] = 'b'; src[2] = 'c'; std::vector res; // apply_cb should reproduce src in res fail_if(src == res); fail_unless(trx->flags() & TrxHandle::F_BEGIN); { // 0. first fragment A trx->append_data(&src[0], 1, WSREP_DATA_ORDERED, false); trx->finalize(0); std::vector buf; trx->serialize(0, buf); fail_unless(buf.size() > 0); trx->release_write_set_out(); TrxHandleSlavePtr ts(TrxHandleSlave::New(false, sp), TrxHandleSlaveDeleter()); gcs_action const act = { 1, 2, buf.data(), int(buf.size()), GCS_ACT_WRITESET}; fail_unless(ts->unserialize(act) > 0); fail_unless(ts->flags() & TrxHandle::F_BEGIN); fail_if(ts->flags() & TrxHandle::F_COMMIT); trx->add_replicated(ts); wsrep_bool_t exit_loop; ts->apply(&res, apply_cb, wsrep_trx_meta_t(), exit_loop); } { // 1. 
middle fragment B trx->append_data(&src[1], 1, WSREP_DATA_ORDERED, false); trx->finalize(1); std::vector buf; trx->serialize(0, buf); fail_unless(buf.size() > 0); trx->release_write_set_out(); TrxHandleSlavePtr ts(TrxHandleSlave::New(false, sp), TrxHandleSlaveDeleter()); gcs_action const act = { 2, 3, buf.data(), int(buf.size()), GCS_ACT_WRITESET}; fail_unless(ts->unserialize(act) > 0); fail_if(ts->flags() & TrxHandle::F_BEGIN); fail_if(ts->flags() & TrxHandle::F_COMMIT); trx->add_replicated(ts); wsrep_bool_t exit_loop; ts->apply(&res, apply_cb, wsrep_trx_meta_t(), exit_loop); } { // 2. last fragment C trx->append_data(&src[2], 1, WSREP_DATA_ORDERED, false); trx->set_flags(TrxHandle::F_COMMIT); // commit trx->finalize(2); std::vector buf; trx->serialize(0, buf); fail_unless(buf.size() > 0); trx->release_write_set_out(); TrxHandleSlavePtr ts(TrxHandleSlave::New(false, sp), TrxHandleSlaveDeleter()); gcs_action const act = { 3, 4, buf.data(), int(buf.size()), GCS_ACT_WRITESET}; fail_unless(ts->unserialize(act) > 0); fail_if(ts->flags() & TrxHandle::F_BEGIN); fail_unless(ts->flags() & TrxHandle::F_COMMIT); trx->add_replicated(ts); wsrep_bool_t exit_loop; ts->apply(&res, apply_cb, wsrep_trx_meta_t(), exit_loop); } fail_if(res != src); } END_TEST Suite* trx_handle_suite() { Suite* s = suite_create("trx_handle"); TCase* tc; tc = tcase_create("test_states_master"); tcase_add_test(tc, test_states_master); suite_add_tcase(s, tc); tc = tcase_create("test_states_slave"); tcase_add_test(tc, test_states_slave); suite_add_tcase(s, tc); tc = tcase_create("test_serialization"); tcase_add_test(tc, test_serialization); suite_add_tcase(s, tc); tc = tcase_create("test_streaming"); tcase_add_test(tc, test_streaming); suite_add_tcase(s, tc); return s; } galera-26.4.3/galera/tests/saved_state_check.cpp0000664000177500017540000001170213540715002020117 0ustar dbartmy/* * Copyright (C) 2012-2017 Codership Oy */ #include "../src/saved_state.hpp" #include #include #include #include #define 
__STDC_FORMAT_MACROS #include static volatile bool stop(false); using namespace galera; static void* thread_routine (void* arg) { SavedState* st(static_cast(arg)); do { st->mark_unsafe(); st->mark_safe(); } while (!stop); return NULL; } static const int max_threads(16); static gu_thread_t threads[max_threads]; static void start_threads(void* arg) { stop = false; for (int ret = 0; ret < max_threads; ++ret) { gu_thread_t t; int err = gu_thread_create (&t, NULL, thread_routine, arg); fail_if (err, "Failed to start thread %d: %d (%s)", ret, err, strerror(err)); threads[ret] = t; } } static void stop_threads() { stop = true; for (int t = 0; t < max_threads; ++t) { gu_thread_join(threads[t], NULL); } } static const char* fname("grastate.dat"); START_TEST(test_basic) { unlink (fname); wsrep_uuid_t uuid; wsrep_seqno_t seqno; bool safe_to_bootstrap; { SavedState st(fname); st.get(uuid, seqno, safe_to_bootstrap); fail_if (uuid != WSREP_UUID_UNDEFINED); fail_if (seqno != WSREP_SEQNO_UNDEFINED); fail_if (safe_to_bootstrap != true); gu_uuid_from_string("b2c01654-8dfe-11e1-0800-a834d641cfb5", uuid); seqno = 2345234LL; st.set(uuid, seqno, false); } { SavedState st(fname); wsrep_uuid_t u; wsrep_seqno_t s; bool stb; st.get(u, s, stb); fail_if (u != uuid); fail_if (s != seqno); fail_if (stb != false); } } END_TEST #define TEST_USLEEP 2500 // 2.5ms START_TEST(test_unsafe) { SavedState st(fname); wsrep_uuid_t uuid; wsrep_seqno_t seqno; bool safe_to_bootstrap; st.get(uuid, seqno, safe_to_bootstrap); fail_if (uuid == WSREP_UUID_UNDEFINED); fail_if (seqno == WSREP_SEQNO_UNDEFINED); fail_if (safe_to_bootstrap == true); st.set(uuid, WSREP_SEQNO_UNDEFINED, false); for (int i = 0; i < 100; ++i) { start_threads(&st); mark_point(); usleep (TEST_USLEEP); st.set(uuid, i, false); // make sure that state is not lost if set concurrently mark_point(); usleep (TEST_USLEEP); stop_threads(); mark_point(); st.get(uuid, seqno, safe_to_bootstrap); fail_if (uuid == WSREP_UUID_UNDEFINED); fail_if (seqno != 
i); fail_if (safe_to_bootstrap != false); } long marks, locks, writes; st.stats(marks, locks, writes); log_info << "Total marks: " << marks << ", total writes: " << writes << ", total locks: " << locks << "\nlocks ratio: " << (double(locks)/marks) << "\nwrites ratio: " << (double(writes)/locks); } END_TEST START_TEST(test_corrupt) { wsrep_uuid_t uuid; wsrep_seqno_t seqno; bool safe_to_bootstrap; { SavedState st(fname); st.get(uuid, seqno, safe_to_bootstrap); fail_if (uuid == WSREP_UUID_UNDEFINED); fail_if (seqno == WSREP_SEQNO_UNDEFINED); fail_if (safe_to_bootstrap == true); st.set(uuid, WSREP_SEQNO_UNDEFINED, false); } long marks(0), locks(0), writes(0); for (int i = 0; i < 100; ++i) { SavedState st(fname); // explicitly overwrite corruption mark. st.set (uuid, seqno, false); start_threads(&st); mark_point(); usleep (TEST_USLEEP); st.mark_corrupt(); st.set (uuid, seqno, false); // make sure that corrupt stays usleep (TEST_USLEEP); mark_point(); stop_threads(); mark_point(); wsrep_uuid_t u; wsrep_seqno_t s; bool stb; st.get(u, s, stb); // make sure that mark_corrupt() stays fail_if (u != WSREP_UUID_UNDEFINED); fail_if (s != WSREP_SEQNO_UNDEFINED); fail_if (stb != false); long m, l, w; st.stats(m, l, w); marks += m; locks += l; writes += w; } log_info << "Total marks: " << marks << ", total locks: " << locks << ", total writes: " << writes << "\nlocks ratio: " << (double(locks)/marks) << "\nwrites ratio: " << (double(writes)/locks); unlink (fname); } END_TEST #define WAIT_FOR(cond) \ { int count = 1000; while (--count && !(cond)) { usleep (TEST_USLEEP); }} Suite* saved_state_suite() { Suite* s = suite_create ("saved_state"); TCase* tc; tc = tcase_create ("saved_state"); tcase_add_test (tc, test_basic); tcase_add_test (tc, test_unsafe); tcase_add_test (tc, test_corrupt); tcase_set_timeout(tc, 120); suite_add_tcase (s, tc); return s; } galera-26.4.3/galera/tests/ist_check.cpp0000664000177500017540000003263413540715002016423 0ustar dbartmy// // Copyright (C) 2011-2019 
Codership Oy // #include "ist.hpp" #include "ist_proto.hpp" #include "trx_handle.hpp" #include "monitor.hpp" #include "replicator_smm.hpp" #include #include #include using namespace galera; // Message tests START_TEST(test_ist_message) { using namespace galera::ist; #if 0 /* This is a check for the old (broken) format */ Message m3(3, Message::T_HANDSHAKE, 0x2, 3, 1001); #if GU_WORDSIZE == 32 fail_unless(serial_size(m3) == 20, "serial size %zu != 20", serial_size(m3)); #elif GU_WORDSIZE == 64 fail_unless(serial_size(m3) == 24, "serial size %zu != 24", serial_size(m3)); #endif gu::Buffer buf(m3.serial_size()); m3.serialize(&buf[0], buf.size(), 0); Message mu3(3); mu3.unserialize(&buf[0], buf.size(), 0); fail_unless(mu3.version() == 3); fail_unless(mu3.type() == Message::T_HANDSHAKE); fail_unless(mu3.flags() == 0x2); fail_unless(mu3.ctrl() == 3); fail_unless(mu3.len() == 1001); #endif /* 0 */ Message const m2(VER21, Message::T_HANDSHAKE, 0x2, 3, 1001); size_t const s2(12); fail_unless(m2.serial_size() == s2, "Expected m2.serial_size() = %zd, got %zd", s2,m2.serial_size()); gu::Buffer buf2(m2.serial_size()); m2.serialize(&buf2[0], buf2.size(), 0); Message mu2(VER21); mu2.unserialize(&buf2[0], buf2.size(), 0); fail_unless(mu2.version() == VER21); fail_unless(mu2.type() == Message::T_HANDSHAKE); fail_unless(mu2.flags() == 0x2); fail_unless(mu2.ctrl() == 3); fail_unless(mu2.len() == 1001); Message const m4(VER40, Message::T_HANDSHAKE, 0x2, 3, 1001); size_t const s4(16 + sizeof(uint64_t /* Message::checksum_t */)); fail_unless(m4.serial_size() == s4, "Expected m3.serial_size() = %zd, got %zd", s4,m4.serial_size()); gu::Buffer buf4(m4.serial_size()); m4.serialize(&buf4[0], buf4.size(), 0); Message mu4(VER40); mu4.unserialize(&buf4[0], buf4.size(), 0); fail_unless(mu4.version() == VER40); fail_unless(mu4.type() == Message::T_HANDSHAKE); fail_unless(mu4.flags() == 0x2); fail_unless(mu4.ctrl() == 3); fail_unless(mu4.len() == 1001); } END_TEST // IST tests static gu_barrier_t 
start_barrier; class TestOrder { public: TestOrder(galera::TrxHandleSlave& trx) : trx_(trx) { } void lock() { } void unlock() { } wsrep_seqno_t seqno() const { return trx_.global_seqno(); } bool condition(wsrep_seqno_t last_entered, wsrep_seqno_t last_left) const { return (last_left >= trx_.depends_seqno()); } #ifdef GU_DBUG_ON void debug_sync(gu::Mutex&) { } #endif // GU_DBUG_ON private: galera::TrxHandleSlave& trx_; }; struct sender_args { gcache::GCache& gcache_; const std::string& peer_; wsrep_seqno_t first_; wsrep_seqno_t last_; int version_; sender_args(gcache::GCache& gcache, const std::string& peer, wsrep_seqno_t first, wsrep_seqno_t last, int version) : gcache_(gcache), peer_ (peer), first_ (first), last_ (last), version_(version) { } }; struct receiver_args { std::string listen_addr_; wsrep_seqno_t first_; wsrep_seqno_t last_; TrxHandleSlave::Pool& trx_pool_; gcache::GCache& gcache_; int version_; receiver_args(const std::string listen_addr, wsrep_seqno_t first, wsrep_seqno_t last, TrxHandleSlave::Pool& sp, gcache::GCache& gc, int version) : listen_addr_(listen_addr), first_ (first), last_ (last), trx_pool_ (sp), gcache_ (gc), version_ (version) { } }; extern "C" void* sender_thd(void* arg) { mark_point(); const sender_args* sargs(reinterpret_cast(arg)); gu::Config conf; galera::ReplicatorSMM::InitConfig(conf, NULL, NULL); gu_barrier_wait(&start_barrier); galera::ist::Sender sender(conf, sargs->gcache_, sargs->peer_, sargs->version_); mark_point(); sender.send(sargs->first_, sargs->last_, sargs->first_); mark_point(); return 0; } namespace { class ISTHandler : public galera::ist::EventHandler { public: ISTHandler() : mutex_(), cond_(), seqno_(0), eof_(false), error_(0) { } ~ISTHandler() {} void ist_trx(const TrxHandleSlavePtr& ts, bool must_apply, bool preload) { assert(ts != 0); ts->verify_checksum(); if (ts->state() == TrxHandle::S_ABORTING) { log_info << "ist_trx: aborting: " << ts->global_seqno(); } else { log_info << "ist_trx: " << *ts; 
ts->set_state(TrxHandle::S_CERTIFYING); } if (preload == false) { assert(seqno_ + 1 == ts->global_seqno()); } else { assert(seqno_ < ts->global_seqno()); } seqno_ = ts->global_seqno(); } void ist_cc(const gcs_action& act, bool must_apply, bool preload) { gcs_act_cchange const cc(act.buf, act.size); assert(act.seqno_g == cc.seqno); log_info << "ist_cc" << cc.seqno; if (preload == false) { assert(seqno_ + 1 == cc.seqno); } else { assert(seqno_ < cc.seqno); } } void ist_end(int error) { log_info << "IST ended with status: " << error; gu::Lock lock(mutex_); error_ = error; eof_ = true; cond_.signal(); } int wait() { gu::Lock lock(mutex_); while (eof_ == false) { lock.wait(cond_); } return error_; } private: gu::Mutex mutex_; gu::Cond cond_; wsrep_seqno_t seqno_; bool eof_; int error_; }; } extern "C" void* receiver_thd(void* arg) { mark_point(); receiver_args* rargs(reinterpret_cast(arg)); gu::Config conf; TrxHandleSlave::Pool slave_pool(sizeof(TrxHandleSlave), 1024, "TrxHandleSlave"); galera::ReplicatorSMM::InitConfig(conf, NULL, NULL); mark_point(); conf.set(galera::ist::Receiver::RECV_ADDR, rargs->listen_addr_); ISTHandler isth; galera::ist::Receiver receiver(conf, rargs->gcache_, slave_pool, isth, 0); // Prepare starts IST receiver thread rargs->listen_addr_ = receiver.prepare(rargs->first_, rargs->last_, rargs->version_, WSREP_UUID_UNDEFINED); gu_barrier_wait(&start_barrier); mark_point(); receiver.ready(rargs->first_); log_info << "IST wait finished with status: " << isth.wait(); receiver.finished(); return 0; } static int select_trx_version(int protocol_version) { // see protocol version table in replicator_smm.hpp switch (protocol_version) { case 1: case 2: return 1; case 3: case 4: return 2; case 5: case 6: case 7: case 8: return 3; case 9: return 4; case 10: return 5; default: fail("unsupported replicator protocol version: %n", protocol_version); } return -1; } static void store_trx(gcache::GCache* const gcache, TrxHandleMaster::Pool& lp, const 
TrxHandleMaster::Params& trx_params, const wsrep_uuid_t& uuid, int const i) { TrxHandleMasterPtr trx(TrxHandleMaster::New(lp, trx_params, uuid, 1234+i, 5678+i), TrxHandleMasterDeleter()); const wsrep_buf_t key[3] = { {"key1", 4}, {"key2", 4}, {"key3", 4} }; trx->append_key(KeyData(trx_params.version_, key, 3, WSREP_KEY_EXCLUSIVE, true)); trx->append_data("bar", 3, WSREP_DATA_ORDERED, true); assert (i > 0); int last_seen(i - 1); int pa_range(i); gu::byte_t* ptr(0); if (trx_params.version_ < 3) { fail("WS version %d not supported any more", trx_params.version_); } else { galera::WriteSetNG::GatherVector bufs; ssize_t trx_size(trx->gather(bufs)); mark_point(); trx->finalize(last_seen); ptr = static_cast(gcache->malloc(trx_size)); /* concatenate buffer vector */ gu::byte_t* p(ptr); for (size_t k(0); k < bufs->size(); ++k) { ::memcpy(p, bufs[k].ptr, bufs[k].size); p += bufs[k].size; } assert ((p - ptr) == trx_size); gu::Buf ws_buf = { ptr, trx_size }; mark_point(); galera::WriteSetIn wsi(ws_buf); assert (wsi.last_seen() == last_seen); assert (wsi.pa_range() == (wsi.version() < WriteSetNG::VER5 ? 
0 : WriteSetNG::MAX_PA_RANGE)); wsi.set_seqno(i, pa_range); assert (wsi.seqno() == int64_t(i)); assert (wsi.pa_range() == pa_range); } gcache->seqno_assign(ptr, i, GCS_ACT_WRITESET, (i - pa_range) <= 0); } static void store_cc(gcache::GCache* const gcache, const wsrep_uuid_t& uuid, int const i) { static int conf_id(0); gcs_act_cchange cc; ::memcpy(&cc.uuid, &uuid, sizeof(uuid)); cc.seqno = i; cc.conf_id = conf_id++; void* tmp; int const cc_size(cc.write(&tmp)); void* const cc_ptr(gcache->malloc(cc_size)); fail_if(NULL == cc_ptr); memcpy(cc_ptr, tmp, cc_size); gcache->seqno_assign(cc_ptr, i, GCS_ACT_CCHANGE, i > 0); } static void test_ist_common(int const version) { using galera::KeyData; using galera::TrxHandle; using galera::KeyOS; TrxHandleMaster::Pool lp(TrxHandleMaster::LOCAL_STORAGE_SIZE(), 4, "ist_common"); TrxHandleSlave::Pool sp(sizeof(TrxHandleSlave), 4, "ist_common"); int const trx_version(select_trx_version(version)); TrxHandleMaster::Params const trx_params("", trx_version, galera::KeySet::MAX_VERSION); std::string const dir("."); gu::Config conf_sender; galera::ReplicatorSMM::InitConfig(conf_sender, NULL, NULL); std::string const gcache_sender_file("ist_sender.cache"); conf_sender.set("gcache.name", gcache_sender_file); conf_sender.set("gcache.size", "1M"); gcache::GCache* gcache_sender = new gcache::GCache(conf_sender, dir); gu::Config conf_receiver; galera::ReplicatorSMM::InitConfig(conf_receiver, NULL, NULL); std::string const gcache_receiver_file("ist_receiver.cache"); conf_receiver.set("gcache.name", gcache_receiver_file); conf_receiver.set("gcache.size", "1M"); gcache::GCache* gcache_receiver = new gcache::GCache(conf_receiver, dir); std::string receiver_addr("tcp://127.0.0.1:0"); wsrep_uuid_t uuid; gu_uuid_generate(reinterpret_cast(&uuid), 0, 0); mark_point(); // populate gcache for (size_t i(1); i <= 10; ++i) { if (i % 3) { store_trx(gcache_sender, lp, trx_params, uuid, i); } else { store_cc(gcache_sender, uuid, i); } } mark_point(); 
receiver_args rargs(receiver_addr, 1, 10, sp, *gcache_receiver, version); sender_args sargs(*gcache_sender, rargs.listen_addr_, 1, 10, version); gu_barrier_init(&start_barrier, 0, 2); gu_thread_t sender_thread, receiver_thread; gu_thread_create(&sender_thread, 0, &sender_thd, &sargs); mark_point(); usleep(100000); gu_thread_create(&receiver_thread, 0, &receiver_thd, &rargs); mark_point(); gu_thread_join(sender_thread, 0); gu_thread_join(receiver_thread, 0); mark_point(); delete gcache_sender; delete gcache_receiver; mark_point(); unlink(gcache_sender_file.c_str()); unlink(gcache_receiver_file.c_str()); } START_TEST(test_ist_v7) { test_ist_common(7); /* trx ver: 3, STR ver: 2, alignment: none */ } END_TEST START_TEST(test_ist_v8) { test_ist_common(8); /* trx ver: 3, STR ver: 2, alignment: 8 */ } END_TEST START_TEST(test_ist_v9) { test_ist_common(9); /* trx ver: 4, STR ver: 2, alignment: 8 */ } END_TEST START_TEST(test_ist_v10) { test_ist_common(10); /* trx ver: 5, STR ver: 3, alignment: 8 */ } END_TEST Suite* ist_suite() { Suite* s = suite_create("ist"); TCase* tc; tc = tcase_create("test_ist_message"); tcase_add_test(tc, test_ist_message); suite_add_tcase(s, tc); tc = tcase_create("test_ist_v7"); tcase_set_timeout(tc, 60); tcase_add_test(tc, test_ist_v7); tc = tcase_create("test_ist_v8"); tcase_set_timeout(tc, 60); tcase_add_test(tc, test_ist_v8); tc = tcase_create("test_ist_v9"); tcase_set_timeout(tc, 60); tcase_add_test(tc, test_ist_v9); tc = tcase_create("test_ist_v10"); tcase_set_timeout(tc, 60); tcase_add_test(tc, test_ist_v10); suite_add_tcase(s, tc); return s; } galera-26.4.3/galera/tests/write_set_check.cpp0000664000177500017540000005121213540715002017622 0ustar dbartmy/* * Copyright (C) 2010-2018 Codership Oy */ #include "write_set.hpp" #include "mapped_buffer.hpp" #include "gu_logger.hpp" #include "certification.hpp" #include "replicator_smm.hpp" #include "wsdb.cpp" #include "gcs_action_source.hpp" #include "galera_service_thd.hpp" #include #include 
namespace { class TestEnv { class GCache_setup { public: GCache_setup(gu::Config& conf) : name_("write_set_test.gcache") { conf.set("gcache.name", name_); conf.set("gcache.size", "1M"); log_info << "conf for gcache: " << conf; } ~GCache_setup() { unlink(name_.c_str()); } private: std::string const name_; }; public: TestEnv() : conf_ (), init_ (conf_, NULL, NULL), gcache_setup_(conf_), gcache_ (conf_, "."), gcs_ (conf_, gcache_), thd_ (gcs_, gcache_) {} ~TestEnv() {} gu::Config& conf() { return conf_; } galera::ServiceThd& thd() { return thd_; } private: gu::Config conf_; galera::ReplicatorSMM::InitConfig init_; GCache_setup gcache_setup_; gcache::GCache gcache_; galera::DummyGcs gcs_; galera::ServiceThd thd_; }; } using namespace std; using namespace galera; typedef std::vector KeyPartSequence; START_TEST(test_key1) { static char k1[16]; static char k2[256]; static char k3[1 << 21]; static char k4[(1 << 22) - 5]; memset(k1, 0xab, sizeof(k1)); memset(k2, 0xcd, sizeof(k2)); memset(k3, 0x9e, sizeof(k3)); memset(k4, 0x8f, sizeof(k4)); const wsrep_buf_t kiovec[4] = { {k1, sizeof k1 }, {k2, sizeof k2 }, {k3, sizeof k3 }, {k4, sizeof k4 } }; KeyOS key(1, kiovec, 4, 0); size_t expected_size(0); #ifndef GALERA_KEY_VLQ expected_size += 1 + std::min(sizeof k1, size_t(0xff)); expected_size += 1 + std::min(sizeof k2, size_t(0xff)); expected_size += 1 + std::min(sizeof k3, size_t(0xff)); expected_size += 1 + std::min(sizeof k4, size_t(0xff)); expected_size += sizeof(uint16_t); #else expected_size += gu::uleb128_size(sizeof k1) + sizeof k1; expected_size += gu::uleb128_size(sizeof k2) + sizeof k2; expected_size += gu::uleb128_size(sizeof k3) + sizeof k3; expected_size += gu::uleb128_size(sizeof k4) + sizeof k4; expected_size += gu::uleb128_size(expected_size); #endif fail_unless(key.serial_size() == expected_size, "%ld <-> %ld", key.serial_size(), expected_size); KeyPartSequence kp(key.key_parts()); fail_unless(kp.size() == 4); gu::Buffer buf(key.serial_size()); 
key.serialize(&buf[0], buf.size(), 0); KeyOS key2(1); key2.unserialize(&buf[0], buf.size(), 0); fail_unless(key2 == key); } END_TEST START_TEST(test_key2) { static char k1[16]; static char k2[256]; static char k3[1 << 21]; static char k4[(1 << 22) - 5]; memset(k1, 0xab, sizeof(k1)); memset(k2, 0xcd, sizeof(k2)); memset(k3, 0x9e, sizeof(k3)); memset(k4, 0x8f, sizeof(k4)); const wsrep_buf_t kiovec[4] = { {k1, sizeof k1 }, {k2, sizeof k2 }, {k3, sizeof k3 }, {k4, sizeof k4 } }; KeyOS key(2, kiovec, 4, 0); size_t expected_size(0); expected_size += 1; // flags #ifndef GALERA_KEY_VLQ expected_size += 1 + std::min(sizeof k1, size_t(0xff)); expected_size += 1 + std::min(sizeof k2, size_t(0xff)); expected_size += 1 + std::min(sizeof k3, size_t(0xff)); expected_size += 1 + std::min(sizeof k4, size_t(0xff)); expected_size += sizeof(uint16_t); #else expected_size += gu::uleb128_size(sizeof k1) + sizeof k1; expected_size += gu::uleb128_size(sizeof k2) + sizeof k2; expected_size += gu::uleb128_size(sizeof k3) + sizeof k3; expected_size += gu::uleb128_size(sizeof k4) + sizeof k4; expected_size += gu::uleb128_size(expected_size); #endif fail_unless(key.serial_size() == expected_size, "%ld <-> %ld", key.serial_size(), expected_size); KeyPartSequence kp(key.key_parts()); fail_unless(kp.size() == 4); gu::Buffer buf(key.serial_size()); key.serialize(&buf[0], buf.size(), 0); KeyOS key2(2); key2.unserialize(&buf[0], buf.size(), 0); fail_unless(key2 == key); } END_TEST START_TEST(test_write_set1) { WriteSet ws(1); const wsrep_buf_t key1[2] = { {void_cast("dbt\0t1"), 6}, {void_cast("aaa") , 3} }; const wsrep_buf_t key2[2] = { {void_cast("dbt\0t2"), 6}, {void_cast("bbbb"), 4} }; const char* rbr = "rbrbuf"; size_t rbr_len = 6; log_info << "ws0 " << ws.serial_size(); ws.append_key(KeyData(1, key1, 2, WSREP_KEY_EXCLUSIVE, true)); log_info << "ws1 " << ws.serial_size(); ws.append_key(KeyData(1, key2, 2, WSREP_KEY_EXCLUSIVE, true)); log_info << "ws2 " << ws.serial_size(); ws.append_data(rbr, 
rbr_len); gu::Buffer rbrbuf(rbr, rbr + rbr_len); log_info << "rbrlen " << gu::serial_size4(rbrbuf); log_info << "wsrbr " << ws.serial_size(); gu::Buffer buf(ws.serial_size()); ws.serialize(&buf[0], buf.size(), 0); size_t expected_size = 4 // row key sequence size #ifndef GALERA_KEY_VLQ + 2 + 1 + 6 + 1 + 3 // key1 + 2 + 1 + 6 + 1 + 4 // key2 #else + 1 + 1 + 6 + 1 + 3 // key1 + 1 + 1 + 6 + 1 + 4 // key2 #endif + 4 + 6; // rbr fail_unless(buf.size() == expected_size, "%zd <-> %zd <-> %zd", buf.size(), expected_size, ws.serial_size()); WriteSet ws2(0); size_t ret = ws2.unserialize(&buf[0], buf.size(), 0); fail_unless(ret == expected_size); WriteSet::KeySequence rks; ws.get_keys(rks); WriteSet::KeySequence rks2; ws.get_keys(rks2); fail_unless(rks2 == rks); fail_unless(ws2.get_data() == ws.get_data()); } END_TEST START_TEST(test_write_set2) { WriteSet ws(2); const wsrep_buf_t key1[2] = { {void_cast("dbt\0t1"), 6}, {void_cast("aaa") , 3} }; const wsrep_buf_t key2[2] = { {void_cast("dbt\0t2"), 6}, {void_cast("bbbb"), 4} }; const char* rbr = "rbrbuf"; size_t rbr_len = 6; log_info << "ws0 " << ws.serial_size(); ws.append_key(KeyData(2, key1, 2, WSREP_KEY_EXCLUSIVE, true)); log_info << "ws1 " << ws.serial_size(); ws.append_key(KeyData(2, key2, 2, WSREP_KEY_EXCLUSIVE, true)); log_info << "ws2 " << ws.serial_size(); ws.append_data(rbr, rbr_len); gu::Buffer rbrbuf(rbr, rbr + rbr_len); log_info << "rbrlen " << gu::serial_size4(rbrbuf); log_info << "wsrbr " << ws.serial_size(); gu::Buffer buf(ws.serial_size()); ws.serialize(&buf[0], buf.size(), 0); size_t expected_size = 4 // row key sequence size #ifndef GALERA_KEY_VLQ + 2 + 1 + 1 + 6 + 1 + 3 // key1 + 2 + 1 + 1 + 6 + 1 + 4 // key2 #else + 1 + 1 + 6 + 1 + 3 // key1 + 1 + 1 + 6 + 1 + 4 // key2 #endif + 4 + 6; // rbr fail_unless(buf.size() == expected_size, "%zd <-> %zd <-> %zd", buf.size(), expected_size, ws.serial_size()); WriteSet ws2(2); size_t ret = ws2.unserialize(&buf[0], buf.size(), 0); fail_unless(ret == expected_size); 
WriteSet::KeySequence rks; ws.get_keys(rks); WriteSet::KeySequence rks2; ws2.get_keys(rks2); fail_unless(rks2 == rks); fail_unless(ws2.get_data() == ws.get_data()); } END_TEST START_TEST(test_mapped_buffer) { string wd("/tmp"); MappedBuffer mb(wd, 1 << 8); mb.resize(16); for (size_t i = 0; i < 16; ++i) { mb[i] = static_cast(i); } mb.resize(1 << 8); for (size_t i = 0; i < 16; ++i) { fail_unless(mb[i] == static_cast(i)); } for (size_t i = 16; i < (1 << 8); ++i) { mb[i] = static_cast(i); } mb.resize(1 << 20); for (size_t i = 0; i < (1 << 8); ++i) { fail_unless(mb[i] == static_cast(i)); } for (size_t i = 0; i < (1 << 20); ++i) { mb[i] = static_cast(i); } } END_TEST static TrxHandle::LocalPool lp(TrxHandle::LOCAL_STORAGE_SIZE(), 4, "ws_local_pool"); static TrxHandle::SlavePool sp(sizeof(TrxHandle), 4, "ws_slave_pool"); START_TEST(test_cert_hierarchical_v1) { log_info << "test_cert_hierarchical_v1"; struct wsinfo_ { wsrep_uuid_t uuid; wsrep_conn_id_t conn_id; wsrep_trx_id_t trx_id; wsrep_buf_t key[3]; size_t iov_len; wsrep_seqno_t local_seqno; wsrep_seqno_t global_seqno; wsrep_seqno_t last_seen_seqno; wsrep_seqno_t expected_depends_seqno; int flags; Certification::TestResult result; } wsi[] = { // 1 - 3, test symmetric case for dependencies // 1: no dependencies { { {1, } }, 1, 1, { {void_cast("1"), 1}, }, 1, 1, 1, 0, 0, 0, Certification::TEST_OK}, // 2: depends on 1, no conflict { { {1, } }, 1, 1, { {void_cast("1"), 1}, {void_cast("1"), 1} }, 2, 2, 2, 0, 1, 0, Certification::TEST_OK}, // 3: depends on 2, no conflict { { {1, } }, 1, 1, { {void_cast("1"), 1}, }, 1, 3, 3, 0, 2, 0, Certification::TEST_OK}, // 4 - 8, test symmetric case for conflicts // 4: depends on 3, no conflict { { {1, } }, 1, 1, { {void_cast("1"), 1}, }, 1, 4, 4, 3, 3, 0, Certification::TEST_OK}, // 5: conflict with 4 { { {2, } }, 1, 1, { {void_cast("1"), 1}, {void_cast("1"), 1} }, 2, 5, 5, 3, -1, 0, Certification::TEST_FAILED}, // 6: depends on 4 (failed 5 not present in index), no conflict { { {2, } 
}, 1, 1, { {void_cast("1"), 1}, {void_cast("1"), 1} }, 2, 6, 6, 5, 4, 0, Certification::TEST_OK}, // 7: conflicts with 6 { { {1, } }, 1, 1, { {void_cast("1"), 1}, }, 1, 7, 7, 5, -1, 0, Certification::TEST_FAILED}, // 8: to isolation: must not conflict, depends on global_seqno - 1 { { {1, } }, 1, 1, { {void_cast("1"), 1}, }, 1, 8, 8, 5, 7, TrxHandle::F_ISOLATION, Certification::TEST_OK}, // 9: to isolation: must not conflict, depends on global_seqno - 1 { { {2, } }, 1, 1, { {void_cast("1"), 1}, }, 1, 9, 9, 5, 8, TrxHandle::F_ISOLATION, Certification::TEST_OK}, }; size_t nws(sizeof(wsi)/sizeof(wsi[0])); TestEnv env; galera::Certification cert(env.conf(), env.thd()); int const version(1); cert.assign_initial_position(0, version); galera::TrxHandle::Params const trx_params("", version,KeySet::MAX_VERSION); mark_point(); for (size_t i(0); i < nws; ++i) { TrxHandle* trx(TrxHandle::New(lp, trx_params, wsi[i].uuid, wsi[i].conn_id, wsi[i].trx_id)); trx->append_key(KeyData(1, wsi[i].key, wsi[i].iov_len, WSREP_KEY_EXCLUSIVE, true)); trx->set_last_seen_seqno(wsi[i].last_seen_seqno); trx->set_flags(trx->flags() | wsi[i].flags); trx->flush(0); // serialize/unserialize to verify that ver1 trx is serializable const galera::MappedBuffer& wc(trx->write_set_collection()); gu::Buffer buf(wc.size()); std::copy(&wc[0], &wc[0] + wc.size(), &buf[0]); trx->unref(); trx = TrxHandle::New(sp); size_t offset(trx->unserialize(&buf[0], buf.size(), 0)); log_info << "ws[" << i << "]: " << buf.size() - offset; trx->append_write_set(&buf[0] + offset, buf.size() - offset); trx->set_received(0, wsi[i].local_seqno, wsi[i].global_seqno); Certification::TestResult result(cert.append_trx(trx)); fail_unless(result == wsi[i].result, "wsi: %zu, g: %lld r: %d er: %d", i, trx->global_seqno(), result, wsi[i].result); fail_unless(trx->depends_seqno() == wsi[i].expected_depends_seqno, "wsi: %zu g: %lld ld: %lld eld: %lld", i, trx->global_seqno(), trx->depends_seqno(), wsi[i].expected_depends_seqno); 
cert.set_trx_committed(trx); trx->unref(); } } END_TEST START_TEST(test_cert_hierarchical_v2) { log_info << "test_cert_hierarchical_v2"; const int version(2); struct wsinfo_ { wsrep_uuid_t uuid; wsrep_conn_id_t conn_id; wsrep_trx_id_t trx_id; wsrep_buf_t key[3]; size_t iov_len; bool shared; wsrep_seqno_t local_seqno; wsrep_seqno_t global_seqno; wsrep_seqno_t last_seen_seqno; wsrep_seqno_t expected_depends_seqno; int flags; Certification::TestResult result; } wsi[] = { // 1 - 4: shared - shared // First four cases are shared keys, they should not collide or // generate dependency // 1: no dependencies { { {1, } }, 1, 1, { {void_cast("1"), 1}, }, 1, true, 1, 1, 0, 0, 0, Certification::TEST_OK}, // 2: no dependencies { { {1, } }, 1, 2, { {void_cast("1"), 1}, {void_cast("1"), 1}, }, 2, true, 2, 2, 0, 0, 0, Certification::TEST_OK}, // 3: no dependencies { { {2, } }, 1, 3, { {void_cast("1"), 1}, {void_cast("1"), 1}, }, 2, true, 3, 3, 0, 0, 0, Certification::TEST_OK}, // 4: no dependencies { { {3, } }, 1, 4, { {void_cast("1"), 1}, }, 1, true, 4, 4, 0, 0, 0, Certification::TEST_OK}, // 5: shared - exclusive // 5: depends on 4 { { {2, } }, 1, 5, { {void_cast("1"), 1}, {void_cast("1"), 1}, }, 2, false, 5, 5, 0, 4, 0, Certification::TEST_OK}, // 6 - 8: exclusive - shared // 6: collides with 5 { { {1, } }, 1, 6, { {void_cast("1"), 1}, {void_cast("1"), 1}, }, 2, true, 6, 6, 4, -1, 0, Certification::TEST_FAILED}, // 7: collides with 5 { { {1, } }, 1, 7, { {void_cast("1"), 1}, }, 1, true, 7, 7, 4, -1, 0, Certification::TEST_FAILED}, // 8: collides with 5 { { {1, } }, 1, 8, { {void_cast("1"), 1}, {void_cast("1"), 1}, {void_cast("1"), 1}}, 3, true, 8, 8, 4, -1, 0, Certification::TEST_FAILED}, // 9 - 10: shared key shadows dependency to 5 // 9: depends on 5 { { {2, } }, 1, 9, { {void_cast("1"), 1}, {void_cast("1"), 1}, }, 2, true, 9, 9, 0, 5, 0, Certification::TEST_OK}, // 10: depends on 5 { { {2, } }, 1, 10, { {void_cast("1"), 1}, {void_cast("1"), 1}, }, 2, true, 10, 10, 6, 5, 0, 
Certification::TEST_OK}, // 11 - 13: exclusive - shared - exclusive dependency { { {2, } }, 1, 11, { {void_cast("1"), 1}, {void_cast("1"), 1}, }, 2, false, 11, 11, 10, 10, 0, Certification::TEST_OK}, { { {2, } }, 1, 12, { {void_cast("1"), 1}, {void_cast("1"), 1}, }, 2, true, 12, 12, 10, 11, 0, Certification::TEST_OK}, { { {2, } }, 1, 13, { {void_cast("1"), 1}, {void_cast("1"), 1}, }, 2, false, 13, 13, 10, 12, 0, Certification::TEST_OK}, }; size_t nws(sizeof(wsi)/sizeof(wsi[0])); TestEnv env; galera::Certification cert(env.conf(), env.thd()); cert.assign_initial_position(0, version); galera::TrxHandle::Params const trx_params("", version,KeySet::MAX_VERSION); mark_point(); for (size_t i(0); i < nws; ++i) { TrxHandle* trx(TrxHandle::New(lp, trx_params, wsi[i].uuid, wsi[i].conn_id, wsi[i].trx_id)); trx->append_key(KeyData(version, wsi[i].key, wsi[i].iov_len, (wsi[i].shared ? WSREP_KEY_SHARED : WSREP_KEY_EXCLUSIVE), true)); trx->set_last_seen_seqno(wsi[i].last_seen_seqno); trx->set_flags(trx->flags() | wsi[i].flags); trx->flush(0); // serialize/unserialize to verify that ver1 trx is serializable const galera::MappedBuffer& wc(trx->write_set_collection()); gu::Buffer buf(wc.size()); std::copy(&wc[0], &wc[0] + wc.size(), &buf[0]); trx->unref(); trx = TrxHandle::New(sp); size_t offset(trx->unserialize(&buf[0], buf.size(), 0)); log_info << "ws[" << i << "]: " << buf.size() - offset; trx->append_write_set(&buf[0] + offset, buf.size() - offset); trx->set_received(0, wsi[i].local_seqno, wsi[i].global_seqno); Certification::TestResult result(cert.append_trx(trx)); fail_unless(result == wsi[i].result, "g: %lld res: %d exp: %d", trx->global_seqno(), result, wsi[i].result); fail_unless(trx->depends_seqno() == wsi[i].expected_depends_seqno, "wsi: %zu g: %lld ld: %lld eld: %lld", i, trx->global_seqno(), trx->depends_seqno(), wsi[i].expected_depends_seqno); cert.set_trx_committed(trx); trx->unref(); } } END_TEST START_TEST(test_trac_726) { log_info << "test_trac_726"; const int 
version(2); TestEnv env; galera::Certification cert(env.conf(), env.thd()); galera::TrxHandle::Params const trx_params("", version,KeySet::MAX_VERSION); wsrep_uuid_t uuid1 = {{1, }}; wsrep_uuid_t uuid2 = {{2, }}; cert.assign_initial_position(0, version); mark_point(); wsrep_buf_t key1 = {void_cast("1"), 1}; wsrep_buf_t key2 = {void_cast("2"), 1}; { TrxHandle* trx(TrxHandle::New(lp, trx_params, uuid1, 0, 0)); trx->append_key(KeyData(version, &key1, 1, WSREP_KEY_EXCLUSIVE, true)); trx->set_last_seen_seqno(0); trx->flush(0); // serialize/unserialize to verify that ver1 trx is serializable const galera::MappedBuffer& wc(trx->write_set_collection()); gu::Buffer buf(wc.size()); std::copy(&wc[0], &wc[0] + wc.size(), &buf[0]); trx->unref(); trx = TrxHandle::New(sp); size_t offset(trx->unserialize(&buf[0], buf.size(), 0)); trx->append_write_set(&buf[0] + offset, buf.size() - offset); trx->set_received(0, 1, 1); Certification::TestResult result(cert.append_trx(trx)); fail_unless(result == Certification::TEST_OK); cert.set_trx_committed(trx); trx->unref(); } { TrxHandle* trx(TrxHandle::New(lp, trx_params, uuid2, 0, 0)); trx->append_key(KeyData(version, &key2, 1, WSREP_KEY_EXCLUSIVE, true)); trx->append_key(KeyData(version, &key2, 1, WSREP_KEY_SHARED, true)); trx->append_key(KeyData(version, &key1, 1, WSREP_KEY_EXCLUSIVE, true)); trx->set_last_seen_seqno(0); trx->flush(0); // serialize/unserialize to verify that ver1 trx is serializable const galera::MappedBuffer& wc(trx->write_set_collection()); gu::Buffer buf(wc.size()); std::copy(&wc[0], &wc[0] + wc.size(), &buf[0]); trx->unref(); trx = TrxHandle::New(sp); size_t offset(trx->unserialize(&buf[0], buf.size(), 0)); trx->append_write_set(&buf[0] + offset, buf.size() - offset); trx->set_received(0, 2, 2); Certification::TestResult result(cert.append_trx(trx)); fail_unless(result == Certification::TEST_FAILED); cert.set_trx_committed(trx); trx->unref(); } } END_TEST Suite* write_set_suite() { Suite* s = suite_create("write_set"); 
TCase* tc; tc = tcase_create("test_key1"); tcase_add_test(tc, test_key1); suite_add_tcase(s, tc); tc = tcase_create("test_key2"); tcase_add_test(tc, test_key2); suite_add_tcase(s, tc); tc = tcase_create("test_write_set1"); tcase_add_test(tc, test_write_set1); suite_add_tcase(s, tc); tc = tcase_create("test_write_set2"); tcase_add_test(tc, test_write_set2); suite_add_tcase(s, tc); tc = tcase_create("test_mapped_buffer"); tcase_add_test(tc, test_mapped_buffer); suite_add_tcase(s, tc); tc = tcase_create("test_cert_hierarchical_v1"); tcase_add_test(tc, test_cert_hierarchical_v1); tcase_set_timeout(tc, 20); suite_add_tcase(s, tc); tc = tcase_create("test_cert_hierarchical_v2"); tcase_add_test(tc, test_cert_hierarchical_v2); tcase_set_timeout(tc, 20); suite_add_tcase(s, tc); tc = tcase_create("test_trac_726"); tcase_add_test(tc, test_trac_726); tcase_set_timeout(tc, 20); suite_add_tcase(s, tc); return s; } galera-26.4.3/galera/tests/service_thd_check.cpp0000664000177500017540000000705413540715002020121 0ustar dbartmy/* * Copyright (C) 2010-2017 Codership Oy */ #define __STDC_FORMAT_MACROS #include "../src/galera_service_thd.hpp" #include "../src/replicator_smm.hpp" #include #include #include namespace { class TestEnv { class GCache_setup { public: GCache_setup(gu::Config& conf) : name_("service_thd_check.gcache") { conf.set("gcache.name", name_); conf.set("gcache.size", "1M"); log_info << "conf for gcache: " << conf; } ~GCache_setup() { unlink(name_.c_str()); } private: std::string const name_; }; public: TestEnv() : conf_ (), init_ (conf_, NULL, NULL), gcache_setup_(conf_), gcache_ (conf_, "."), gcs_ (conf_, gcache_) {} gcache::GCache& gcache() { return gcache_; } galera::DummyGcs& gcs() { return gcs_; } private: gu::Config conf_; galera::ReplicatorSMM::InitConfig init_; GCache_setup gcache_setup_; gcache::GCache gcache_; galera::DummyGcs gcs_; }; } using namespace galera; START_TEST(service_thd1) { TestEnv env; ServiceThd* thd = new ServiceThd(env.gcs(), env.gcache()); 
fail_if (thd == 0); delete thd; } END_TEST #define TEST_USLEEP 1000 // 1ms #define WAIT_FOR(cond) \ { int count = 1000; while (--count && !(cond)) { usleep (TEST_USLEEP); }} START_TEST(service_thd2) { TestEnv env; DummyGcs& conn(env.gcs()); ServiceThd* thd = new ServiceThd(conn, env.gcache()); gu::UUID const state_uuid(NULL, 0); fail_if (thd == 0); conn.set_last_applied(gu::GTID(state_uuid, 0)); gcs_seqno_t seqno = 1; thd->report_last_committed (seqno); thd->flush(state_uuid); WAIT_FOR(conn.last_applied() == seqno); fail_if (conn.last_applied() != seqno, "seqno = %" PRId64 ", expected %" PRId64, conn.last_applied(), seqno); seqno = 5; thd->report_last_committed (seqno); thd->flush(state_uuid); WAIT_FOR(conn.last_applied() == seqno); fail_if (conn.last_applied() != seqno, "seqno = %" PRId64 ", expected %" PRId64, conn.last_applied(), seqno); thd->report_last_committed (3); thd->flush(state_uuid); WAIT_FOR(conn.last_applied() == seqno); fail_if (conn.last_applied() != seqno, "seqno = %" PRId64 ", expected %" PRId64, conn.last_applied(), seqno); thd->reset(); seqno = 3; thd->report_last_committed (seqno); thd->flush(state_uuid); WAIT_FOR(conn.last_applied() == seqno); fail_if (conn.last_applied() != seqno, "seqno = %" PRId64 ", expected %" PRId64, conn.last_applied(), seqno); delete thd; } END_TEST START_TEST(service_thd3) { TestEnv env; ServiceThd* thd = new ServiceThd(env.gcs(), env.gcache()); fail_if (thd == 0); // so far for empty GCache the following should be a noop. 
thd->release_seqno(-1); thd->release_seqno(2345); thd->release_seqno(234645676); delete thd; } END_TEST Suite* service_thd_suite() { Suite* s = suite_create ("service_thd"); TCase* tc; tc = tcase_create ("service_thd"); tcase_add_test (tc, service_thd1); tcase_add_test (tc, service_thd2); tcase_add_test (tc, service_thd3); suite_add_tcase (s, tc); return s; } galera-26.4.3/galera/tests/galera_check.cpp0000664000177500017540000000326713540715002017057 0ustar dbartmy/* * Copyright (C) 2012-2018 Codership Oy */ #include #include #include #include /* * Suite descriptions: forward-declare and add to array */ typedef Suite* (*suite_creator_t) (void); extern Suite* data_set_suite(); extern Suite* key_set_suite(); extern Suite* write_set_ng_suite(); extern Suite* certification_suite(); //extern Suite* write_set_suite(); extern Suite* trx_handle_suite(); extern Suite* service_thd_suite(); extern Suite* ist_suite(); extern Suite* saved_state_suite(); extern Suite* defaults_suite(); static suite_creator_t suites[] = { data_set_suite, key_set_suite, write_set_ng_suite, certification_suite, trx_handle_suite, service_thd_suite, ist_suite, saved_state_suite, defaults_suite, 0 }; extern "C" { #include } #define LOG_FILE "galera_check.log" int main(int argc, char* argv[]) { bool no_fork = (argc >= 2 && std::string(argv[1]) == "nofork"); FILE* log_file = 0; if (!no_fork) { log_file = fopen (LOG_FILE, "w"); if (!log_file) return EXIT_FAILURE; gu_conf_set_log_file (log_file); } gu_conf_debug_on(); int failed = 0; for (int i = 0; suites[i] != 0; ++i) { SRunner* sr = srunner_create(suites[i]()); if (no_fork) srunner_set_fork_status(sr, CK_NOFORK); srunner_run_all(sr, CK_NORMAL); failed += srunner_ntests_failed(sr); srunner_free(sr); } if (log_file != 0) fclose(log_file); printf ("Total tests failed: %d\n", failed); if (0 == failed && 0 != log_file) ::unlink(LOG_FILE); return failed == 0 ? 
EXIT_SUCCESS : EXIT_FAILURE; } galera-26.4.3/garb/0000775000177500017540000000000013540715002012271 5ustar dbartmygalera-26.4.3/garb/garb_logger.cpp0000664000177500017540000000213513540715002015250 0ustar dbartmy/* Copyright (C) 2011 Codership Oy */ #include "garb_logger.hpp" #include #include #include #include namespace garb { void set_logfile (const std::string& fname) { FILE* log_file = fopen (fname.c_str(), "a"); if (!log_file) { gu_throw_error (ENOENT) << "Failed to open '" << fname << "' for appending"; } gu_conf_set_log_file (log_file); } static void log_to_syslog (int level, const char* msg) { int p = LOG_NOTICE; switch (level) { case GU_LOG_FATAL: p = LOG_CRIT; break; case GU_LOG_ERROR: p = LOG_ERR; break; case GU_LOG_WARN: p = LOG_WARNING; break; case GU_LOG_INFO: p = LOG_INFO; break; case GU_LOG_DEBUG: p = LOG_DEBUG; break; } syslog (p | LOG_DAEMON, "%s", msg); } void set_syslog () { openlog ("garbd", LOG_PID, LOG_DAEMON); gu_conf_set_log_callback (log_to_syslog); } } /* namespace garb */ galera-26.4.3/garb/garb_gcs.cpp0000664000177500017540000000741013540715002014546 0ustar dbartmy/* * Copyright (C) 2011-2016 Codership Oy */ #include "garb_gcs.hpp" namespace garb { static int const REPL_PROTO_VER(127); static int const APPL_PROTO_VER(127); Gcs::Gcs (gu::Config& gconf, const std::string& name, const std::string& address, const std::string& group) : closed_ (true), gcs_ (gcs_create (reinterpret_cast(&gconf), NULL, name.c_str(), "", REPL_PROTO_VER, APPL_PROTO_VER)) { if (!gcs_) { gu_throw_fatal << "Failed to create GCS object"; } ssize_t ret = gcs_open (gcs_, group.c_str(), address.c_str(), false); if (ret < 0) { gcs_destroy (gcs_); gu_throw_error(-ret) << "Failed to open connection to group"; } closed_ = false; } Gcs::~Gcs () { if (!closed_) { log_warn << "Destroying non-closed object, bad idea"; close (); } gcs_destroy (gcs_); } void Gcs::recv (gcs_action& act) { again: ssize_t ret = gcs_recv(gcs_, &act); if (gu_unlikely(ret < 0)) { if (-ECANCELED == 
ret) { ret = gcs_resume_recv (gcs_); if (0 == ret) goto again; } log_fatal << "Receiving from group failed: " << ret << " (" << strerror(-ret) << ")"; gu_throw_error(-ret) << "Receiving from group failed"; } } void Gcs::request_state_transfer (const std::string& request, const std::string& donor) { gcs_seqno_t order; log_info << "Sending state transfer request: '" << request << "', size: " << request.length(); /* Need to substitute the first ':' for \0 */ ssize_t req_len = request.length() + 1 /* \0 */; char* const req_str(reinterpret_cast(::malloc( req_len + 1 /* potentially need one more \0 */))); // cppcheck-suppress nullPointer if (!req_str) { gu_throw_error (ENOMEM) << "Cannot allocate " << req_len << " bytes for state transfer request"; } ::strcpy(req_str, request.c_str()); char* column_ptr = ::strchr(req_str, ':'); if (column_ptr) { *column_ptr = '\0'; } else /* append an empty string */ { req_str[req_len] = '\0'; req_len++; } ssize_t ret; do { gu_uuid_t ist_uuid = {{0, }}; gcs_seqno_t ist_seqno = GCS_SEQNO_ILL; // for garb we use the lowest str_version. 
ret = gcs_request_state_transfer (gcs_, 0, req_str, req_len, donor.c_str(), gu::GTID(ist_uuid, ist_seqno), order); } while (-EAGAIN == ret && (usleep(1000000), true)); free (req_str); if (ret < 0) { log_fatal << "State transfer request failed: " << ret << " (" << strerror(-ret) << ")"; gu_throw_error(-ret) << "State transfer request failed"; } } void Gcs::join (const gu::GTID& gtid, int const code) { ssize_t const ret(gcs_join (gcs_, gtid, code)); if (ret < 0) { log_fatal << "Joining group failed: " << ret << " (" << strerror(-ret) << ")"; gu_throw_error(-ret) << "Joining group failed"; } } void Gcs::set_last_applied (const gu::GTID& gtid) { (void) gcs_set_last_applied(gcs_, gtid); } void Gcs::close () { if (!closed_) { ssize_t ret = gcs_close (gcs_); if (ret < 0) { log_error << "Failed to close connection to group"; } else { closed_ = true; } } else { log_warn << "Attempt to close a closed connection"; } } } /* namespace garb */ galera-26.4.3/garb/garb_recv_loop.cpp0000664000177500017540000000630113540715002015760 0ustar dbartmy/* Copyright (C) 2011-2016 Codership Oy */ #include "garb_recv_loop.hpp" #include namespace garb { static Gcs* global_gcs(0); void signal_handler (int signum) { log_info << "Received signal " << signum; global_gcs->close(); } RecvLoop::RecvLoop (const Config& config) : config_(config), gconf_ (), params_(gconf_), parse_ (gconf_, config_.options()), gcs_ (gconf_, config_.name(), config_.address(), config_.group()), uuid_ (GU_UUID_NIL), seqno_ (GCS_SEQNO_ILL), proto_ (0) { /* set up signal handlers */ global_gcs = &gcs_; struct sigaction sa; memset (&sa, 0, sizeof(sa)); sa.sa_handler = signal_handler; if (sigaction (SIGTERM, &sa, NULL)) { gu_throw_error(errno) << "Falied to install signal handler for signal " << "SIGTERM"; } if (sigaction (SIGINT, &sa, NULL)) { gu_throw_error(errno) << "Falied to install signal handler for signal " << "SIGINT"; } loop(); } void RecvLoop::loop() { while (1) { gcs_action act; gcs_.recv (act); switch (act.type) 
{ case GCS_ACT_WRITESET: seqno_ = act.seqno_g; if (gu_unlikely(proto_ == 0 && !(seqno_ & 127))) /* report_interval_ of 128 in old protocol */ { gcs_.set_last_applied (gu::GTID(uuid_, seqno_)); } break; case GCS_ACT_COMMIT_CUT: break; case GCS_ACT_STATE_REQ: /* we can't donate state */ gcs_.join (gu::GTID(uuid_, seqno_),-ENOSYS); break; case GCS_ACT_CCHANGE: { gcs_act_cchange const cc(act.buf, act.size); if (cc.conf_id > 0) /* PC */ { int const my_idx(act.seqno_g); assert(my_idx >= 0); gcs_node_state const my_state(cc.memb[my_idx].state_); if (GCS_NODE_STATE_PRIM == my_state) { uuid_ = cc.uuid; seqno_ = cc.seqno; gcs_.request_state_transfer (config_.sst(),config_.donor()); gcs_.join(gu::GTID(cc.uuid, cc.seqno), 0); } proto_ = gcs_.proto_ver(); } else { if (cc.memb.size() == 0) // SELF-LEAVE after closing connection { log_info << "Exiting main loop"; return; } uuid_ = GU_UUID_NIL; seqno_ = GCS_SEQNO_ILL; } if (config_.sst() != Config::DEFAULT_SST) { // we requested custom SST, so we're done here gcs_.close(); } break; } case GCS_ACT_JOIN: case GCS_ACT_SYNC: case GCS_ACT_FLOW: case GCS_ACT_VOTE: case GCS_ACT_SERVICE: case GCS_ACT_ERROR: case GCS_ACT_UNKNOWN: break; } if (act.buf) { ::free(const_cast(act.buf)); } } } } /* namespace garb */ galera-26.4.3/garb/files/0000775000177500017540000000000013540715002013373 5ustar dbartmygalera-26.4.3/garb/files/garb.service0000664000177500017540000000065113540715002015672 0ustar dbartmy# Systemd service file for garbd [Unit] Description=Galera Arbitrator Daemon After=network.target syslog.target [Install] WantedBy=multi-user.target Alias=garbd.service [Service] User=nobody EnvironmentFile=/etc/sysconfig/garb ExecStart=/usr/bin/garb-systemd start # Use SIGINT because with the default SIGTERM # garbd fails to reliably transition to 'destroyed' state KillSignal=SIGINT TimeoutSec=2m PrivateTmp=false galera-26.4.3/garb/files/garb.cnf0000664000177500017540000000077713540715002015011 0ustar dbartmy# Copyright (C) 2012 Codership Oy # 
This config file is to be sourced by garb service script. # A comma-separated list of node addresses (address[:port]) in the cluster # GALERA_NODES="" # Galera cluster name, should be the same as on the rest of the nodes. # GALERA_GROUP="" # Optional Galera internal options string (e.g. SSL settings) # see http://galeracluster.com/documentation-webpages/galeraparameters.html # GALERA_OPTIONS="" # Log file for garbd. Optional, by default logs to syslog # LOG_FILE="" galera-26.4.3/garb/files/freebsd/0000775000177500017540000000000013540715002015005 5ustar dbartmygalera-26.4.3/garb/files/freebsd/garb.sh0000664000177500017540000000577613540715002016273 0ustar dbartmy#!/bin/sh # # garb.sh for rc.d usage (c) 2013 Codership Oy # $Id$ # PROVIDE: garb # REQUIRE: LOGIN # KEYWORD: shutdown # # Add the following line to /etc/rc.conf to enable Galera Arbitrator Daemon (garbd): # garb_enable (bool): Set to "NO" by default. # Set it to "YES" to enable Galera Arbitrator Daemon. # garb_galera_nodes (str): A space-separated list of node addresses (address[:port]) in the cluster # (default empty). # garb_galera_group (str): Galera cluster name, should be the same as on the rest of the nodes. # (default empty). # Optional: # garb_galera_options (str): Optional Galera internal options string (e.g. SSL settings) # see http://www.codership.com/wiki/doku.php?id=galera_parameters # (default empty). # garb_log_file (str): Log file for garbd (default empty). Optional, by default logs to syslog # garb_pid_file (str): Custum PID file path and name. # Default to "/var/run/garb.pid". # . 
/etc/rc.subr name="garb" rcvar=garb_enable load_rc_config $name # set defaults : ${garb_enable="NO"} : ${garb_galera_nodes=""} : ${garb_galera_group=""} : ${garb_galera_options=""} : ${garb_log_file=""} : ${garb_pid_file="/var/run/garb.pid"} procname="/usr/local/bin/garbd" command="/usr/sbin/daemon" command_args="-c -f -u nobody -p $garb_pid_file $procname" start_precmd="${name}_prestart" #start_cmd="${name}_start" start_postcmd="${name}_poststart" stop_precmd="${name}_prestop" #stop_cmd="${name}_stop" #stop_postcmd="${name}_poststop" #extra_commands="reload" #reload_cmd="${name}_reload" export LD_LIBRARY_PATH=/usr/local/lib/gcc44 garb_prestart() { [ "$(id -ur)" != "0" ] && err 4 "root rights are required to start $name" [ -r "$garb_pid_file" ] && err 0 "$procname is already running with PID $(cat $garb_pid_file)" [ -x "$procname" ] || err 5 "$procname is not found" # check that node addresses are configured [ -z "$garb_galera_nodes" ] && err 6 "List of garb_galera_nodes is not configured" [ -z "$garb_galera_group" ] && err 6 "garb_galera_group name is not configured" GALERA_PORT=${GALERA_PORT:-4567} # Concatenate all nodes in the list (for backward compatibility) ADDRESS= for NODE in ${garb_galera_nodes}; do [ -z "$ADDRESS" ] && ADDRESS="$NODE" || ADDRESS="$ADDRESS,$NODE" done command_args="$command_args -a gcomm://$ADDRESS" [ -n "$garb_galera_group" ] && command_args="$command_args -g $garb_galera_group" [ -n "$garb_galera_options" ] && command_args="$command_args -o $garb_galera_options" [ -n "$garb_log_file" ] && command_args="$command_args -l $garb_log_file" return 0 } garb_poststart() { local timeout=15 while [ ! 
-f "$garb_pid_file" -a $timeout -gt 0 ]; do timeout=$(( timeout - 1 )) sleep 1 done return 0 } garb_prestop() { [ "$(id -ur)" != "0" ] && err 4 "root rights are required to stop $name" [ -r $garb_pid_file ] || err 0 "" return 0 } run_rc_command "$1" galera-26.4.3/garb/files/garb-systemd0000775000177500017540000000210213540715002015715 0ustar dbartmy#!/bin/bash -ue # config=/etc/sysconfig/garb log_failure() { echo " ERROR! $@" } program_start() { echo "Starting garbd" /usr/bin/garbd "$@" } start() { if grep -q -E '^# REMOVE' $config;then log_failure "Garbd config $config is not configured yet" return 0 fi [ -f $config ] && . $config # Check that node addresses are configured if [[ -z "${GALERA_NODES:-}" ]]; then log_failure "List of GALERA_NODES is not configured" return 6 fi if [[ -z "${GALERA_GROUP:-}" ]]; then log_failure "GALERA_GROUP name is not configured" return 6 fi GALERA_PORT=${GALERA_PORT:-4567} OPTIONS="-a gcomm://${GALERA_NODES// /,}" # substitute space with comma for backward compatibility [ -n "${GALERA_GROUP:-}" ] && OPTIONS="$OPTIONS -g '$GALERA_GROUP'" [ -n "${GALERA_OPTIONS:-}" ] && OPTIONS="$OPTIONS -o '$GALERA_OPTIONS'" [ -n "${LOG_FILE:-}" ] && OPTIONS="$OPTIONS -l '$LOG_FILE'" eval program_start $OPTIONS } # See how we were called. case "$1" in start) start ;; *) echo $"Usage: $0 {start}" exit 2 esac exit $? 
galera-26.4.3/garb/files/garb.sh0000775000177500017540000000741313540715002014652 0ustar dbartmy#!/bin/bash # # Copyright (C) 2012-2015 Codership Oy # # init.d script for garbd # # chkconfig: - 99 01 # config: /etc/sysconfig/garb | /etc/default/garb ### BEGIN INIT INFO # Provides: garb # Required-Start: $remote_fs $syslog # Required-Stop: $remote_fs $syslog # Should-Start: $network $named $time # Should-Stop: $network $named $time # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: Galera Arbitrator Daemon # Description: The Galera Arbitrator is used as part of clusters # that have only two real Galera servers and need an # extra node to arbitrate split brain situations. ### END INIT INFO # On Debian Jessie, avoid redirecting calls to this script to 'systemctl start' _SYSTEMCTL_SKIP_REDIRECT=true # Source function library. if [ -f /etc/redhat-release ]; then . /etc/init.d/functions . /etc/sysconfig/network config=/etc/sysconfig/garb else . /lib/lsb/init-functions config=/etc/default/garb fi log_failure() { if [ -f /etc/redhat-release ]; then echo -n $* failure "$*" echo else log_failure_msg "$*" fi } PIDFILE=/var/run/garbd prog=$(which garbd) program_start() { local rcode if [ -f /etc/redhat-release ]; then echo -n $"Starting $prog: " daemon --user nobody $prog "$@" >/dev/null rcode=$? if [ $rcode -eq 0 ]; then pidof $prog > $PIDFILE || rcode=$? fi [ $rcode -eq 0 ] && echo_success || echo_failure echo else log_daemon_msg "Starting $prog: " start-stop-daemon --start --quiet -c nobody --background \ --exec $prog -- "$@" rcode=$? # Hack: sleep a bit to give garbd some time to fork sleep 1 if [ $rcode -eq 0 ]; then pidof $prog > $PIDFILE || rcode=$? fi log_end_msg $rcode fi return $rcode } program_stop() { local rcode if [ -f /etc/redhat-release ]; then echo -n $"Shutting down $prog: " killproc -p $PIDFILE rcode=$? 
[ $rcode -eq 0 ] && echo_success || echo_failure else start-stop-daemon --stop --quiet --oknodo --retry TERM/30/KILL/5 \ --pidfile $PIDFILE rcode=$? log_end_msg $rcode fi [ $rcode -eq 0 ] && rm -f $PIDFILE return $rcode } program_status() { if [ -f /etc/redhat-release ]; then status $prog else status_of_proc -p $PIDFILE "$prog" garb fi } start() { [ "$EUID" != "0" ] && return 4 [ "$NETWORKING" = "no" ] && return 1 if grep -q -E '^# REMOVE' $config; then log_failure "Garbd config $config is not configured yet" return 0 fi if [ -r $PIDFILE ]; then local PID=$(cat ${PIDFILE}) if ps -p $PID >/dev/null 2>&1; then log_failure "$prog is already running with PID $PID" return 3 # ESRCH else rm -f $PIDFILE fi fi [ -x $prog ] || return 5 [ -f $config ] && . $config # Check that node addresses are configured if [ -z "$GALERA_NODES" ]; then log_failure "List of GALERA_NODES is not configured" return 6 fi if [ -z "$GALERA_GROUP" ]; then log_failure "GALERA_GROUP name is not configured" return 6 fi GALERA_PORT=${GALERA_PORT:-4567} OPTIONS="-d -a gcomm://${GALERA_NODES// /,}" # substitute space with comma for backward compatibility [ -n "$GALERA_GROUP" ] && OPTIONS="$OPTIONS -g '$GALERA_GROUP'" [ -n "$GALERA_OPTIONS" ] && OPTIONS="$OPTIONS -o '$GALERA_OPTIONS'" [ -n "$LOG_FILE" ] && OPTIONS="$OPTIONS -l '$LOG_FILE'" eval program_start $OPTIONS } stop() { [ "$EUID" != "0" ] && return 4 [ -r $PIDFILE ] || return 3 # ESRCH program_stop } restart() { stop start } # See how we were called. 
case "$1" in start) start ;; stop) stop ;; status) program_status ;; restart|reload|force-reload) restart ;; condrestart) if status $prog > /dev/null; then stop start fi ;; *) echo $"Usage: $0 {start|stop|status|restart|reload}" exit 2 esac galera-26.4.3/garb/SConscript0000664000177500017540000000272413540715002014310 0ustar dbartmy# Copyright (C) 2011 Codership Oy Import('env', 'libboost_program_options') garb_env = env.Clone() # Include paths garb_env.Append(CPPPATH = Split(''' # #/common #/galerautils/src #/gcs/src ''')) garb_env.Append(CPPFLAGS = ' -DGCS_FOR_GARB') garb_env.Prepend(LIBS=File('#/galerautils/src/libgalerautils.a')) garb_env.Prepend(LIBS=File('#/galerautils/src/libgalerautils++.a')) garb_env.Prepend(LIBS=File('#/gcomm/src/libgcomm.a')) garb_env.Prepend(LIBS=File('#/gcs/src/libgcs4garb.a')) if libboost_program_options: garb_env.Append(LIBS=libboost_program_options) # special environment for garb_config.cpp conf_env = garb_env.Clone() Import('GALERA_VER', 'GALERA_REV') conf_env.Append(CPPFLAGS = ' -DGALERA_VER=\\"' + GALERA_VER + '\\"') conf_env.Append(CPPFLAGS = ' -DGALERA_REV=\\"' + GALERA_REV + '\\"') garb = garb_env.Program(target = 'garbd', source = Split(''' garb_logger.cpp garb_gcs.cpp garb_recv_loop.cpp garb_main.cpp ''') + conf_env.SharedObject(['garb_config.cpp']) ) galera-26.4.3/garb/garb_main.cpp0000664000177500017540000000455013540715002014720 0ustar dbartmy/* Copyright (C) 2011 Codership Oy */ #include "garb_config.hpp" #include "garb_recv_loop.hpp" #include #include #include // exit() #include // setsid(), chdir() #include // open() namespace garb { void become_daemon () { if (pid_t pid = fork()) { if (pid > 0) // parent { exit(0); } else { // I guess we want this to go to stderr as well; std::cerr << "Failed to fork daemon process: " << errno << " (" << strerror(errno) << ")"; gu_throw_error(errno) << "Failed to fork daemon process"; } } // child if (setsid()<0) // become a new process leader, detach from terminal { 
gu_throw_error(errno) << "setsid() failed"; } if (chdir("/")) // detach from potentially removable block devices { gu_throw_error(errno) << "chdir(\"/\") failed"; } // umask(0); // A second fork ensures the process cannot acquire a controlling // terminal. if (pid_t pid = fork()) { if (pid > 0) { exit(0); } else { gu_throw_error(errno) << "Second fork failed"; } } // Close the standard streams. This decouples the daemon from the // terminal that started it. close(0); close(1); close(2); // Bind standard fds (0, 1, 2) to /dev/null for (int fd = 0; fd < 3; ++fd) { if (open("/dev/null", O_RDONLY) < 0) { gu_throw_error(errno) << "Unable to open /dev/null for fd " << fd; } } } int main (int argc, char* argv[]) { Config config(argc, argv); if (config.exit()) return 0; log_info << "Read config: " << config << std::endl; if (config.daemon()) become_daemon(); try { RecvLoop loop (config); return 0; } catch (std::exception& e) { log_fatal << "Exception in creating receive loop: " << e.what(); } catch (...) 
{ log_fatal << "Exception in creating receive loop."; } return EXIT_FAILURE; } } /* namespace garb */ int main (int argc, char* argv[]) { try { return garb::main (argc, argv); } catch (std::exception& e) { log_fatal << e.what(); return 1; } } galera-26.4.3/garb/garb_config.hpp0000664000177500017540000000247613540715002015253 0ustar dbartmy/* Copyright (C) 2011-2013 Codership Oy */ #ifndef _GARB_CONFIG_HPP_ #define _GARB_CONFIG_HPP_ #include #include namespace garb { class Config { public: static std::string const DEFAULT_SST; // default (empty) SST request Config (int argc, char* argv[]); ~Config () {} bool daemon() const { return daemon_ ; } const std::string& name() const { return name_ ; } const std::string& address() const { return address_; } const std::string& group() const { return group_ ; } const std::string& sst() const { return sst_ ; } const std::string& donor() const { return donor_ ; } const std::string& options() const { return options_; } const std::string& cfg() const { return cfg_ ; } const std::string& log() const { return log_ ; } bool exit() const { return exit_ ; } private: bool daemon_; std::string name_; std::string address_; std::string group_; std::string sst_; std::string donor_; std::string options_; std::string log_; std::string cfg_; bool exit_; /* Exit on --help or --version */ }; /* class Config */ std::ostream& operator << (std::ostream&, const Config&); } /* namespace garb */ #endif /* _GARB_CONFIG_HPP_ */ galera-26.4.3/garb/garb_logger.hpp0000664000177500017540000000044413540715002015256 0ustar dbartmy/* Copyright (C) 2011 Codership Oy */ #ifndef _GARB_LOGGER_HPP_ #define _GARB_LOGGER_HPP_ #include namespace garb { extern void set_logfile (const std::string& fname); extern void set_syslog (); } /* namespace garb */ #endif /* _GARB_LOGGER_HPP_ */ galera-26.4.3/garb/garb_config.cpp0000664000177500017540000001133113540715002015234 0ustar dbartmy/* Copyright (C) 2011-2013 Codership Oy */ #include "garb_config.hpp" #include 
"garb_logger.hpp" #include #include #include #include #include #include namespace po = boost::program_options; #include #include namespace garb { static void strip_quotes(std::string& s) { /* stripping no more than one pair of quotes */ if ('"' == *s.begin() && '"' == *s.rbegin()) { std::string stripped(s.substr(1, s.length() - 2)); s = stripped; } } std::string const Config::DEFAULT_SST(WSREP_STATE_TRANSFER_TRIVIAL); Config::Config (int argc, char* argv[]) : daemon_ (false), name_ (GCS_ARBITRATOR_NAME), address_ (), group_ ("my_test_cluster"), sst_ (DEFAULT_SST), donor_ (), options_ (), log_ (), cfg_ (), exit_ (false) { po::options_description other ("Other options"); other.add_options() ("version,v", "Print version & exit") ("help,h", "Show help message & exit") ; // only these are read from cfg file po::options_description config ("Configuration"); config.add_options() ("daemon,d", "Become daemon") ("name,n", po::value(&name_), "Node name") ("address,a",po::value(&address_), "Group address") ("group,g", po::value(&group_), "Group name") ("sst", po::value(&sst_), "SST request string") ("donor", po::value(&donor_), "SST donor name") ("options,o",po::value(&options_), "GCS/GCOMM option list") ("log,l", po::value(&log_), "Log file") ; po::options_description cfg_opt; cfg_opt.add_options() ("cfg,c", po::value(&cfg_), "Configuration file") ; // these are accepted on the command line po::options_description cmdline_opts; cmdline_opts.add(config).add(cfg_opt).add(other); // we can submit address without option po::positional_options_description p; p.add("address", -1); po::variables_map vm; store(po::command_line_parser(argc, argv). 
options(cmdline_opts).positional(p).run(), vm); notify(vm); if (vm.count("help")) { std::cerr << "\nUsage: " << argv[0] << " [options] [group address]\n" << cmdline_opts << std::endl; exit_= true; return; } if (vm.count("version")) { log_info << GALERA_VER << ".r" << GALERA_REV; exit_= true; return; } if (vm.count("cfg")) { std::ifstream ifs(cfg_.c_str()); if (!ifs.good()) { gu_throw_error(ENOENT) << "Failed to open configuration file '" << cfg_ << "' for reading."; } store(parse_config_file(ifs, config), vm); notify(vm); } if (!vm.count("address")) { gu_throw_error(EDESTADDRREQ) << "Group address not specified"; } if (!vm.count("group")) { gu_throw_error(EDESTADDRREQ) << "Group name not specified"; } if (vm.count("daemon")) { daemon_ = true; } /* Seeing how https://svn.boost.org/trac/boost/ticket/850 is fixed long and * hard, it becomes clear what an undercooked piece of... cake(?) boost is. * - need to strip quotes manually if used in config file. * (which is done in a very simplistic manner, but should work for most) */ strip_quotes(name_); strip_quotes(address_); strip_quotes(group_); strip_quotes(sst_); strip_quotes(donor_); strip_quotes(options_); strip_quotes(log_); strip_quotes(cfg_); if (options_.length() > 0) options_ += "; "; options_ += "gcs.fc_limit=9999999; gcs.fc_factor=1.0; gcs.fc_master_slave=yes"; // this block must be the very last. 
gu_conf_self_tstamp_on(); if (vm.count("log")) { set_logfile (log_); } else if (daemon_) /* if no log file given AND daemon operation requested - * log to syslog */ { gu_conf_self_tstamp_off(); set_syslog(); } gu_crc32c_configure(); } std::ostream& operator << (std::ostream& os, const Config& c) { os << "\n\tdaemon: " << c.daemon() << "\n\tname: " << c.name() << "\n\taddress: " << c.address() << "\n\tgroup: " << c.group() << "\n\tsst: " << c.sst() << "\n\tdonor: " << c.donor() << "\n\toptions: " << c.options() << "\n\tcfg: " << c.cfg() << "\n\tlog: " << c.log(); return os; } } galera-26.4.3/garb/garb_recv_loop.hpp0000664000177500017540000000207313540715002015767 0ustar dbartmy/* Copyright (C) 2011-2016 Codership Oy */ #ifndef _GARB_RECV_LOOP_HPP_ #define _GARB_RECV_LOOP_HPP_ #include "garb_gcs.hpp" #include "garb_config.hpp" #include #include #include namespace garb { class RecvLoop { public: RecvLoop (const Config&); ~RecvLoop () {} private: void loop(); const Config& config_; gu::Config gconf_; struct RegisterParams { RegisterParams(gu::Config& cnf) { gu::ssl_register_params(cnf); if (gcs_register_params(reinterpret_cast(&cnf))) { gu_throw_fatal << "Error initializing GCS parameters"; } } } params_; struct ParseOptions { ParseOptions(gu::Config& cnf, const std::string& opt) { cnf.parse(opt); } } parse_; Gcs gcs_; gu::UUID uuid_; gu::seqno_t seqno_; int proto_; }; /* RecvLoop */ } /* namespace garb */ #endif /* _GARB_RECV_LOOP_HPP_ */ galera-26.4.3/garb/garb_gcs.hpp0000664000177500017540000000153713540715002014557 0ustar dbartmy/* Copyright (C) 2011-2016 Codership Oy */ #ifndef _GARB_GCS_HPP_ #define _GARB_GCS_HPP_ #include #include namespace garb { class Gcs { public: Gcs (gu::Config& conf, const std::string& name, const std::string& address, const std::string& group); ~Gcs (); void recv (gcs_action& act); void request_state_transfer (const std::string& request, const std::string& donor); void join (const gu::GTID&, int code); void set_last_applied(const 
gu::GTID&); int proto_ver() const { return gcs_proto_ver(gcs_); } void close (); private: bool closed_; gcs_conn_t* gcs_; Gcs (const Gcs&); Gcs& operator= (const Gcs&); }; /* class Gcs */ } /* namespace garb */ #endif /* _GARB_GCS_HPP_ */ galera-26.4.3/debian/0000775000177500017540000000000013540715002012600 5ustar dbartmygalera-26.4.3/debian/galera-arbitrator-4.garb.default0000777000177500017540000000000013540715002024451 2../garb/files/garb.cnfustar dbartmygalera-26.4.3/debian/gbp.conf0000664000177500017540000000017713540715002014224 0ustar dbartmy[DEFAULT] # native package configuration debian-branch = 3.x upstream-branch = 3.x upstream-tree = branch pristine-tar = False galera-26.4.3/debian/control0000664000177500017540000000545513540715002014214 0ustar dbartmySource: galera-4 Maintainer: Codership Oy Uploaders: Otto KekУЄlУЄinen Section: database Priority: optional Standards-Version: 3.9.6 Build-Depends: check, debhelper (>= 9), libasio-dev, libboost-dev (>= 1.41), libboost-program-options-dev (>= 1.41), libssl-dev, scons (>= 2) Homepage: http://www.galeracluster.com/ Vcs-Git: git://github.com/codership/galera.git Vcs-Browser: http://github.com/codership/galera.git Package: galera-4 Architecture: any Section: libs Depends: ${misc:Depends}, ${shlibs:Depends} Conflicts: garbd-2, garbd2, garbd-3, garbd3, percona-galera-3, percona-xtradb-cluster-galera, percona-xtradb-cluster-galera-2.x, percona-xtradb-cluster-galera-3.x, percona-xtradb-cluster-galera-4.x, percona-xtradb-cluster-garbd-2.x, percona-xtradb-cluster-garbd-3.x, percona-xtradb-cluster-garbd-4.x, galera-3 Breaks: galera Replaces: galera Provides: galera-4, wsrep, galera, galera4, percona-xtradb-cluster-galera-26 Description: Replication framework for transactional applications Galera is a fast synchronous multimaster wsrep provider (replication engine) for transactional databases and similar applications. For more information about wsrep API see http://launchpad.net/wsrep. 
For a description of Galera replication engine see http://galeracluster.com. . This package contains the Galera library/plugin. Package: galera-4-dbg Architecture: any Section: debug Priority: extra Depends: galera-4 (= ${binary:Version}), ${misc:Depends} Description: debugging symbols for galera-4 This package contains the debugging symbols for galera-4. Package: galera-arbitrator-4 Architecture: any Conflicts: garbd-2, garbd2, percona-xtradb-cluster-garbd-2.x, galera-arbitrator-3 Breaks: percona-xtradb-cluster-galera-2.x Replaces: percona-xtradb-cluster-galera-2.x Depends: ${misc:Depends}, ${shlibs:Depends} Description: Galera arbitrator daemon Galera is a fast synchronous multimaster wsrep provider (replication engine) for transactional databases and similar applications. For more information about wsrep API see http://launchpad.net/wsrep. For a description of Galera replication engine see http://galeracluster.com. . This package contains the Galera arbitrator daemon (garbd). Package: galera-arbitrator-4-dbg Architecture: any Section: debug Priority: extra Conflicts: percona-xtradb-cluster-galera-2.x-dbg Depends: galera-arbitrator-4 (= ${binary:Version}), ${misc:Depends} Description: debugging symbols for galera-arbitrator-4 This package contains the debugging symbols for galera-arbitrator-4. 
galera-26.4.3/debian/galera-4.docs0000664000177500017540000000004013540715002015040 0ustar dbartmyscripts/packages/README AUTHORS galera-26.4.3/debian/compat0000664000177500017540000000000213540715002013776 0ustar dbartmy9 galera-26.4.3/debian/galera-arbitrator-4.install0000664000177500017540000000002313540715002017726 0ustar dbartmygarb/garbd usr/bin galera-26.4.3/debian/galera-4.links0000664000177500017540000000007113540715002015234 0ustar dbartmyusr/lib/galera/libgalera_smm.so usr/lib/libgalera_smm.so galera-26.4.3/debian/changelog0000664000177500017540000000172613540715002014460 0ustar dbartmygalera-4 (26.4.3-1) UNRELEASED; URGENCY=low * Galera 4 version 26.4.3 -- Codership Oy Fri, 12 Sep 2019 17:46:47 +0200 galera-4 (26.4.2-1) UNRELEASED; URGENCY=low * Galera 4 version 26.4.2 -- Codership Oy Fri, 12 Feb 2019 11:01:10 +0200 galera-4 (26.4.1-1) UNRELEASED; URGENCY=low * Galera 4 release candidate -- Codership Oy Fri, 22 Feb 2019 08:29:49 +0200 galera-4 (26.4.0-1) UNRELEASED; URGENCY=low * First Galera 4 release -- Codership Oy Tue, 13 Nov 2018 15:10:04 +0200 galera-3 (25.3.14-1) UNRELEASED; urgency=low * Sync Debian packaging from downstream to include improvements developed at http://anonscm.debian.org/cgit/pkg-mysql/galera-3.git -- Codership Oy Fri, 01 Jan 2016 16:02:54 +0200 galera-3 (25.3.6-1) UNRELEASED; urgency=low * First deb release. 
-- Codership Oy Fri, 01 Aug 2014 14:36:49 +0300 galera-26.4.3/debian/rules0000775000177500017540000000407513540715002013666 0ustar dbartmy#!/usr/bin/make -f export DH_VERBOSE=1 export DEB_BUILD_HARDENING=1 DPKG_EXPORT_BUILDFLAGS = 1 # The following is not available on Debian6 #include /usr/share/dpkg/buildflags.mk # Parallel build support as adviced # at https://www.debian.org/doc/debian-policy/ch-source.html#s-debianrules-options ifneq (,$(filter parallel=%,$(DEB_BUILD_OPTIONS))) NUMJOBS = $(patsubst parallel=%,%,$(filter parallel=%,$(DEB_BUILD_OPTIONS))) SCONS_ARGS += -j $(NUMJOBS) # Don't use MAKEFLAGS as it has strange 'w' and '--jobserver-fds=3,4' by default endif # Add support for verbose builds ifneq (,$(filter verbose,$(DEB_BUILD_OPTIONS))) SCONS_ARGS += VERBOSE=1 endif # Run tests by default ifeq (,$(filter nocheck,$(DEB_BUILD_OPTIONS))) SCONS_ARGS += tests=1 deterministic_tests=1 endif # Use strict compilation flags (ie -Werror) if requested ifneq (,$(filter strict_build_flags,$(DEB_BUILD_OPTIONS))) SCONS_ARGS += strict_build_flags=1 endif # Galera Version ifneq (,$(filter version=%,$(DEB_BUILD_OPTIONS))) VERSION = $(filter version=%,$(DEB_BUILD_OPTIONS)) SCONS_ARGS += $(VERSION) endif # Galera revision number ifneq (,$(filter revno=%,$(DEB_BUILD_OPTIONS))) REVNO = $(filter revno=%,$(DEB_BUILD_OPTIONS)) SCONS_ARGS += $(REVNO) endif override_dh_auto_build: # Print build env info to help debug builds on different platforms dpkg-architecture @echo $(SCONS_ARGS) scons $(SCONS_ARGS) || touch FAIL # Print config.log if build fails @echo '*****************************************' @echo '** config.log contents for debugging **' @echo '*****************************************' @cat config.log @echo '*****************************************' if [ -f FAIL ]; then exit 1; fi # Start earlier than MySQL which has value 19 override_dh_installinit-arch: dh_installinit -n --name=garb -- defaults 18 22 override_dh_strip: dh_strip -pgalera-4 --dbg-package=galera-4-dbg 
dh_strip -pgalera-arbitrator-4 --dbg-package=galera-arbitrator-4-dbg override_dh_shlibdeps: dh_shlibdeps --dpkg-shlibdeps-params=--ignore-missing-info %: dh $@ .PHONY: override_dh_strip galera-26.4.3/debian/galera-arbitrator-4.garb.init0000777000177500017540000000000013540715002023634 2../garb/files/garb.shustar dbartmygalera-26.4.3/debian/README.Maintainer0000664000177500017540000000035113540715002015545 0ustar dbartmyBuild the native Debian packaging using: $ cd galera $ git clean -d -f && git reset --hard # clean up any cruft from previous builds $ dpkg-buildpackage -us -uc -b The galera/scripts/packages/deb.sh is going to be deprecated soon. galera-26.4.3/debian/copyright0000664000177500017540000001320113540715002014530 0ustar dbartmyFormat: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: Galera replication - a write set (wsrep) provider Upstream-Contact: Codership Oy Source: http://github.com/codership/galera Comments: Codership have added an additional clause to cover use of OpenSSL with Galera under the GPL-2 license. This is committed to the 2.x branch upstream: . http://bazaar.launchpad.net/~codership/galera/2.x/revision/167 Files: * Copyright: 2007-2015 Codership Oy License: GPL-2 On Debian based systems the full text of the GNU General Public License version 2 can be found in the file `/usr/share/common-licenses/GPL-2`. . In addition, as a special exception, the copyright holders give permission to link the code of portions of this program with the OpenSSL project's "OpenSSL" library (or with modified versions of it that use the same license as the "OpenSSL" library), and distribute the linked executables. You must obey the GNU General Public License in all respects for all of the code used other than "OpenSSL". If you modify this file, you may extend this exception to your version of the file, but you are not obligated to do so. If you do not wish to do so, delete this exception statement from your version. 
Files: asio/* Copyright: 2003-2011 Christopher M. Kohlhoff (chris at kohlhoff dot com), 2008 Rep Invariant Systems, Inc. (info@repinvariant.com), 2005 Stefan Arentz (stefan at soze dot com) License: other Boost Software License - Version 1.0 - August 17th, 2003 . Permission is hereby granted, free of charge, to any person or organization obtaining a copy of the software and accompanying documentation covered by this license (the "Software") to use, reproduce, display, distribute, execute, and transmit the Software, and to prepare derivative works of the Software, and to permit third-parties to whom the Software is furnished to do so, all subject to the following: . The copyright notices in the Software and this entire statement, including the above license grant, this restriction and the following disclaimer, must be included in all copies of the Software, in whole or in part, and all derivative works of the Software, unless such copies or derivative works are solely in the form of machine-executable object code generated by a source language processor. . THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
Files: docs/* Copyright: 2014 Codership Oy 2007-2011 by the Sphinx team, see AUTHORS License: GFDL-1.1+ or CC-BY-SA-3.0 Files: chromium/* Copyright: 2012 The Chromium Authors as listed in AUTHORS file License: BSD-3-clause Files: www.evanjones.ca/* Copyright: 2008,2009,2010 Massachusetts Institute of Technology 2004-2006 Intel Corporation License: BSD-3-clause License: BSD-3-clause Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. . THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
License: GFDL-1.1+ Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License (GFDL), Version 1.1 or any later version published by the Free Software Foundation with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. . The full text of the GFDL is distributed as in /usr/share/common-licenses/GFDL on Debian systems. License: CC-BY-SA-3.0 You are free: to Share (to copy, distribute and transmit the work) and to Remix (to adapt the work) under the following conditions: . Attribution т€” You must attribute the work in the manner specified by the author or licensor (but not in any way that suggests that they endorse you or your use of the work). . Share Alike т€” If you alter, transform, or build upon this work, you may distribute the resulting work only under the same, similar or a compatible license. . For more information, see http://creativecommons.org/licenses/by-sa/3.0/ galera-26.4.3/debian/galera-4.install0000664000177500017540000000004013540715002015556 0ustar dbartmylibgalera_smm.so usr/lib/galera galera-26.4.3/debian/source/0000775000177500017540000000000013540715002014100 5ustar dbartmygalera-26.4.3/debian/source/format0000664000177500017540000000001413540715002015306 0ustar dbartmy3.0 (quilt) galera-26.4.3/debian/galera-arbitrator-4.manpages0000664000177500017540000000001413540715002020053 0ustar dbartmyman/garbd.8 galera-26.4.3/chromium/0000775000177500017540000000000013540715002013201 5ustar dbartmygalera-26.4.3/chromium/build_config.h0000664000177500017540000000244613540715002016004 0ustar dbartmy// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // This file adds defines about the platform we're currently building on. 
// Operating System: // OS_WIN / OS_MACOSX / OS_LINUX / OS_POSIX (MACOSX or LINUX) // Compiler: // COMPILER_MSVC / COMPILER_GCC // Processor: // ARCH_CPU_X86 / ARCH_CPU_X86_64 / ARCH_CPU_X86_FAMILY (X86 or X86_64) // ARCH_CPU_32_BITS / ARCH_CPU_64_BITS #ifndef BUILD_BUILD_CONFIG_H_ #define BUILD_BUILD_CONFIG_H_ // A set of macros to use for platform detection. #if defined(__native_client__) #define OS_NACL 1 #elif defined(__ANDROID__) #define OS_ANDROID 1 #endif // Compiler detection. #if defined(__GNUC__) #define COMPILER_GCC 1 #elif defined(_MSC_VER) #define COMPILER_MSVC 1 #else #error Please add support for your compiler in build/build_config.h #endif // Processor architecture detection. For more info on what's defined, see: // http://msdn.microsoft.com/en-us/library/b0084kay.aspx // http://www.agner.org/optimize/calling_conventions.pdf // or with gcc, run: "echo | gcc -E -dM -" #if defined(_M_X64) || defined(__x86_64__) || defined(_M_IX86) || defined(__i386__) #define ARCH_CPU_X86_FAMILY 1 #endif #endif // BUILD_BUILD_CONFIG_H_ galera-26.4.3/chromium/AUTHORS0000664000177500017540000002207513540715002014257 0ustar dbartmy# Names should be added to this file with this pattern: # # For individuals: # Name # # For organizations: # Organization # # See python fnmatch module documentation for more information. The Chromium Authors <*@chromium.org> Google Inc. <*@google.com> Mohamed I. Hammad Sergiy Byelozyorov Seo Sanghyeon Alex Scheele Andrew Brampton PaweХ‚ Hajdan jr Jesse Miller Szymon Piechowicz James Vega Marco Rodrigues Matthias Reitinger Peter Bright Arthur Lussos Masahiro Yado Yarin Kaul Gaetano Mendola Comodo CA Limited Torchmobile Inc. 
Craig Schlenter Ibrar Ahmed Naoki Takano Fabien Tassin Kunal Thakar Mohamed Mansour Joshua Roesslein Yong Shin Laszlo Radanyi Raman Tenneti Kyle Nahrgang Kim Christensen Paul Robinson JosuУЉ Ratelle Edward Crossman Nikita Ofitserov Sean Bryant Robert Sesek Janwar Dinata Will Hirsch Yoav Zilberberg Joel Stanley Jacob Mandelson Yuri Gorobets Paul Wicks Thiago Farina Viet-Trung Luu Pierre-Antoine LaFayette Song YeWen Philippe Beauchamp Vedran Х ajatoviФ‡ Randy Posynick Bruno Calvignac Jaime Soriano Pastor Bryan Donlan Ramkumar Ramachandra Dominic Jodoin Kaspar Brand Clemens Fruhwirth Kevin Lee Helpingstine Bernard Cafarelli Vernon Tang Alexander Sulfrian Philippe Beaudoin Mark Hahnenberg Alex Gartrell Leith Bade James Choi Paul Kehrer Chamal De Silva Jay Soffian Brian G. Merrell Matthew Willis Brian Merrell, Novell Inc. Ryan Sleevi Satoshi Matsuzaki Benjamin Jemlich Ningxin Hu James Wei Haitao Feng Jared Wein Mingmin Xie Michael Gilbert Giuseppe Iuculano James Willcox Shreyas VA Steven Pennington Jorge Villatoro Paul Nettleship David Benjamin Sevan Janiyan Peter Beverloo Lauri Oherd Ben Karel Sam McDonald Magnus Danielsson Kushal Pisavadia Maarten Lankhorst Vipul Bhasin Ryan Norton Dillon Sellars Seshadri Mahalingam Clement Scheelfeldt Skau David Futcher Ramkumar Gokarnesan Matt Arpidone ruben NVIDIA Corporation <*@nvidia.com> Torsten Kurbad Max Perepelitsyn Luke Zarko Felix H. 
Dahlke Ali Vathi Mathias Bynens Mark Seaborn Amruth Raj Amruth Raj Gajendra Singh Ehsan Akhgari Christopher Dale Sanjoy Pal Mike Tilburg Peter Brophy Robert Goldberg Don Woodward Vinay Anantharaman Naveen Bobbili Vamshikrishna Yellenki Robert Nagy Nayan Kumar K ShankarGanesh K Goutham Jagannatha Rosen Dash Naveen Bobbili Ravi Phaneendra Kasibhatla Rosen Dash Parag Radke Ted Vessenes Yair Yogev Chandra Shekar Vallala Patrasciuc Sorin Cristian Halton Huo Shiliu Wang Gao Chun Clinton Staley Clinton Staley Devlin Cronin Junmin Zhu Cem Kocagil YoungKi Hong Lu Guanqun FranУЇois Beaufort Eriq Augustine Francois Kritzinger Erik Hill Mao Yujie Pan Deng Aaron Leventhal Peter Collingbourne Aaron Randolph Yumikiyo Osanai Matthew Robertson Mao Yujie Xu Samuel Jin Yang Xinchao He Changbin Shao Stephen Searles Arun Mankuzhi Christophe Dumez Taylor Price Alexandru Chiculita Eric Rescorla Alexandre Abreu Erik SjУЖlund Simon Arlott Alexey Korepanov Mitchell Rosen Yongsheng Zhu Shouqun Liu Kangyuan Shu Jake Helfert Hongbo Min Anastasios Cassiotis Evangelos Foutras Pavel Ivanov Rene Bolldorf Petar Jovanovic Sergio Carlos Morales Angeles Mihai Maerean Kaustubh Atrawalkar Robert Bear Travis Robert Bear Travis Max Vujovic Jakob Weigert Catalin Badea Joshua Lock Dai Chunyang Joe Thomas Ruben Terrazas Josh Triplett Qiankun Miao Etienne Laurin Yang Gu Timo Reimann Sungguk Lim Martin Bednorz Kamil Jiwa Keene Pan Trevor Perrin Ion Rosca Sylvain Zimmer Sungmann Cho ц–ЙшЇ‰ (Fang Jue) Evan Peterson J. 
Ryan Stinnett Matheus Bratfisch Horia Olaru Horia Olaru Opera Software ASA <*@opera.com> Johannes Rudolph Aaron Jacobs Sam Larison Jun Jiang Bobby Powers Patrick Riordan Kenneth Rohde Christiansen Raphael Kubo da Costa Yandex LLC <*@yandex-team.ru> Yoshinori Sano Mrunal Kapade Yael Aharon Haojian Wu Sathish Kuppuswamy Joone Hur Sudarsana Babu Nagineni Jared Shumway Shez Baig galera-26.4.3/chromium/compiler_specific.h0000664000177500017540000001440213540715002017032 0ustar dbartmy// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef BASE_COMPILER_SPECIFIC_H_ #define BASE_COMPILER_SPECIFIC_H_ #include "build_config.h" #if defined(COMPILER_MSVC) // Macros for suppressing and disabling warnings on MSVC. // // Warning numbers are enumerated at: // http://msdn.microsoft.com/en-us/library/8x5x43k7(VS.80).aspx // // The warning pragma: // http://msdn.microsoft.com/en-us/library/2c8f766e(VS.80).aspx // // Using __pragma instead of #pragma inside macros: // http://msdn.microsoft.com/en-us/library/d9x1s805.aspx // MSVC_SUPPRESS_WARNING disables warning |n| for the remainder of the line and // for the next line of the source file. #define MSVC_SUPPRESS_WARNING(n) __pragma(warning(suppress:n)) // MSVC_PUSH_DISABLE_WARNING pushes |n| onto a stack of warnings to be disabled. // The warning remains disabled until popped by MSVC_POP_WARNING. #define MSVC_PUSH_DISABLE_WARNING(n) __pragma(warning(push)) \ __pragma(warning(disable:n)) // MSVC_PUSH_WARNING_LEVEL pushes |n| as the global warning level. The level // remains in effect until popped by MSVC_POP_WARNING(). Use 0 to disable all // warnings. #define MSVC_PUSH_WARNING_LEVEL(n) __pragma(warning(push, n)) // Pop effects of innermost MSVC_PUSH_* macro. 
#define MSVC_POP_WARNING() __pragma(warning(pop)) #define MSVC_DISABLE_OPTIMIZE() __pragma(optimize("", off)) #define MSVC_ENABLE_OPTIMIZE() __pragma(optimize("", on)) // DEPRECATED // // Prior to r83840 this was used to supress warning C4355 when using |this| as // an argument in constructor initializer lists: // http://msdn.microsoft.com/en-us/library/3c594ae3(VS.80).aspx // // C4355 is supressed globally during compilation and existing uses of this // macro should be removed. Refer to http://crbug.com/234765 for details. #define ALLOW_THIS_IN_INITIALIZER_LIST(code) code // Allows exporting a class that inherits from a non-exported base class. // This uses suppress instead of push/pop because the delimiter after the // declaration (either "," or "{") has to be placed before the pop macro. // // Example usage: // class EXPORT_API Foo : NON_EXPORTED_BASE(public Bar) { // // MSVC Compiler warning C4275: // non dll-interface class 'Bar' used as base for dll-interface class 'Foo'. // Note that this is intended to be used only when no access to the base class' // static data is done through derived classes or inline methods. For more info, // see http://msdn.microsoft.com/en-us/library/3tdb471s(VS.80).aspx #define NON_EXPORTED_BASE(code) MSVC_SUPPRESS_WARNING(4275) \ code #else // Not MSVC #define MSVC_SUPPRESS_WARNING(n) #define MSVC_PUSH_DISABLE_WARNING(n) #define MSVC_PUSH_WARNING_LEVEL(n) #define MSVC_POP_WARNING() #define MSVC_DISABLE_OPTIMIZE() #define MSVC_ENABLE_OPTIMIZE() #define ALLOW_THIS_IN_INITIALIZER_LIST(code) code #define NON_EXPORTED_BASE(code) code #endif // COMPILER_MSVC // Annotate a variable indicating it's ok if the variable is not used. // (Typically used to silence a compiler warning when the assignment // is important for some other reason.) // Use like: // int x ALLOW_UNUSED = ...; #if defined(COMPILER_GCC) #define ALLOW_UNUSED __attribute__((unused)) #else #define ALLOW_UNUSED #endif // Annotate a function indicating it should not be inlined. 
// Use like: // NOINLINE void DoStuff() { ... } #if defined(COMPILER_GCC) #define NOINLINE __attribute__((noinline)) #elif defined(COMPILER_MSVC) #define NOINLINE __declspec(noinline) #else #define NOINLINE #endif // Specify memory alignment for structs, classes, etc. // Use like: // class ALIGNAS(16) MyClass { ... } // ALIGNAS(16) int array[4]; #if defined(COMPILER_MSVC) #define ALIGNAS(byte_alignment) __declspec(align(byte_alignment)) #elif defined(COMPILER_GCC) #define ALIGNAS(byte_alignment) __attribute__((aligned(byte_alignment))) #endif // Return the byte alignment of the given type (available at compile time). Use // sizeof(type) prior to checking __alignof to workaround Visual C++ bug: // http://goo.gl/isH0C // Use like: // ALIGNOF(int32) // this would be 4 #if defined(COMPILER_MSVC) #define ALIGNOF(type) (sizeof(type) - sizeof(type) + __alignof(type)) #elif defined(COMPILER_GCC) #define ALIGNOF(type) __alignof__(type) #endif // Annotate a virtual method indicating it must be overriding a virtual // method in the parent class. // Use like: // virtual void foo() OVERRIDE; #if defined(COMPILER_MSVC) #define OVERRIDE override #elif defined(__clang__) #define OVERRIDE override #else #define OVERRIDE #endif // Annotate a function indicating the caller must examine the return value. // Use like: // int foo() WARN_UNUSED_RESULT; // To explicitly ignore a result, see |ignore_result()| in . #if defined(COMPILER_GCC) #define WARN_UNUSED_RESULT __attribute__((warn_unused_result)) #else #define WARN_UNUSED_RESULT #endif // Tell the compiler a function is using a printf-style format string. // |format_param| is the one-based index of the format string parameter; // |dots_param| is the one-based index of the "..." parameter. // For v*printf functions (which take a va_list), pass 0 for dots_param. // (This is undocumented but matches what the system C headers do.) 
#if defined(COMPILER_GCC) #define PRINTF_FORMAT(format_param, dots_param) \ __attribute__((format(printf, format_param, dots_param))) #else #define PRINTF_FORMAT(format_param, dots_param) #endif // WPRINTF_FORMAT is the same, but for wide format strings. // This doesn't appear to yet be implemented in any compiler. // See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=38308 . #define WPRINTF_FORMAT(format_param, dots_param) // If available, it would look like: // __attribute__((format(wprintf, format_param, dots_param))) // MemorySanitizer annotations. #ifdef MEMORY_SANITIZER extern "C" { void __msan_unpoison(const void *p, unsigned long s); } // extern "C" // Mark a memory region fully initialized. // Use this to annotate code that deliberately reads uninitialized data, for // example a GC scavenging root set pointers from the stack. #define MSAN_UNPOISON(p, s) __msan_unpoison(p, s) #else // MEMORY_SANITIZER #define MSAN_UNPOISON(p, s) #endif // MEMORY_SANITIZER #endif // BASE_COMPILER_SPECIFIC_H_ galera-26.4.3/chromium/LICENSE0000664000177500017540000000303313540715002014205 0ustar dbartmy// Copyright (c) 2013 The Chromium Authors. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. galera-26.4.3/chromium/compile_assert.h0000664000177500017540000000516413540715002016371 0ustar dbartmy// Copyright (c) 2011 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // // Codership: this is a cut down version of base/basictypes.h to comtain // COMPILE_ASSERT only #ifndef COMPILE_ASSERT_H_ #define COMPILE_ASSERT_H_ // The COMPILE_ASSERT macro can be used to verify that a compile time // expression is true. For example, you could use it to verify the // size of a static array: // // COMPILE_ASSERT(ARRAYSIZE_UNSAFE(content_type_names) == CONTENT_NUM_TYPES, // content_type_names_incorrect_size); // // or to make sure a struct is smaller than a certain size: // // COMPILE_ASSERT(sizeof(foo) < 128, foo_too_large); // // The second argument to the macro is the name of the variable. If // the expression is false, most compilers will issue a warning/error // containing the name of the variable. 
namespace chromium { template struct CompileAssert { }; } // namespace chromium #undef CHROMIUM_COMPILE_ASSERT #define CHROMIUM_COMPILE_ASSERT(expr, msg) \ typedef chromium::CompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1] // Implementation details of COMPILE_ASSERT: // // - COMPILE_ASSERT works by defining an array type that has -1 // elements (and thus is invalid) when the expression is false. // // - The simpler definition // // #define COMPILE_ASSERT(expr, msg) typedef char msg[(expr) ? 1 : -1] // // does not work, as gcc supports variable-length arrays whose sizes // are determined at run-time (this is gcc's extension and not part // of the C++ standard). As a result, gcc fails to reject the // following code with the simple definition: // // int foo; // COMPILE_ASSERT(foo, msg); // not supposed to compile as foo is // // not a compile-time constant. // // - By using the type CompileAssert<(bool(expr))>, we ensures that // expr is a compile-time constant. (Template arguments must be // determined at compile-time.) // // - The outer parentheses in CompileAssert<(bool(expr))> are necessary // to work around a bug in gcc 3.4.4 and 4.0.1. If we had written // // CompileAssert // // instead, these compilers will refuse to compile // // COMPILE_ASSERT(5 > 0, some_message); // // (They seem to think the ">" in "5 > 0" marks the end of the // template argument list.) // // - The array size is (bool(expr) ? 1 : -1), instead of simply // // ((expr) ? 1 : -1). // // This is to avoid running into a bug in MS VC 7.1, which // causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1. #endif // COMPILE_ASSERT_H_ galera-26.4.3/chromium/aligned_memory.h0000664000177500017540000001150613540715002016350 0ustar dbartmy// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
// AlignedMemory is a POD type that gives you a portable way to specify static // or local stack data of a given alignment and size. For example, if you need // static storage for a class, but you want manual control over when the object // is constructed and destructed (you don't want static initialization and // destruction), use AlignedMemory: // // static AlignedMemory my_class; // // // ... at runtime: // new(my_class.void_data()) MyClass(); // // // ... use it: // MyClass* mc = my_class.data_as(); // // // ... later, to destruct my_class: // my_class.data_as()->MyClass::~MyClass(); // // Alternatively, a runtime sized aligned allocation can be created: // // float* my_array = static_cast(AlignedAlloc(size, alignment)); // // // ... later, to release the memory: // AlignedFree(my_array); // // Or using scoped_ptr_malloc: // // scoped_ptr_malloc my_array( // static_cast(AlignedAlloc(size, alignment))); #ifndef BASE_MEMORY_ALIGNED_MEMORY_H_ #define BASE_MEMORY_ALIGNED_MEMORY_H_ #include "compile_assert.h" #include "compiler_specific.h" #if defined(COMPILER_MSVC) #include #else #include #endif namespace chromium { // AlignedMemory is specialized for all supported alignments. // Make sure we get a compiler error if someone uses an unsupported alignment. template struct AlignedMemory {}; #define BASE_DECL_ALIGNED_MEMORY(byte_alignment) \ template \ class AlignedMemory { \ public: \ ALIGNAS(byte_alignment) unsigned char data_[Size]; \ void* void_data() { return static_cast(data_); } \ const void* void_data() const { \ return static_cast(data_); \ } \ template \ Type* data_as() { return static_cast(void_data()); } \ template \ const Type* data_as() const { \ return static_cast(void_data()); \ } \ private: \ void* operator new(size_t); \ void operator delete(void*); \ } // Specialization for all alignments is required because MSVC (as of VS 2008) // does not understand ALIGNAS(ALIGNOF(Type)) or ALIGNAS(template_param). 
// Greater than 4096 alignment is not supported by some compilers, so 4096 is // the maximum specified here. BASE_DECL_ALIGNED_MEMORY(1); BASE_DECL_ALIGNED_MEMORY(2); BASE_DECL_ALIGNED_MEMORY(4); BASE_DECL_ALIGNED_MEMORY(8); BASE_DECL_ALIGNED_MEMORY(16); BASE_DECL_ALIGNED_MEMORY(32); BASE_DECL_ALIGNED_MEMORY(64); BASE_DECL_ALIGNED_MEMORY(128); BASE_DECL_ALIGNED_MEMORY(256); BASE_DECL_ALIGNED_MEMORY(512); BASE_DECL_ALIGNED_MEMORY(1024); BASE_DECL_ALIGNED_MEMORY(2048); BASE_DECL_ALIGNED_MEMORY(4096); #undef BASE_DECL_ALIGNED_MEMORY #if defined(OS_ANDROID) || defined(OS_NACL) #include #endif inline void* AlignedAlloc(size_t size, size_t alignment) { void* ptr = NULL; #if defined(COMPILER_MSVC) ptr = _aligned_malloc(size, alignment); #elif defined(OS_ANDROID) || defined(OS_NACL) // Both Android and NaCl technically support posix_memalign(), but do not expose // it in the current version of the library headers used by Chrome. Luckily, // memalign() on both platforms returns pointers which can safely be used with // free(), so we can use it instead. Issues filed with each project for docs: // http://code.google.com/p/android/issues/detail?id=35391 // http://code.google.com/p/chromium/issues/detail?id=138579 ptr = memalign(alignment, size); #else if (posix_memalign(&ptr, alignment, size)) ptr = NULL; #endif return ptr; } inline void AlignedFree(void* ptr) { #if defined(COMPILER_MSVC) _aligned_free(ptr); #else free(ptr); #endif } // Helper class for use with scoped_ptr_malloc. class ScopedPtrAlignedFree { public: inline void operator()(void* ptr) const { AlignedFree(ptr); } }; // Codership: added a convenience wsrapper over AlignedMemory template class AlignedBuffer { public: T* base_ptr() { return buf_.template data_as(); } const T* base_ptr() const { return buf_.template data_as(); } size_t size() const { return capacity; } private: // The buffer itself. It is not of type T because we don't want the // constructors and destructors to be automatically called. 
Define a POD // buffer of the right size instead. chromium::AlignedMemory buf_; #if defined(__GNUC__) && !defined(ARCH_CPU_X86_FAMILY) CHROMIUM_COMPILE_ASSERT(ALIGNOF(T) <= 16, crbug_115612); #endif }; } // namespace chromium #endif // BASE_MEMORY_ALIGNED_MEMORY_H_ galera-26.4.3/asio/0000775000177500017540000000000013540715002012311 5ustar dbartmygalera-26.4.3/asio/asio/0000775000177500017540000000000013540715002013244 5ustar dbartmygalera-26.4.3/asio/asio/error.hpp0000664000177500017540000002110013540715002015100 0ustar dbartmy// // error.hpp // ~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_ERROR_HPP #define ASIO_ERROR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/error_code.hpp" #include "asio/system_error.hpp" #if defined(ASIO_WINDOWS) \ || defined(__CYGWIN__) \ || defined(ASIO_WINDOWS_RUNTIME) # include #else # include # include #endif #if defined(GENERATING_DOCUMENTATION) /// INTERNAL ONLY. # define ASIO_NATIVE_ERROR(e) implementation_defined /// INTERNAL ONLY. # define ASIO_SOCKET_ERROR(e) implementation_defined /// INTERNAL ONLY. # define ASIO_NETDB_ERROR(e) implementation_defined /// INTERNAL ONLY. # define ASIO_GETADDRINFO_ERROR(e) implementation_defined /// INTERNAL ONLY. 
# define ASIO_WIN_OR_POSIX(e_win, e_posix) implementation_defined #elif defined(ASIO_WINDOWS_RUNTIME) # define ASIO_NATIVE_ERROR(e) __HRESULT_FROM_WIN32(e) # define ASIO_SOCKET_ERROR(e) __HRESULT_FROM_WIN32(WSA ## e) # define ASIO_NETDB_ERROR(e) __HRESULT_FROM_WIN32(WSA ## e) # define ASIO_GETADDRINFO_ERROR(e) __HRESULT_FROM_WIN32(WSA ## e) # define ASIO_WIN_OR_POSIX(e_win, e_posix) e_win #elif defined(ASIO_WINDOWS) || defined(__CYGWIN__) # define ASIO_NATIVE_ERROR(e) e # define ASIO_SOCKET_ERROR(e) WSA ## e # define ASIO_NETDB_ERROR(e) WSA ## e # define ASIO_GETADDRINFO_ERROR(e) WSA ## e # define ASIO_WIN_OR_POSIX(e_win, e_posix) e_win #else # define ASIO_NATIVE_ERROR(e) e # define ASIO_SOCKET_ERROR(e) e # define ASIO_NETDB_ERROR(e) e # define ASIO_GETADDRINFO_ERROR(e) e # define ASIO_WIN_OR_POSIX(e_win, e_posix) e_posix #endif #include "asio/detail/push_options.hpp" namespace asio { namespace error { enum basic_errors { /// Permission denied. access_denied = ASIO_SOCKET_ERROR(EACCES), /// Address family not supported by protocol. address_family_not_supported = ASIO_SOCKET_ERROR(EAFNOSUPPORT), /// Address already in use. address_in_use = ASIO_SOCKET_ERROR(EADDRINUSE), /// Transport endpoint is already connected. already_connected = ASIO_SOCKET_ERROR(EISCONN), /// Operation already in progress. already_started = ASIO_SOCKET_ERROR(EALREADY), /// Broken pipe. broken_pipe = ASIO_WIN_OR_POSIX( ASIO_NATIVE_ERROR(ERROR_BROKEN_PIPE), ASIO_NATIVE_ERROR(EPIPE)), /// A connection has been aborted. connection_aborted = ASIO_SOCKET_ERROR(ECONNABORTED), /// Connection refused. connection_refused = ASIO_SOCKET_ERROR(ECONNREFUSED), /// Connection reset by peer. connection_reset = ASIO_SOCKET_ERROR(ECONNRESET), /// Bad file descriptor. bad_descriptor = ASIO_SOCKET_ERROR(EBADF), /// Bad address. fault = ASIO_SOCKET_ERROR(EFAULT), /// No route to host. host_unreachable = ASIO_SOCKET_ERROR(EHOSTUNREACH), /// Operation now in progress. 
in_progress = ASIO_SOCKET_ERROR(EINPROGRESS), /// Interrupted system call. interrupted = ASIO_SOCKET_ERROR(EINTR), /// Invalid argument. invalid_argument = ASIO_SOCKET_ERROR(EINVAL), /// Message too long. message_size = ASIO_SOCKET_ERROR(EMSGSIZE), /// The name was too long. name_too_long = ASIO_SOCKET_ERROR(ENAMETOOLONG), /// Network is down. network_down = ASIO_SOCKET_ERROR(ENETDOWN), /// Network dropped connection on reset. network_reset = ASIO_SOCKET_ERROR(ENETRESET), /// Network is unreachable. network_unreachable = ASIO_SOCKET_ERROR(ENETUNREACH), /// Too many open files. no_descriptors = ASIO_SOCKET_ERROR(EMFILE), /// No buffer space available. no_buffer_space = ASIO_SOCKET_ERROR(ENOBUFS), /// Cannot allocate memory. no_memory = ASIO_WIN_OR_POSIX( ASIO_NATIVE_ERROR(ERROR_OUTOFMEMORY), ASIO_NATIVE_ERROR(ENOMEM)), /// Operation not permitted. no_permission = ASIO_WIN_OR_POSIX( ASIO_NATIVE_ERROR(ERROR_ACCESS_DENIED), ASIO_NATIVE_ERROR(EPERM)), /// Protocol not available. no_protocol_option = ASIO_SOCKET_ERROR(ENOPROTOOPT), /// No such device. no_such_device = ASIO_WIN_OR_POSIX( ASIO_NATIVE_ERROR(ERROR_BAD_UNIT), ASIO_NATIVE_ERROR(ENODEV)), /// Transport endpoint is not connected. not_connected = ASIO_SOCKET_ERROR(ENOTCONN), /// Socket operation on non-socket. not_socket = ASIO_SOCKET_ERROR(ENOTSOCK), /// Operation cancelled. operation_aborted = ASIO_WIN_OR_POSIX( ASIO_NATIVE_ERROR(ERROR_OPERATION_ABORTED), ASIO_NATIVE_ERROR(ECANCELED)), /// Operation not supported. operation_not_supported = ASIO_SOCKET_ERROR(EOPNOTSUPP), /// Cannot send after transport endpoint shutdown. shut_down = ASIO_SOCKET_ERROR(ESHUTDOWN), /// Connection timed out. timed_out = ASIO_SOCKET_ERROR(ETIMEDOUT), /// Resource temporarily unavailable. try_again = ASIO_WIN_OR_POSIX( ASIO_NATIVE_ERROR(ERROR_RETRY), ASIO_NATIVE_ERROR(EAGAIN)), /// The socket is marked non-blocking and the requested operation would block. 
would_block = ASIO_SOCKET_ERROR(EWOULDBLOCK) }; enum netdb_errors { /// Host not found (authoritative). host_not_found = ASIO_NETDB_ERROR(HOST_NOT_FOUND), /// Host not found (non-authoritative). host_not_found_try_again = ASIO_NETDB_ERROR(TRY_AGAIN), /// The query is valid but does not have associated address data. no_data = ASIO_NETDB_ERROR(NO_DATA), /// A non-recoverable error occurred. no_recovery = ASIO_NETDB_ERROR(NO_RECOVERY) }; enum addrinfo_errors { /// The service is not supported for the given socket type. service_not_found = ASIO_WIN_OR_POSIX( ASIO_NATIVE_ERROR(WSATYPE_NOT_FOUND), ASIO_GETADDRINFO_ERROR(EAI_SERVICE)), /// The socket type is not supported. socket_type_not_supported = ASIO_WIN_OR_POSIX( ASIO_NATIVE_ERROR(WSAESOCKTNOSUPPORT), ASIO_GETADDRINFO_ERROR(EAI_SOCKTYPE)) }; enum misc_errors { /// Already open. already_open = 1, /// End of file or stream. eof, /// Element not found. not_found, /// The descriptor cannot fit into the select system call's fd_set. fd_set_failure }; inline const asio::error_category& get_system_category() { return asio::system_category(); } #if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) extern ASIO_DECL const asio::error_category& get_netdb_category(); extern ASIO_DECL const asio::error_category& get_addrinfo_category(); #else // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) inline const asio::error_category& get_netdb_category() { return get_system_category(); } inline const asio::error_category& get_addrinfo_category() { return get_system_category(); } #endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) extern ASIO_DECL const asio::error_category& get_misc_category(); static const asio::error_category& system_category = asio::error::get_system_category(); static const asio::error_category& netdb_category = asio::error::get_netdb_category(); static const asio::error_category& addrinfo_category = asio::error::get_addrinfo_category(); static const asio::error_category& misc_category = 
asio::error::get_misc_category(); } // namespace error } // namespace asio #if defined(ASIO_HAS_STD_SYSTEM_ERROR) namespace std { template<> struct is_error_code_enum { static const bool value = true; }; template<> struct is_error_code_enum { static const bool value = true; }; template<> struct is_error_code_enum { static const bool value = true; }; template<> struct is_error_code_enum { static const bool value = true; }; } // namespace std #endif // defined(ASIO_HAS_STD_SYSTEM_ERROR) namespace asio { namespace error { inline asio::error_code make_error_code(basic_errors e) { return asio::error_code( static_cast(e), get_system_category()); } inline asio::error_code make_error_code(netdb_errors e) { return asio::error_code( static_cast(e), get_netdb_category()); } inline asio::error_code make_error_code(addrinfo_errors e) { return asio::error_code( static_cast(e), get_addrinfo_category()); } inline asio::error_code make_error_code(misc_errors e) { return asio::error_code( static_cast(e), get_misc_category()); } } // namespace error } // namespace asio #include "asio/detail/pop_options.hpp" #undef ASIO_NATIVE_ERROR #undef ASIO_SOCKET_ERROR #undef ASIO_NETDB_ERROR #undef ASIO_GETADDRINFO_ERROR #undef ASIO_WIN_OR_POSIX #if defined(ASIO_HEADER_ONLY) # include "asio/impl/error.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_ERROR_HPP galera-26.4.3/asio/asio/basic_io_object.hpp0000664000177500017540000001411013540715002017050 0ustar dbartmy// // basic_io_object.hpp // ~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BASIC_IO_OBJECT_HPP #define ASIO_BASIC_IO_OBJECT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/io_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { #if defined(ASIO_HAS_MOVE) namespace detail { // Type trait used to determine whether a service supports move. template class service_has_move { private: typedef IoObjectService service_type; typedef typename service_type::implementation_type implementation_type; template static auto asio_service_has_move_eval(T* t, U* u) -> decltype(t->move_construct(*u, *u), char()); static char (&asio_service_has_move_eval(...))[2]; public: static const bool value = sizeof(asio_service_has_move_eval( static_cast(0), static_cast(0))) == 1; }; } #endif // defined(ASIO_HAS_MOVE) /// Base class for all I/O objects. /** * @note All I/O objects are non-copyable. However, when using C++0x, certain * I/O objects do support move construction and move assignment. */ #if !defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) template #else template ::value> #endif class basic_io_object { public: /// The type of the service that will be used to provide I/O operations. typedef IoObjectService service_type; /// The underlying implementation type of I/O object. typedef typename service_type::implementation_type implementation_type; /// Get the io_service associated with the object. /** * This function may be used to obtain the io_service object that the I/O * object uses to dispatch handlers for asynchronous operations. * * @return A reference to the io_service object that the I/O object will use * to dispatch handlers. Ownership is not transferred to the caller. */ asio::io_service& get_io_service() { return service.get_io_service(); } protected: /// Construct a basic_io_object. 
/** * Performs: * @code get_service().construct(get_implementation()); @endcode */ explicit basic_io_object(asio::io_service& io_service) : service(asio::use_service(io_service)) { service.construct(implementation); } #if defined(GENERATING_DOCUMENTATION) /// Move-construct a basic_io_object. /** * Performs: * @code get_service().move_construct( * get_implementation(), other.get_implementation()); @endcode * * @note Available only for services that support movability, */ basic_io_object(basic_io_object&& other); /// Move-assign a basic_io_object. /** * Performs: * @code get_service().move_assign(get_implementation(), * other.get_service(), other.get_implementation()); @endcode * * @note Available only for services that support movability, */ basic_io_object& operator=(basic_io_object&& other); #endif // defined(GENERATING_DOCUMENTATION) /// Protected destructor to prevent deletion through this type. /** * Performs: * @code get_service().destroy(get_implementation()); @endcode */ ~basic_io_object() { service.destroy(implementation); } /// Get the service associated with the I/O object. service_type& get_service() { return service; } /// Get the service associated with the I/O object. const service_type& get_service() const { return service; } /// (Deprecated: Use get_service().) The service associated with the I/O /// object. /** * @note Available only for services that do not support movability. */ service_type& service; /// Get the underlying implementation of the I/O object. implementation_type& get_implementation() { return implementation; } /// Get the underlying implementation of the I/O object. const implementation_type& get_implementation() const { return implementation; } /// (Deprecated: Use get_implementation().) The underlying implementation of /// the I/O object. 
implementation_type implementation; private: basic_io_object(const basic_io_object&); basic_io_object& operator=(const basic_io_object&); }; #if defined(ASIO_HAS_MOVE) // Specialisation for movable objects. template class basic_io_object { public: typedef IoObjectService service_type; typedef typename service_type::implementation_type implementation_type; asio::io_service& get_io_service() { return service_->get_io_service(); } protected: explicit basic_io_object(asio::io_service& io_service) : service_(&asio::use_service(io_service)) { service_->construct(implementation); } basic_io_object(basic_io_object&& other) : service_(&other.get_service()) { service_->move_construct(implementation, other.implementation); } ~basic_io_object() { service_->destroy(implementation); } basic_io_object& operator=(basic_io_object&& other) { service_->move_assign(implementation, *other.service_, other.implementation); service_ = other.service_; return *this; } service_type& get_service() { return *service_; } const service_type& get_service() const { return *service_; } implementation_type& get_implementation() { return implementation; } const implementation_type& get_implementation() const { return implementation; } implementation_type implementation; private: basic_io_object(const basic_io_object&); void operator=(const basic_io_object&); IoObjectService* service_; }; #endif // defined(ASIO_HAS_MOVE) } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_BASIC_IO_OBJECT_HPP galera-26.4.3/asio/asio/detail/0000775000177500017540000000000013540715002014506 5ustar dbartmygalera-26.4.3/asio/asio/detail/gcc_arm_fenced_block.hpp0000664000177500017540000000410513540715002021270 0ustar dbartmy// // detail/gcc_arm_fenced_block.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_GCC_ARM_FENCED_BLOCK_HPP #define ASIO_DETAIL_GCC_ARM_FENCED_BLOCK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(__GNUC__) && defined(__arm__) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class gcc_arm_fenced_block : private noncopyable { public: enum half_t { half }; enum full_t { full }; // Constructor for a half fenced block. explicit gcc_arm_fenced_block(half_t) { } // Constructor for a full fenced block. explicit gcc_arm_fenced_block(full_t) { barrier(); } // Destructor. ~gcc_arm_fenced_block() { barrier(); } private: static void barrier() { #if defined(__ARM_ARCH_4__) \ || defined(__ARM_ARCH_4T__) \ || defined(__ARM_ARCH_5__) \ || defined(__ARM_ARCH_5E__) \ || defined(__ARM_ARCH_5T__) \ || defined(__ARM_ARCH_5TE__) \ || defined(__ARM_ARCH_5TEJ__) \ || defined(__ARM_ARCH_6__) \ || defined(__ARM_ARCH_6J__) \ || defined(__ARM_ARCH_6K__) \ || defined(__ARM_ARCH_6Z__) \ || defined(__ARM_ARCH_6ZK__) \ || defined(__ARM_ARCH_6T2__) # if defined(__thumb__) // This is just a placeholder and almost certainly not sufficient. __asm__ __volatile__ ("" : : : "memory"); # else // defined(__thumb__) int a = 0, b = 0; __asm__ __volatile__ ("swp %0, %1, [%2]" : "=&r"(a) : "r"(1), "r"(&b) : "memory", "cc"); # endif // defined(__thumb__) #else // ARMv7 and later. 
__asm__ __volatile__ ("dmb" : : : "memory"); #endif } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(__GNUC__) && defined(__arm__) #endif // ASIO_DETAIL_GCC_ARM_FENCED_BLOCK_HPP galera-26.4.3/asio/asio/detail/eventfd_select_interrupter.hpp0000664000177500017540000000450713540715002022662 0ustar dbartmy// // detail/eventfd_select_interrupter.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Roelof Naude (roelof.naude at gmail dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_EVENTFD_SELECT_INTERRUPTER_HPP #define ASIO_DETAIL_EVENTFD_SELECT_INTERRUPTER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_EVENTFD) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class eventfd_select_interrupter { public: // Constructor. ASIO_DECL eventfd_select_interrupter(); // Destructor. ASIO_DECL ~eventfd_select_interrupter(); // Recreate the interrupter's descriptors. Used after a fork. ASIO_DECL void recreate(); // Interrupt the select call. ASIO_DECL void interrupt(); // Reset the select interrupt. Returns true if the call was interrupted. ASIO_DECL bool reset(); // Get the read descriptor to be passed to select. int read_descriptor() const { return read_descriptor_; } private: // Open the descriptors. Throws on error. ASIO_DECL void open_descriptors(); // Close the descriptors. ASIO_DECL void close_descriptors(); // The read end of a connection used to interrupt the select call. 
This file // descriptor is passed to select such that when it is time to stop, a single // 64bit value will be written on the other end of the connection and this // descriptor will become readable. int read_descriptor_; // The write end of a connection used to interrupt the select call. A single // 64bit non-zero value may be written to this to wake up the select which is // waiting for the other end to become readable. This descriptor will only // differ from the read descriptor when a pipe is used. int write_descriptor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/eventfd_select_interrupter.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_EVENTFD) #endif // ASIO_DETAIL_EVENTFD_SELECT_INTERRUPTER_HPP galera-26.4.3/asio/asio/detail/reactor_fwd.hpp0000664000177500017540000000200613540715002017514 0ustar dbartmy// // detail/reactor_fwd.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTOR_FWD_HPP #define ASIO_DETAIL_REACTOR_FWD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" namespace asio { namespace detail { #if defined(ASIO_WINDOWS_RUNTIME) typedef class null_reactor reactor; #elif defined(ASIO_HAS_IOCP) typedef class select_reactor reactor; #elif defined(ASIO_HAS_EPOLL) typedef class epoll_reactor reactor; #elif defined(ASIO_HAS_KQUEUE) typedef class kqueue_reactor reactor; #elif defined(ASIO_HAS_DEV_POLL) typedef class dev_poll_reactor reactor; #else typedef class select_reactor reactor; #endif } // namespace detail } // namespace asio #endif // ASIO_DETAIL_REACTOR_FWD_HPP galera-26.4.3/asio/asio/detail/buffered_stream_storage.hpp0000664000177500017540000000547213540715002022110 0ustar dbartmy// // detail/buffered_stream_storage.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_BUFFERED_STREAM_STORAGE_HPP #define ASIO_DETAIL_BUFFERED_STREAM_STORAGE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/buffer.hpp" #include "asio/detail/assert.hpp" #include #include #include #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class buffered_stream_storage { public: // The type of the bytes stored in the buffer. typedef unsigned char byte_type; // The type used for offsets into the buffer. typedef std::size_t size_type; // Constructor. 
explicit buffered_stream_storage(std::size_t buffer_capacity) : begin_offset_(0), end_offset_(0), buffer_(buffer_capacity) { } /// Clear the buffer. void clear() { begin_offset_ = 0; end_offset_ = 0; } // Return a pointer to the beginning of the unread data. mutable_buffer data() { return asio::buffer(buffer_) + begin_offset_; } // Return a pointer to the beginning of the unread data. const_buffer data() const { return asio::buffer(buffer_) + begin_offset_; } // Is there no unread data in the buffer. bool empty() const { return begin_offset_ == end_offset_; } // Return the amount of unread data the is in the buffer. size_type size() const { return end_offset_ - begin_offset_; } // Resize the buffer to the specified length. void resize(size_type length) { ASIO_ASSERT(length <= capacity()); if (begin_offset_ + length <= capacity()) { end_offset_ = begin_offset_ + length; } else { using namespace std; // For memmove. memmove(&buffer_[0], &buffer_[0] + begin_offset_, size()); end_offset_ = length; begin_offset_ = 0; } } // Return the maximum size for data in the buffer. size_type capacity() const { return buffer_.size(); } // Consume multiple bytes from the beginning of the buffer. void consume(size_type count) { ASIO_ASSERT(begin_offset_ + count <= end_offset_); begin_offset_ += count; if (empty()) clear(); } private: // The offset to the beginning of the unread data. size_type begin_offset_; // The offset to the end of the unread data. size_type end_offset_; // The data in the buffer. std::vector buffer_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_BUFFERED_STREAM_STORAGE_HPP galera-26.4.3/asio/asio/detail/resolve_endpoint_op.hpp0000664000177500017540000000777513540715002021314 0ustar dbartmy// // detail/resolve_endpoint_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_RESOLVER_ENDPOINT_OP_HPP #define ASIO_DETAIL_RESOLVER_ENDPOINT_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/ip/basic_resolver_iterator.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class resolve_endpoint_op : public operation { public: ASIO_DEFINE_HANDLER_PTR(resolve_endpoint_op); typedef typename Protocol::endpoint endpoint_type; typedef asio::ip::basic_resolver_iterator iterator_type; resolve_endpoint_op(socket_ops::weak_cancel_token_type cancel_token, const endpoint_type& endpoint, io_service_impl& ios, Handler& handler) : operation(&resolve_endpoint_op::do_complete), cancel_token_(cancel_token), endpoint_(endpoint), io_service_impl_(ios), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the operation object. resolve_endpoint_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; if (owner && owner != &o->io_service_impl_) { // The operation is being run on the worker io_service. Time to perform // the resolver operation. // Perform the blocking endpoint resolution operation. 
char host_name[NI_MAXHOST]; char service_name[NI_MAXSERV]; socket_ops::background_getnameinfo(o->cancel_token_, o->endpoint_.data(), o->endpoint_.size(), host_name, NI_MAXHOST, service_name, NI_MAXSERV, o->endpoint_.protocol().type(), o->ec_); o->iter_ = iterator_type::create(o->endpoint_, host_name, service_name); // Pass operation back to main io_service for completion. o->io_service_impl_.post_deferred_completion(o); p.v = p.p = 0; } else { // The operation has been returned to the main io_service. The completion // handler is ready to be delivered. ASIO_HANDLER_COMPLETION((o)); // Make a copy of the handler so that the memory can be deallocated // before the upcall is made. Even if we're not about to make an upcall, // a sub-object of the handler may be the true owner of the memory // associated with the handler. Consequently, a local copy of the handler // is required to ensure that any owning sub-object remains valid until // after we have deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, o->iter_); p.h = asio::detail::addressof(handler.handler_); p.reset(); if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, "...")); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } } private: socket_ops::weak_cancel_token_type cancel_token_; endpoint_type endpoint_; io_service_impl& io_service_impl_; Handler handler_; asio::error_code ec_; iterator_type iter_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_RESOLVER_ENDPOINT_OP_HPP galera-26.4.3/asio/asio/detail/handler_cont_helpers.hpp0000664000177500017540000000240713540715002021404 0ustar dbartmy// // detail/handler_cont_helpers.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_HANDLER_CONT_HELPERS_HPP #define ASIO_DETAIL_HANDLER_CONT_HELPERS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/addressof.hpp" #include "asio/handler_continuation_hook.hpp" #include "asio/detail/push_options.hpp" // Calls to asio_handler_is_continuation must be made from a namespace that // does not contain overloads of this function. This namespace is defined here // for that purpose. namespace asio_handler_cont_helpers { template inline bool is_continuation(Context& context) { #if !defined(ASIO_HAS_HANDLER_HOOKS) return false; #else using asio::asio_handler_is_continuation; return asio_handler_is_continuation( asio::detail::addressof(context)); #endif } } // namespace asio_handler_cont_helpers #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_HANDLER_CONT_HELPERS_HPP galera-26.4.3/asio/asio/detail/consuming_buffers.hpp0000664000177500017540000001626413540715002020746 0ustar dbartmy// // detail/consuming_buffers.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_CONSUMING_BUFFERS_HPP #define ASIO_DETAIL_CONSUMING_BUFFERS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/buffer.hpp" #include "asio/detail/limits.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // A proxy iterator for a sub-range in a list of buffers. template class consuming_buffers_iterator { public: /// The type used for the distance between two iterators. 
typedef std::ptrdiff_t difference_type; /// The type of the value pointed to by the iterator. typedef Buffer value_type; /// The type of the result of applying operator->() to the iterator. typedef const Buffer* pointer; /// The type of the result of applying operator*() to the iterator. typedef const Buffer& reference; /// The iterator category. typedef std::forward_iterator_tag iterator_category; // Default constructor creates an end iterator. consuming_buffers_iterator() : at_end_(true) { } // Construct with a buffer for the first entry and an iterator // range for the remaining entries. consuming_buffers_iterator(bool at_end, const Buffer& first, Buffer_Iterator begin_remainder, Buffer_Iterator end_remainder, std::size_t max_size) : at_end_(max_size > 0 ? at_end : true), first_(buffer(first, max_size)), begin_remainder_(begin_remainder), end_remainder_(end_remainder), offset_(0), max_size_(max_size) { } // Dereference an iterator. const Buffer& operator*() const { return dereference(); } // Dereference an iterator. const Buffer* operator->() const { return &dereference(); } // Increment operator (prefix). consuming_buffers_iterator& operator++() { increment(); return *this; } // Increment operator (postfix). consuming_buffers_iterator operator++(int) { consuming_buffers_iterator tmp(*this); ++*this; return tmp; } // Test two iterators for equality. friend bool operator==(const consuming_buffers_iterator& a, const consuming_buffers_iterator& b) { return a.equal(b); } // Test two iterators for inequality. 
friend bool operator!=(const consuming_buffers_iterator& a, const consuming_buffers_iterator& b) { return !a.equal(b); } private: void increment() { if (!at_end_) { if (begin_remainder_ == end_remainder_ || offset_ + buffer_size(first_) >= max_size_) { at_end_ = true; } else { offset_ += buffer_size(first_); first_ = buffer(*begin_remainder_++, max_size_ - offset_); } } } bool equal(const consuming_buffers_iterator& other) const { if (at_end_ && other.at_end_) return true; return !at_end_ && !other.at_end_ && buffer_cast(first_) == buffer_cast(other.first_) && buffer_size(first_) == buffer_size(other.first_) && begin_remainder_ == other.begin_remainder_ && end_remainder_ == other.end_remainder_; } const Buffer& dereference() const { return first_; } bool at_end_; Buffer first_; Buffer_Iterator begin_remainder_; Buffer_Iterator end_remainder_; std::size_t offset_; std::size_t max_size_; }; // A proxy for a sub-range in a list of buffers. template class consuming_buffers { public: // The type for each element in the list of buffers. typedef Buffer value_type; // A forward-only iterator type that may be used to read elements. typedef consuming_buffers_iterator const_iterator; // Construct to represent the entire list of buffers. consuming_buffers(const Buffers& buffers) : buffers_(buffers), at_end_(buffers_.begin() == buffers_.end()), begin_remainder_(buffers_.begin()), max_size_((std::numeric_limits::max)()) { if (!at_end_) { first_ = *buffers_.begin(); ++begin_remainder_; } } // Copy constructor. consuming_buffers(const consuming_buffers& other) : buffers_(other.buffers_), at_end_(other.at_end_), first_(other.first_), begin_remainder_(buffers_.begin()), max_size_(other.max_size_) { typename Buffers::const_iterator first = other.buffers_.begin(); typename Buffers::const_iterator second = other.begin_remainder_; std::advance(begin_remainder_, std::distance(first, second)); } // Assignment operator. 
consuming_buffers& operator=(const consuming_buffers& other) { buffers_ = other.buffers_; at_end_ = other.at_end_; first_ = other.first_; begin_remainder_ = buffers_.begin(); typename Buffers::const_iterator first = other.buffers_.begin(); typename Buffers::const_iterator second = other.begin_remainder_; std::advance(begin_remainder_, std::distance(first, second)); max_size_ = other.max_size_; return *this; } // Get a forward-only iterator to the first element. const_iterator begin() const { return const_iterator(at_end_, first_, begin_remainder_, buffers_.end(), max_size_); } // Get a forward-only iterator for one past the last element. const_iterator end() const { return const_iterator(); } // Set the maximum size for a single transfer. void prepare(std::size_t max_size) { max_size_ = max_size; } // Consume the specified number of bytes from the buffers. void consume(std::size_t size) { // Remove buffers from the start until the specified size is reached. while (size > 0 && !at_end_) { if (buffer_size(first_) <= size) { size -= buffer_size(first_); if (begin_remainder_ == buffers_.end()) at_end_ = true; else first_ = *begin_remainder_++; } else { first_ = first_ + size; size = 0; } } // Remove any more empty buffers at the start. while (!at_end_ && buffer_size(first_) == 0) { if (begin_remainder_ == buffers_.end()) at_end_ = true; else first_ = *begin_remainder_++; } } private: Buffers buffers_; bool at_end_; Buffer first_; typename Buffers::const_iterator begin_remainder_; std::size_t max_size_; }; // Specialisation for null_buffers to ensure that the null_buffers type is // always passed through to the underlying read or write operation. template class consuming_buffers : public asio::null_buffers { public: consuming_buffers(const asio::null_buffers&) { // No-op. } void prepare(std::size_t) { // No-op. } void consume(std::size_t) { // No-op. 
} }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_CONSUMING_BUFFERS_HPP galera-26.4.3/asio/asio/detail/reactive_socket_service.hpp0000664000177500017540000003554213540715002022122 0ustar dbartmy// // detail/reactive_socket_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_SOCKET_SERVICE_HPP #define ASIO_DETAIL_REACTIVE_SOCKET_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_IOCP) #include "asio/buffer.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/socket_base.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/reactive_null_buffers_op.hpp" #include "asio/detail/reactive_socket_accept_op.hpp" #include "asio/detail/reactive_socket_connect_op.hpp" #include "asio/detail/reactive_socket_recvfrom_op.hpp" #include "asio/detail/reactive_socket_sendto_op.hpp" #include "asio/detail/reactive_socket_service_base.hpp" #include "asio/detail/reactor.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_holder.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class reactive_socket_service : public reactive_socket_service_base { public: // The protocol type. typedef Protocol protocol_type; // The endpoint type. typedef typename Protocol::endpoint endpoint_type; // The native type of a socket. typedef socket_type native_handle_type; // The implementation type of the socket. 
struct implementation_type : reactive_socket_service_base::base_implementation_type { // Default constructor. implementation_type() : protocol_(endpoint_type().protocol()) { } // The protocol associated with the socket. protocol_type protocol_; }; // Constructor. reactive_socket_service(asio::io_service& io_service) : reactive_socket_service_base(io_service) { } // Move-construct a new socket implementation. void move_construct(implementation_type& impl, implementation_type& other_impl) { this->base_move_construct(impl, other_impl); impl.protocol_ = other_impl.protocol_; other_impl.protocol_ = endpoint_type().protocol(); } // Move-assign from another socket implementation. void move_assign(implementation_type& impl, reactive_socket_service_base& other_service, implementation_type& other_impl) { this->base_move_assign(impl, other_service, other_impl); impl.protocol_ = other_impl.protocol_; other_impl.protocol_ = endpoint_type().protocol(); } // Move-construct a new socket implementation from another protocol type. template void converting_move_construct(implementation_type& impl, typename reactive_socket_service< Protocol1>::implementation_type& other_impl) { this->base_move_construct(impl, other_impl); impl.protocol_ = protocol_type(other_impl.protocol_); other_impl.protocol_ = typename Protocol1::endpoint().protocol(); } // Open a new socket implementation. asio::error_code open(implementation_type& impl, const protocol_type& protocol, asio::error_code& ec) { if (!do_open(impl, protocol.family(), protocol.type(), protocol.protocol(), ec)) impl.protocol_ = protocol; return ec; } // Assign a native socket to a socket implementation. asio::error_code assign(implementation_type& impl, const protocol_type& protocol, const native_handle_type& native_socket, asio::error_code& ec) { if (!do_assign(impl, protocol.type(), native_socket, ec)) impl.protocol_ = protocol; return ec; } // Get the native socket representation. 
native_handle_type native_handle(implementation_type& impl) { return impl.socket_; } // Bind the socket to the specified local endpoint. asio::error_code bind(implementation_type& impl, const endpoint_type& endpoint, asio::error_code& ec) { socket_ops::bind(impl.socket_, endpoint.data(), endpoint.size(), ec); return ec; } // Set a socket option. template asio::error_code set_option(implementation_type& impl, const Option& option, asio::error_code& ec) { socket_ops::setsockopt(impl.socket_, impl.state_, option.level(impl.protocol_), option.name(impl.protocol_), option.data(impl.protocol_), option.size(impl.protocol_), ec); return ec; } // Set a socket option. template asio::error_code get_option(const implementation_type& impl, Option& option, asio::error_code& ec) const { std::size_t size = option.size(impl.protocol_); socket_ops::getsockopt(impl.socket_, impl.state_, option.level(impl.protocol_), option.name(impl.protocol_), option.data(impl.protocol_), &size, ec); if (!ec) option.resize(impl.protocol_, size); return ec; } // Get the local endpoint. endpoint_type local_endpoint(const implementation_type& impl, asio::error_code& ec) const { endpoint_type endpoint; std::size_t addr_len = endpoint.capacity(); if (socket_ops::getsockname(impl.socket_, endpoint.data(), &addr_len, ec)) return endpoint_type(); endpoint.resize(addr_len); return endpoint; } // Get the remote endpoint. endpoint_type remote_endpoint(const implementation_type& impl, asio::error_code& ec) const { endpoint_type endpoint; std::size_t addr_len = endpoint.capacity(); if (socket_ops::getpeername(impl.socket_, endpoint.data(), &addr_len, false, ec)) return endpoint_type(); endpoint.resize(addr_len); return endpoint; } // Send a datagram to the specified endpoint. Returns the number of bytes // sent. 
template size_t send_to(implementation_type& impl, const ConstBufferSequence& buffers, const endpoint_type& destination, socket_base::message_flags flags, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); return socket_ops::sync_sendto(impl.socket_, impl.state_, bufs.buffers(), bufs.count(), flags, destination.data(), destination.size(), ec); } // Wait until data can be sent without blocking. size_t send_to(implementation_type& impl, const null_buffers&, const endpoint_type&, socket_base::message_flags, asio::error_code& ec) { // Wait for socket to become ready. socket_ops::poll_write(impl.socket_, impl.state_, ec); return 0; } // Start an asynchronous send. The data being sent must be valid for the // lifetime of the asynchronous operation. template void async_send_to(implementation_type& impl, const ConstBufferSequence& buffers, const endpoint_type& destination, socket_base::message_flags flags, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_socket_sendto_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.socket_, buffers, destination, flags, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_send_to")); start_op(impl, reactor::write_op, p.p, is_continuation, true, false); p.v = p.p = 0; } // Start an asynchronous wait until data can be sent without blocking. template void async_send_to(implementation_type& impl, const null_buffers&, const endpoint_type&, socket_base::message_flags, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. 
typedef reactive_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_send_to(null_buffers)")); start_op(impl, reactor::write_op, p.p, is_continuation, false, false); p.v = p.p = 0; } // Receive a datagram with the endpoint of the sender. Returns the number of // bytes received. template size_t receive_from(implementation_type& impl, const MutableBufferSequence& buffers, endpoint_type& sender_endpoint, socket_base::message_flags flags, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); std::size_t addr_len = sender_endpoint.capacity(); std::size_t bytes_recvd = socket_ops::sync_recvfrom( impl.socket_, impl.state_, bufs.buffers(), bufs.count(), flags, sender_endpoint.data(), &addr_len, ec); if (!ec) sender_endpoint.resize(addr_len); return bytes_recvd; } // Wait until data can be received without blocking. size_t receive_from(implementation_type& impl, const null_buffers&, endpoint_type& sender_endpoint, socket_base::message_flags, asio::error_code& ec) { // Wait for socket to become ready. socket_ops::poll_read(impl.socket_, impl.state_, ec); // Reset endpoint since it can be given no sensible value at this time. sender_endpoint = endpoint_type(); return 0; } // Start an asynchronous receive. The buffer for the data being received and // the sender_endpoint object must both be valid for the lifetime of the // asynchronous operation. template void async_receive_from(implementation_type& impl, const MutableBufferSequence& buffers, endpoint_type& sender_endpoint, socket_base::message_flags flags, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. 
typedef reactive_socket_recvfrom_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; int protocol = impl.protocol_.type(); p.p = new (p.v) op(impl.socket_, protocol, buffers, sender_endpoint, flags, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_receive_from")); start_op(impl, (flags & socket_base::message_out_of_band) ? reactor::except_op : reactor::read_op, p.p, is_continuation, true, false); p.v = p.p = 0; } // Wait until data can be received without blocking. template void async_receive_from(implementation_type& impl, const null_buffers&, endpoint_type& sender_endpoint, socket_base::message_flags flags, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_receive_from(null_buffers)")); // Reset endpoint since it can be given no sensible value at this time. sender_endpoint = endpoint_type(); start_op(impl, (flags & socket_base::message_out_of_band) ? reactor::except_op : reactor::read_op, p.p, is_continuation, false, false); p.v = p.p = 0; } // Accept a new connection. template asio::error_code accept(implementation_type& impl, Socket& peer, endpoint_type* peer_endpoint, asio::error_code& ec) { // We cannot accept a socket that is already open. if (peer.is_open()) { ec = asio::error::already_open; return ec; } std::size_t addr_len = peer_endpoint ? peer_endpoint->capacity() : 0; socket_holder new_socket(socket_ops::sync_accept(impl.socket_, impl.state_, peer_endpoint ? peer_endpoint->data() : 0, peer_endpoint ? &addr_len : 0, ec)); // On success, assign new connection to peer socket object. 
if (new_socket.get() != invalid_socket) { if (peer_endpoint) peer_endpoint->resize(addr_len); if (!peer.assign(impl.protocol_, new_socket.get(), ec)) new_socket.release(); } return ec; } // Start an asynchronous accept. The peer and peer_endpoint objects // must be valid until the accept's handler is invoked. template void async_accept(implementation_type& impl, Socket& peer, endpoint_type* peer_endpoint, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_socket_accept_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.socket_, impl.state_, peer, impl.protocol_, peer_endpoint, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_accept")); start_accept_op(impl, p.p, is_continuation, peer.is_open()); p.v = p.p = 0; } // Connect the socket to the specified endpoint. asio::error_code connect(implementation_type& impl, const endpoint_type& peer_endpoint, asio::error_code& ec) { socket_ops::sync_connect(impl.socket_, peer_endpoint.data(), peer_endpoint.size(), ec); return ec; } // Start an asynchronous connect. template void async_connect(implementation_type& impl, const endpoint_type& peer_endpoint, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. 
typedef reactive_socket_connect_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.socket_, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_connect")); start_connect_op(impl, p.p, is_continuation, peer_endpoint.data(), peer_endpoint.size()); p.v = p.p = 0; } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_REACTIVE_SOCKET_SERVICE_HPP galera-26.4.3/asio/asio/detail/win_mutex.hpp0000664000177500017540000000323213540715002017236 0ustar dbartmy// // detail/win_mutex.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_MUTEX_HPP #define ASIO_DETAIL_WIN_MUTEX_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) #include "asio/detail/noncopyable.hpp" #include "asio/detail/scoped_lock.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class win_mutex : private noncopyable { public: typedef asio::detail::scoped_lock scoped_lock; // Constructor. ASIO_DECL win_mutex(); // Destructor. ~win_mutex() { ::DeleteCriticalSection(&crit_section_); } // Lock the mutex. void lock() { ::EnterCriticalSection(&crit_section_); } // Unlock the mutex. void unlock() { ::LeaveCriticalSection(&crit_section_); } private: // Initialisation must be performed in a separate function to the constructor // since the compiler does not support the use of structured exceptions and // C++ exceptions in the same function. 
ASIO_DECL int do_init(); ::CRITICAL_SECTION crit_section_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/win_mutex.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_WINDOWS) #endif // ASIO_DETAIL_WIN_MUTEX_HPP galera-26.4.3/asio/asio/detail/null_tss_ptr.hpp0000664000177500017540000000223313540715002017747 0ustar dbartmy// // detail/null_tss_ptr.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_NULL_TSS_PTR_HPP #define ASIO_DETAIL_NULL_TSS_PTR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class null_tss_ptr : private noncopyable { public: // Constructor. null_tss_ptr() : value_(0) { } // Destructor. ~null_tss_ptr() { } // Get the value. operator T*() const { return value_; } // Set the value. void operator=(T* value) { value_ = value; } private: T* value_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_HAS_THREADS) #endif // ASIO_DETAIL_NULL_TSS_PTR_HPP galera-26.4.3/asio/asio/detail/atomic_count.hpp0000664000177500017540000000244713540715002017712 0ustar dbartmy// // detail/atomic_count.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_ATOMIC_COUNT_HPP #define ASIO_DETAIL_ATOMIC_COUNT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) // Nothing to include. #elif defined(ASIO_HAS_STD_ATOMIC) # include #else // defined(ASIO_HAS_STD_ATOMIC) # include #endif // defined(ASIO_HAS_STD_ATOMIC) namespace asio { namespace detail { #if !defined(ASIO_HAS_THREADS) typedef long atomic_count; inline void increment(atomic_count& a, long b) { a += b; } #elif defined(ASIO_HAS_STD_ATOMIC) typedef std::atomic atomic_count; inline void increment(atomic_count& a, long b) { a += b; } #else // defined(ASIO_HAS_STD_ATOMIC) typedef boost::detail::atomic_count atomic_count; inline void increment(atomic_count& a, long b) { while (b > 0) ++a, --b; } #endif // defined(ASIO_HAS_STD_ATOMIC) } // namespace detail } // namespace asio #endif // ASIO_DETAIL_ATOMIC_COUNT_HPP galera-26.4.3/asio/asio/detail/operation.hpp0000664000177500017540000000157013540715002017222 0ustar dbartmy// // detail/operation.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_OPERATION_HPP #define ASIO_DETAIL_OPERATION_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) # include "asio/detail/win_iocp_operation.hpp" #else # include "asio/detail/task_io_service_operation.hpp" #endif namespace asio { namespace detail { #if defined(ASIO_HAS_IOCP) typedef win_iocp_operation operation; #else typedef task_io_service_operation operation; #endif } // namespace detail } // namespace asio #endif // ASIO_DETAIL_OPERATION_HPP galera-26.4.3/asio/asio/detail/null_fenced_block.hpp0000664000177500017540000000164113540715002020651 0ustar dbartmy// // detail/null_fenced_block.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_NULL_FENCED_BLOCK_HPP #define ASIO_DETAIL_NULL_FENCED_BLOCK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class null_fenced_block : private noncopyable { public: enum half_or_full_t { half, full }; // Constructor. explicit null_fenced_block(half_or_full_t) { } // Destructor. ~null_fenced_block() { } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_NULL_FENCED_BLOCK_HPP galera-26.4.3/asio/asio/detail/keyword_tss_ptr.hpp0000664000177500017540000000245013540715002020462 0ustar dbartmy// // detail/keyword_tss_ptr.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_KEYWORD_TSS_PTR_HPP #define ASIO_DETAIL_KEYWORD_TSS_PTR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_THREAD_KEYWORD_EXTENSION) #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class keyword_tss_ptr : private noncopyable { public: // Constructor. keyword_tss_ptr() { } // Destructor. ~keyword_tss_ptr() { } // Get the value. operator T*() const { return value_; } // Set the value. void operator=(T* value) { value_ = value; } private: static ASIO_THREAD_KEYWORD T* value_; }; template ASIO_THREAD_KEYWORD T* keyword_tss_ptr::value_; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_THREAD_KEYWORD_EXTENSION) #endif // ASIO_DETAIL_KEYWORD_TSS_PTR_HPP galera-26.4.3/asio/asio/detail/win_static_mutex.hpp0000664000177500017540000000340313540715002020605 0ustar dbartmy// // detail/win_static_mutex.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_STATIC_MUTEX_HPP #define ASIO_DETAIL_WIN_STATIC_MUTEX_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) #include "asio/detail/scoped_lock.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct win_static_mutex { typedef asio::detail::scoped_lock scoped_lock; // Initialise the mutex. ASIO_DECL void init(); // Initialisation must be performed in a separate function to the "public" // init() function since the compiler does not support the use of structured // exceptions and C++ exceptions in the same function. ASIO_DECL int do_init(); // Lock the mutex. void lock() { ::EnterCriticalSection(&crit_section_); } // Unlock the mutex. void unlock() { ::LeaveCriticalSection(&crit_section_); } bool initialised_; ::CRITICAL_SECTION crit_section_; }; #if defined(UNDER_CE) # define ASIO_WIN_STATIC_MUTEX_INIT { false, { 0, 0, 0, 0, 0 } } #else // defined(UNDER_CE) # define ASIO_WIN_STATIC_MUTEX_INIT { false, { 0, 0, 0, 0, 0, 0 } } #endif // defined(UNDER_CE) } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/win_static_mutex.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_WINDOWS) #endif // ASIO_DETAIL_WIN_STATIC_MUTEX_HPP galera-26.4.3/asio/asio/detail/limits.hpp0000664000177500017540000000126413540715002016523 0ustar dbartmy// // detail/limits.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2011 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_LIMITS_HPP #define ASIO_DETAIL_LIMITS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_BOOST_LIMITS) # include #else // defined(ASIO_HAS_BOOST_LIMITS) # include #endif // defined(ASIO_HAS_BOOST_LIMITS) #endif // ASIO_DETAIL_LIMITS_HPP galera-26.4.3/asio/asio/detail/event.hpp0000664000177500017540000000231513540715002016341 0ustar dbartmy// // detail/event.hpp // ~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_EVENT_HPP #define ASIO_DETAIL_EVENT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) # include "asio/detail/null_event.hpp" #elif defined(ASIO_WINDOWS) # include "asio/detail/win_event.hpp" #elif defined(ASIO_HAS_PTHREADS) # include "asio/detail/posix_event.hpp" #elif defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) # include "asio/detail/std_event.hpp" #else # error Only Windows, POSIX and std::condition_variable are supported! 
#endif namespace asio { namespace detail { #if !defined(ASIO_HAS_THREADS) typedef null_event event; #elif defined(ASIO_WINDOWS) typedef win_event event; #elif defined(ASIO_HAS_PTHREADS) typedef posix_event event; #elif defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) typedef std_event event; #endif } // namespace detail } // namespace asio #endif // ASIO_DETAIL_EVENT_HPP galera-26.4.3/asio/asio/detail/reactive_socket_connect_op.hpp0000664000177500017540000000616113540715002022604 0ustar dbartmy// // detail/reactive_socket_connect_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_SOCKET_CONNECT_OP_HPP #define ASIO_DETAIL_REACTIVE_SOCKET_CONNECT_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class reactive_socket_connect_op_base : public reactor_op { public: reactive_socket_connect_op_base(socket_type socket, func_type complete_func) : reactor_op(&reactive_socket_connect_op_base::do_perform, complete_func), socket_(socket) { } static bool do_perform(reactor_op* base) { reactive_socket_connect_op_base* o( static_cast(base)); return socket_ops::non_blocking_connect(o->socket_, o->ec_); } private: socket_type socket_; }; template class reactive_socket_connect_op : public reactive_socket_connect_op_base { public: ASIO_DEFINE_HANDLER_PTR(reactive_socket_connect_op); reactive_socket_connect_op(socket_type 
socket, Handler& handler) : reactive_socket_connect_op_base(socket, &reactive_socket_connect_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. reactive_socket_connect_op* o (static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder1 handler(o->handler_, o->ec_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_)); asio_handler_invoke_helpers::invoke(handler, handler); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_REACTIVE_SOCKET_CONNECT_OP_HPP galera-26.4.3/asio/asio/detail/timer_queue_ptime.hpp0000664000177500017540000000532713540715002020750 0ustar dbartmy// // detail/timer_queue_ptime.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_TIMER_QUEUE_PTIME_HPP #define ASIO_DETAIL_TIMER_QUEUE_PTIME_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/time_traits.hpp" #include "asio/detail/timer_queue.hpp" #include "asio/detail/push_options.hpp" #if defined(ASIO_HAS_BOOST_DATE_TIME) namespace asio { namespace detail { struct forwarding_posix_time_traits : time_traits {}; // Template specialisation for the commonly used instantation. template <> class timer_queue > : public timer_queue_base { public: // The time type. typedef boost::posix_time::ptime time_type; // The duration type. typedef boost::posix_time::time_duration duration_type; // Per-timer data. typedef timer_queue::per_timer_data per_timer_data; // Constructor. ASIO_DECL timer_queue(); // Destructor. ASIO_DECL virtual ~timer_queue(); // Add a new timer to the queue. Returns true if this is the timer that is // earliest in the queue, in which case the reactor's event demultiplexing // function call may need to be interrupted and restarted. ASIO_DECL bool enqueue_timer(const time_type& time, per_timer_data& timer, wait_op* op); // Whether there are no timers in the queue. ASIO_DECL virtual bool empty() const; // Get the time for the timer that is earliest in the queue. ASIO_DECL virtual long wait_duration_msec(long max_duration) const; // Get the time for the timer that is earliest in the queue. ASIO_DECL virtual long wait_duration_usec(long max_duration) const; // Dequeue all timers not later than the current time. ASIO_DECL virtual void get_ready_timers(op_queue& ops); // Dequeue all timers. ASIO_DECL virtual void get_all_timers(op_queue& ops); // Cancel and dequeue operations for the given timer. 
ASIO_DECL std::size_t cancel_timer( per_timer_data& timer, op_queue& ops, std::size_t max_cancelled = (std::numeric_limits::max)()); private: timer_queue impl_; }; } // namespace detail } // namespace asio #endif // defined(ASIO_HAS_BOOST_DATE_TIME) #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/timer_queue_ptime.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_DETAIL_TIMER_QUEUE_PTIME_HPP galera-26.4.3/asio/asio/detail/reactive_socket_service_base.hpp0000664000177500017540000003664213540715002023116 0ustar dbartmy// // detail/reactive_socket_service_base.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_SOCKET_SERVICE_BASE_HPP #define ASIO_DETAIL_REACTIVE_SOCKET_SERVICE_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_IOCP) \ && !defined(ASIO_WINDOWS_RUNTIME) #include "asio/buffer.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/socket_base.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/reactive_null_buffers_op.hpp" #include "asio/detail/reactive_socket_recv_op.hpp" #include "asio/detail/reactive_socket_recvmsg_op.hpp" #include "asio/detail/reactive_socket_send_op.hpp" #include "asio/detail/reactor.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_holder.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class reactive_socket_service_base { public: // The native type of a socket. 
typedef socket_type native_handle_type; // The implementation type of the socket. struct base_implementation_type { // The native socket representation. socket_type socket_; // The current state of the socket. socket_ops::state_type state_; // Per-descriptor data used by the reactor. reactor::per_descriptor_data reactor_data_; }; // Constructor. ASIO_DECL reactive_socket_service_base( asio::io_service& io_service); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Construct a new socket implementation. ASIO_DECL void construct(base_implementation_type& impl); // Move-construct a new socket implementation. ASIO_DECL void base_move_construct(base_implementation_type& impl, base_implementation_type& other_impl); // Move-assign from another socket implementation. ASIO_DECL void base_move_assign(base_implementation_type& impl, reactive_socket_service_base& other_service, base_implementation_type& other_impl); // Destroy a socket implementation. ASIO_DECL void destroy(base_implementation_type& impl); // Determine whether the socket is open. bool is_open(const base_implementation_type& impl) const { return impl.socket_ != invalid_socket; } // Destroy a socket implementation. ASIO_DECL asio::error_code close( base_implementation_type& impl, asio::error_code& ec); // Get the native socket representation. native_handle_type native_handle(base_implementation_type& impl) { return impl.socket_; } // Cancel all operations associated with the socket. ASIO_DECL asio::error_code cancel( base_implementation_type& impl, asio::error_code& ec); // Determine whether the socket is at the out-of-band data mark. bool at_mark(const base_implementation_type& impl, asio::error_code& ec) const { return socket_ops::sockatmark(impl.socket_, ec); } // Determine the number of bytes available for reading. 
std::size_t available(const base_implementation_type& impl, asio::error_code& ec) const { return socket_ops::available(impl.socket_, ec); } // Place the socket into the state where it will listen for new connections. asio::error_code listen(base_implementation_type& impl, int backlog, asio::error_code& ec) { socket_ops::listen(impl.socket_, backlog, ec); return ec; } // Perform an IO control command on the socket. template asio::error_code io_control(base_implementation_type& impl, IO_Control_Command& command, asio::error_code& ec) { socket_ops::ioctl(impl.socket_, impl.state_, command.name(), static_cast(command.data()), ec); return ec; } // Gets the non-blocking mode of the socket. bool non_blocking(const base_implementation_type& impl) const { return (impl.state_ & socket_ops::user_set_non_blocking) != 0; } // Sets the non-blocking mode of the socket. asio::error_code non_blocking(base_implementation_type& impl, bool mode, asio::error_code& ec) { socket_ops::set_user_non_blocking(impl.socket_, impl.state_, mode, ec); return ec; } // Gets the non-blocking mode of the native socket implementation. bool native_non_blocking(const base_implementation_type& impl) const { return (impl.state_ & socket_ops::internal_non_blocking) != 0; } // Sets the non-blocking mode of the native socket implementation. asio::error_code native_non_blocking(base_implementation_type& impl, bool mode, asio::error_code& ec) { socket_ops::set_internal_non_blocking(impl.socket_, impl.state_, mode, ec); return ec; } // Disable sends or receives on the socket. asio::error_code shutdown(base_implementation_type& impl, socket_base::shutdown_type what, asio::error_code& ec) { socket_ops::shutdown(impl.socket_, what, ec); return ec; } // Send the given data to the peer. 
template size_t send(base_implementation_type& impl, const ConstBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); return socket_ops::sync_send(impl.socket_, impl.state_, bufs.buffers(), bufs.count(), flags, bufs.all_empty(), ec); } // Wait until data can be sent without blocking. size_t send(base_implementation_type& impl, const null_buffers&, socket_base::message_flags, asio::error_code& ec) { // Wait for socket to become ready. socket_ops::poll_write(impl.socket_, impl.state_, ec); return 0; } // Start an asynchronous send. The data being sent must be valid for the // lifetime of the asynchronous operation. template void async_send(base_implementation_type& impl, const ConstBufferSequence& buffers, socket_base::message_flags flags, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_socket_send_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.socket_, buffers, flags, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_send")); start_op(impl, reactor::write_op, p.p, is_continuation, true, ((impl.state_ & socket_ops::stream_oriented) && buffer_sequence_adapter::all_empty(buffers))); p.v = p.p = 0; } // Start an asynchronous wait until data can be sent without blocking. template void async_send(base_implementation_type& impl, const null_buffers&, socket_base::message_flags, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. 
typedef reactive_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_send(null_buffers)")); start_op(impl, reactor::write_op, p.p, is_continuation, false, false); p.v = p.p = 0; } // Receive some data from the peer. Returns the number of bytes received. template size_t receive(base_implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); return socket_ops::sync_recv(impl.socket_, impl.state_, bufs.buffers(), bufs.count(), flags, bufs.all_empty(), ec); } // Wait until data can be received without blocking. size_t receive(base_implementation_type& impl, const null_buffers&, socket_base::message_flags, asio::error_code& ec) { // Wait for socket to become ready. socket_ops::poll_read(impl.socket_, impl.state_, ec); return 0; } // Start an asynchronous receive. The buffer for the data being received // must be valid for the lifetime of the asynchronous operation. template void async_receive(base_implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags flags, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_socket_recv_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.socket_, impl.state_, buffers, flags, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_receive")); start_op(impl, (flags & socket_base::message_out_of_band) ? 
reactor::except_op : reactor::read_op, p.p, is_continuation, (flags & socket_base::message_out_of_band) == 0, ((impl.state_ & socket_ops::stream_oriented) && buffer_sequence_adapter::all_empty(buffers))); p.v = p.p = 0; } // Wait until data can be received without blocking. template void async_receive(base_implementation_type& impl, const null_buffers&, socket_base::message_flags flags, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_receive(null_buffers)")); start_op(impl, (flags & socket_base::message_out_of_band) ? reactor::except_op : reactor::read_op, p.p, is_continuation, false, false); p.v = p.p = 0; } // Receive some data with associated flags. Returns the number of bytes // received. template size_t receive_with_flags(base_implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags in_flags, socket_base::message_flags& out_flags, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); return socket_ops::sync_recvmsg(impl.socket_, impl.state_, bufs.buffers(), bufs.count(), in_flags, out_flags, ec); } // Wait until data can be received without blocking. size_t receive_with_flags(base_implementation_type& impl, const null_buffers&, socket_base::message_flags, socket_base::message_flags& out_flags, asio::error_code& ec) { // Wait for socket to become ready. socket_ops::poll_read(impl.socket_, impl.state_, ec); // Clear out_flags, since we cannot give it any other sensible value when // performing a null_buffers operation. out_flags = 0; return 0; } // Start an asynchronous receive. 
The buffer for the data being received // must be valid for the lifetime of the asynchronous operation. template void async_receive_with_flags(base_implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags in_flags, socket_base::message_flags& out_flags, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_socket_recvmsg_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.socket_, buffers, in_flags, out_flags, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_receive_with_flags")); start_op(impl, (in_flags & socket_base::message_out_of_band) ? reactor::except_op : reactor::read_op, p.p, is_continuation, (in_flags & socket_base::message_out_of_band) == 0, false); p.v = p.p = 0; } // Wait until data can be received without blocking. template void async_receive_with_flags(base_implementation_type& impl, const null_buffers&, socket_base::message_flags in_flags, socket_base::message_flags& out_flags, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_receive_with_flags(null_buffers)")); // Clear out_flags, since we cannot give it any other sensible value when // performing a null_buffers operation. out_flags = 0; start_op(impl, (in_flags & socket_base::message_out_of_band) ? reactor::except_op : reactor::read_op, p.p, is_continuation, false, false); p.v = p.p = 0; } protected: // Open a new socket implementation. 
ASIO_DECL asio::error_code do_open( base_implementation_type& impl, int af, int type, int protocol, asio::error_code& ec); // Assign a native socket to a socket implementation. ASIO_DECL asio::error_code do_assign( base_implementation_type& impl, int type, const native_handle_type& native_socket, asio::error_code& ec); // Start the asynchronous read or write operation. ASIO_DECL void start_op(base_implementation_type& impl, int op_type, reactor_op* op, bool is_continuation, bool is_non_blocking, bool noop); // Start the asynchronous accept operation. ASIO_DECL void start_accept_op(base_implementation_type& impl, reactor_op* op, bool is_continuation, bool peer_is_open); // Start the asynchronous connect operation. ASIO_DECL void start_connect_op(base_implementation_type& impl, reactor_op* op, bool is_continuation, const socket_addr_type* addr, size_t addrlen); // The selector that performs event demultiplexing for the service. reactor& reactor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/reactive_socket_service_base.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // !defined(ASIO_HAS_IOCP) // && !defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_REACTIVE_SOCKET_SERVICE_BASE_HPP galera-26.4.3/asio/asio/detail/select_reactor.hpp0000664000177500017540000001650413540715002020223 0ustar dbartmy// // detail/select_reactor.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SELECT_REACTOR_HPP #define ASIO_DETAIL_SELECT_REACTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) \ || (!defined(ASIO_HAS_DEV_POLL) \ && !defined(ASIO_HAS_EPOLL) \ && !defined(ASIO_HAS_KQUEUE) \ && !defined(ASIO_WINDOWS_RUNTIME)) #include #include "asio/detail/fd_set_adapter.hpp" #include "asio/detail/limits.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/reactor_op_queue.hpp" #include "asio/detail/select_interrupter.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/timer_queue_base.hpp" #include "asio/detail/timer_queue_set.hpp" #include "asio/detail/wait_op.hpp" #include "asio/io_service.hpp" #if defined(ASIO_HAS_IOCP) # include "asio/detail/thread.hpp" #endif // defined(ASIO_HAS_IOCP) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class select_reactor : public asio::detail::service_base { public: #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) enum op_types { read_op = 0, write_op = 1, except_op = 2, max_select_ops = 3, connect_op = 3, max_ops = 4 }; #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) enum op_types { read_op = 0, write_op = 1, except_op = 2, max_select_ops = 3, connect_op = 1, max_ops = 3 }; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Per-descriptor data. struct per_descriptor_data { }; // Constructor. ASIO_DECL select_reactor(asio::io_service& io_service); // Destructor. ASIO_DECL ~select_reactor(); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Recreate internal descriptors following a fork. 
ASIO_DECL void fork_service( asio::io_service::fork_event fork_ev); // Initialise the task, but only if the reactor is not in its own thread. ASIO_DECL void init_task(); // Register a socket with the reactor. Returns 0 on success, system error // code on failure. ASIO_DECL int register_descriptor(socket_type, per_descriptor_data&); // Register a descriptor with an associated single operation. Returns 0 on // success, system error code on failure. ASIO_DECL int register_internal_descriptor( int op_type, socket_type descriptor, per_descriptor_data& descriptor_data, reactor_op* op); // Post a reactor operation for immediate completion. void post_immediate_completion(reactor_op* op, bool is_continuation) { io_service_.post_immediate_completion(op, is_continuation); } // Start a new operation. The reactor operation will be performed when the // given descriptor is flagged as ready, or an error has occurred. ASIO_DECL void start_op(int op_type, socket_type descriptor, per_descriptor_data&, reactor_op* op, bool is_continuation, bool); // Cancel all operations associated with the given descriptor. The // handlers associated with the descriptor will be invoked with the // operation_aborted error. ASIO_DECL void cancel_ops(socket_type descriptor, per_descriptor_data&); // Cancel any operations that are running against the descriptor and remove // its registration from the reactor. ASIO_DECL void deregister_descriptor(socket_type descriptor, per_descriptor_data&, bool closing); // Remote the descriptor's registration from the reactor. ASIO_DECL void deregister_internal_descriptor( socket_type descriptor, per_descriptor_data& descriptor_data); // Move descriptor registration from one descriptor_data object to another. ASIO_DECL void move_descriptor(socket_type descriptor, per_descriptor_data& target_descriptor_data, per_descriptor_data& source_descriptor_data); // Add a new timer queue to the reactor. 
template void add_timer_queue(timer_queue& queue); // Remove a timer queue from the reactor. template void remove_timer_queue(timer_queue& queue); // Schedule a new operation in the given timer queue to expire at the // specified absolute time. template void schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op); // Cancel the timer operations associated with the given token. Returns the // number of operations that have been posted or dispatched. template std::size_t cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled = (std::numeric_limits::max)()); // Run select once until interrupted or events are ready to be dispatched. ASIO_DECL void run(bool block, op_queue& ops); // Interrupt the select loop. ASIO_DECL void interrupt(); private: #if defined(ASIO_HAS_IOCP) // Run the select loop in the thread. ASIO_DECL void run_thread(); // Entry point for the select loop thread. ASIO_DECL static void call_run_thread(select_reactor* reactor); #endif // defined(ASIO_HAS_IOCP) // Helper function to add a new timer queue. ASIO_DECL void do_add_timer_queue(timer_queue_base& queue); // Helper function to remove a timer queue. ASIO_DECL void do_remove_timer_queue(timer_queue_base& queue); // Get the timeout value for the select call. ASIO_DECL timeval* get_timeout(timeval& tv); // Cancel all operations associated with the given descriptor. This function // does not acquire the select_reactor's mutex. ASIO_DECL void cancel_ops_unlocked(socket_type descriptor, const asio::error_code& ec); // The io_service implementation used to post completions. io_service_impl& io_service_; // Mutex to protect access to internal data. asio::detail::mutex mutex_; // The interrupter is used to break a blocking select call. select_interrupter interrupter_; // The queues of read, write and except operations. 
reactor_op_queue op_queue_[max_ops]; // The file descriptor sets to be passed to the select system call. fd_set_adapter fd_sets_[max_select_ops]; // The timer queues. timer_queue_set timer_queues_; #if defined(ASIO_HAS_IOCP) // Does the reactor loop thread need to stop. bool stop_thread_; // The thread that is running the reactor loop. asio::detail::thread* thread_; #endif // defined(ASIO_HAS_IOCP) // Whether the service has been shut down. bool shutdown_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/detail/impl/select_reactor.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/select_reactor.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_IOCP) // || (!defined(ASIO_HAS_DEV_POLL) // && !defined(ASIO_HAS_EPOLL) // && !defined(ASIO_HAS_KQUEUE) // && !defined(ASIO_WINDOWS_RUNTIME)) #endif // ASIO_DETAIL_SELECT_REACTOR_HPP galera-26.4.3/asio/asio/detail/reactive_serial_port_service.hpp0000664000177500017540000001701013540715002023143 0ustar dbartmy// // detail/reactive_serial_port_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_SERIAL_PORT_SERVICE_HPP #define ASIO_DETAIL_REACTIVE_SERIAL_PORT_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_SERIAL_PORT) #if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #include #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/serial_port_base.hpp" #include "asio/detail/descriptor_ops.hpp" #include "asio/detail/reactive_descriptor_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Extend reactive_descriptor_service to provide serial port support. class reactive_serial_port_service { public: // The native type of a serial port. typedef reactive_descriptor_service::native_handle_type native_handle_type; // The implementation type of the serial port. typedef reactive_descriptor_service::implementation_type implementation_type; ASIO_DECL reactive_serial_port_service( asio::io_service& io_service); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Construct a new serial port implementation. void construct(implementation_type& impl) { descriptor_service_.construct(impl); } // Move-construct a new serial port implementation. void move_construct(implementation_type& impl, implementation_type& other_impl) { descriptor_service_.move_construct(impl, other_impl); } // Move-assign from another serial port implementation. void move_assign(implementation_type& impl, reactive_serial_port_service& other_service, implementation_type& other_impl) { descriptor_service_.move_assign(impl, other_service.descriptor_service_, other_impl); } // Destroy a serial port implementation. void destroy(implementation_type& impl) { descriptor_service_.destroy(impl); } // Open the serial port using the specified device name. 
ASIO_DECL asio::error_code open(implementation_type& impl, const std::string& device, asio::error_code& ec); // Assign a native descriptor to a serial port implementation. asio::error_code assign(implementation_type& impl, const native_handle_type& native_descriptor, asio::error_code& ec) { return descriptor_service_.assign(impl, native_descriptor, ec); } // Determine whether the serial port is open. bool is_open(const implementation_type& impl) const { return descriptor_service_.is_open(impl); } // Destroy a serial port implementation. asio::error_code close(implementation_type& impl, asio::error_code& ec) { return descriptor_service_.close(impl, ec); } // Get the native serial port representation. native_handle_type native_handle(implementation_type& impl) { return descriptor_service_.native_handle(impl); } // Cancel all operations associated with the serial port. asio::error_code cancel(implementation_type& impl, asio::error_code& ec) { return descriptor_service_.cancel(impl, ec); } // Set an option on the serial port. template asio::error_code set_option(implementation_type& impl, const SettableSerialPortOption& option, asio::error_code& ec) { return do_set_option(impl, &reactive_serial_port_service::store_option, &option, ec); } // Get an option from the serial port. template asio::error_code get_option(const implementation_type& impl, GettableSerialPortOption& option, asio::error_code& ec) const { return do_get_option(impl, &reactive_serial_port_service::load_option, &option, ec); } // Send a break sequence to the serial port. asio::error_code send_break(implementation_type& impl, asio::error_code& ec) { errno = 0; descriptor_ops::error_wrapper(::tcsendbreak( descriptor_service_.native_handle(impl), 0), ec); return ec; } // Write the given data. Returns the number of bytes sent. 
template size_t write_some(implementation_type& impl, const ConstBufferSequence& buffers, asio::error_code& ec) { return descriptor_service_.write_some(impl, buffers, ec); } // Start an asynchronous write. The data being written must be valid for the // lifetime of the asynchronous operation. template void async_write_some(implementation_type& impl, const ConstBufferSequence& buffers, Handler& handler) { descriptor_service_.async_write_some(impl, buffers, handler); } // Read some data. Returns the number of bytes received. template size_t read_some(implementation_type& impl, const MutableBufferSequence& buffers, asio::error_code& ec) { return descriptor_service_.read_some(impl, buffers, ec); } // Start an asynchronous read. The buffer for the data being received must be // valid for the lifetime of the asynchronous operation. template void async_read_some(implementation_type& impl, const MutableBufferSequence& buffers, Handler& handler) { descriptor_service_.async_read_some(impl, buffers, handler); } private: // Function pointer type for storing a serial port option. typedef asio::error_code (*store_function_type)( const void*, termios&, asio::error_code&); // Helper function template to store a serial port option. template static asio::error_code store_option(const void* option, termios& storage, asio::error_code& ec) { return static_cast(option)->store( storage, ec); } // Helper function to set a serial port option. ASIO_DECL asio::error_code do_set_option( implementation_type& impl, store_function_type store, const void* option, asio::error_code& ec); // Function pointer type for loading a serial port option. typedef asio::error_code (*load_function_type)( void*, const termios&, asio::error_code&); // Helper function template to load a serial port option. template static asio::error_code load_option(void* option, const termios& storage, asio::error_code& ec) { return static_cast(option)->load(storage, ec); } // Helper function to get a serial port option. 
ASIO_DECL asio::error_code do_get_option( const implementation_type& impl, load_function_type load, void* option, asio::error_code& ec) const; // The implementation used for initiating asynchronous operations. reactive_descriptor_service descriptor_service_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/reactive_serial_port_service.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #endif // defined(ASIO_HAS_SERIAL_PORT) #endif // ASIO_DETAIL_REACTIVE_SERIAL_PORT_SERVICE_HPP galera-26.4.3/asio/asio/detail/std_mutex.hpp0000664000177500017540000000242213540715002017233 0ustar dbartmy// // detail/std_mutex.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_STD_MUTEX_HPP #define ASIO_DETAIL_STD_MUTEX_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/scoped_lock.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class std_event; class std_mutex : private noncopyable { public: typedef asio::detail::scoped_lock scoped_lock; // Constructor. std_mutex() { } // Destructor. ~std_mutex() { } // Lock the mutex. void lock() { mutex_.lock(); } // Unlock the mutex. 
void unlock() { mutex_.unlock(); } private: friend class std_event; std::mutex mutex_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) #endif // ASIO_DETAIL_STD_MUTEX_HPP galera-26.4.3/asio/asio/detail/win_event.hpp0000664000177500017540000000534413540715002017223 0ustar dbartmy// // detail/win_event.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_EVENT_HPP #define ASIO_DETAIL_WIN_EVENT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) #include "asio/detail/assert.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class win_event : private noncopyable { public: // Constructor. ASIO_DECL win_event(); // Destructor. ASIO_DECL ~win_event(); // Signal the event. (Retained for backward compatibility.) template void signal(Lock& lock) { this->signal_all(lock); } // Signal all waiters. template void signal_all(Lock& lock) { ASIO_ASSERT(lock.locked()); (void)lock; state_ |= 1; ::SetEvent(events_[0]); } // Unlock the mutex and signal one waiter. template void unlock_and_signal_one(Lock& lock) { ASIO_ASSERT(lock.locked()); state_ |= 1; bool have_waiters = (state_ > 1); lock.unlock(); if (have_waiters) ::SetEvent(events_[1]); } // If there's a waiter, unlock the mutex and signal it. template bool maybe_unlock_and_signal_one(Lock& lock) { ASIO_ASSERT(lock.locked()); state_ |= 1; if (state_ > 1) { lock.unlock(); ::SetEvent(events_[1]); return true; } return false; } // Reset the event. 
template void clear(Lock& lock) { ASIO_ASSERT(lock.locked()); (void)lock; ::ResetEvent(events_[0]); state_ &= ~std::size_t(1); } // Wait for the event to become signalled. template void wait(Lock& lock) { ASIO_ASSERT(lock.locked()); while ((state_ & 1) == 0) { state_ += 2; lock.unlock(); #if defined(ASIO_WINDOWS_APP) ::WaitForMultipleObjectsEx(2, events_, false, INFINITE, false); #else // defined(ASIO_WINDOWS_APP) ::WaitForMultipleObjects(2, events_, false, INFINITE); #endif // defined(ASIO_WINDOWS_APP) lock.lock(); state_ -= 2; } } private: HANDLE events_[2]; std::size_t state_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/win_event.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_WINDOWS) #endif // ASIO_DETAIL_WIN_EVENT_HPP galera-26.4.3/asio/asio/detail/win_iocp_socket_send_op.hpp0000664000177500017540000000653613540715002022117 0ustar dbartmy// // detail/win_iocp_socket_send_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_SOCKET_SEND_OP_HPP #define ASIO_DETAIL_WIN_IOCP_SOCKET_SEND_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class win_iocp_socket_send_op : public operation { public: ASIO_DEFINE_HANDLER_PTR(win_iocp_socket_send_op); win_iocp_socket_send_op(socket_ops::weak_cancel_token_type cancel_token, const ConstBufferSequence& buffers, Handler& handler) : operation(&win_iocp_socket_send_op::do_complete), cancel_token_(cancel_token), buffers_(buffers), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& result_ec, std::size_t bytes_transferred) { asio::error_code ec(result_ec); // Take ownership of the operation object. win_iocp_socket_send_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) // Check whether buffers are still valid. if (owner) { buffer_sequence_adapter::validate(o->buffers_); } #endif // defined(ASIO_ENABLE_BUFFER_DEBUGGING) socket_ops::complete_iocp_send(o->cancel_token_, ec); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. 
Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, ec, bytes_transferred); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: socket_ops::weak_cancel_token_type cancel_token_; ConstBufferSequence buffers_; Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_SOCKET_SEND_OP_HPP galera-26.4.3/asio/asio/detail/scoped_lock.hpp0000664000177500017540000000351313540715002017506 0ustar dbartmy// // detail/scoped_lock.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SCOPED_LOCK_HPP #define ASIO_DETAIL_SCOPED_LOCK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Helper class to lock and unlock a mutex automatically. template class scoped_lock : private noncopyable { public: // Tag type used to distinguish constructors. enum adopt_lock_t { adopt_lock }; // Constructor adopts a lock that is already held. scoped_lock(Mutex& m, adopt_lock_t) : mutex_(m), locked_(true) { } // Constructor acquires the lock. 
explicit scoped_lock(Mutex& m) : mutex_(m) { mutex_.lock(); locked_ = true; } // Destructor releases the lock. ~scoped_lock() { if (locked_) mutex_.unlock(); } // Explicitly acquire the lock. void lock() { if (!locked_) { mutex_.lock(); locked_ = true; } } // Explicitly release the lock. void unlock() { if (locked_) { mutex_.unlock(); locked_ = false; } } // Test whether the lock is held. bool locked() const { return locked_; } // Get the underlying mutex. Mutex& mutex() { return mutex_; } private: // The underlying mutex. Mutex& mutex_; // Whether the mutex is currently locked or unlocked. bool locked_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_SCOPED_LOCK_HPP galera-26.4.3/asio/asio/detail/win_iocp_socket_recv_op.hpp0000664000177500017540000000713013540715002022114 0ustar dbartmy// // detail/win_iocp_socket_recv_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_SOCKET_RECV_OP_HPP #define ASIO_DETAIL_WIN_IOCP_SOCKET_RECV_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class win_iocp_socket_recv_op : public operation { public: ASIO_DEFINE_HANDLER_PTR(win_iocp_socket_recv_op); win_iocp_socket_recv_op(socket_ops::state_type state, socket_ops::weak_cancel_token_type cancel_token, const MutableBufferSequence& buffers, Handler& handler) : operation(&win_iocp_socket_recv_op::do_complete), state_(state), cancel_token_(cancel_token), buffers_(buffers), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& result_ec, std::size_t bytes_transferred) { asio::error_code ec(result_ec); // Take ownership of the operation object. win_iocp_socket_recv_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) // Check whether buffers are still valid. 
if (owner) { buffer_sequence_adapter::validate(o->buffers_); } #endif // defined(ASIO_ENABLE_BUFFER_DEBUGGING) socket_ops::complete_iocp_recv(o->state_, o->cancel_token_, buffer_sequence_adapter::all_empty(o->buffers_), ec, bytes_transferred); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, ec, bytes_transferred); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: socket_ops::state_type state_; socket_ops::weak_cancel_token_type cancel_token_; MutableBufferSequence buffers_; Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_SOCKET_RECV_OP_HPP galera-26.4.3/asio/asio/detail/socket_types.hpp0000664000177500017540000003433213540715002017740 0ustar dbartmy// // detail/socket_types.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SOCKET_TYPES_HPP #define ASIO_DETAIL_SOCKET_TYPES_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) // Empty. 
#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__) # if defined(_WINSOCKAPI_) && !defined(_WINSOCK2API_) # error WinSock.h has already been included # endif // defined(_WINSOCKAPI_) && !defined(_WINSOCK2API_) # if defined(__BORLANDC__) # include // Needed for __errno # if !defined(_WSPIAPI_H_) # define _WSPIAPI_H_ # define ASIO_WSPIAPI_H_DEFINED # endif // !defined(_WSPIAPI_H_) # endif // defined(__BORLANDC__) # if defined(WINAPI_FAMILY) # if ((WINAPI_FAMILY & WINAPI_PARTITION_DESKTOP) != 0) # include # endif // ((WINAPI_FAMILY & WINAPI_PARTITION_DESKTOP) != 0) # endif // defined(WINAPI_FAMILY) # include # include # if !defined(ASIO_WINDOWS_APP) # include # endif // !defined(ASIO_WINDOWS_APP) # if defined(ASIO_WSPIAPI_H_DEFINED) # undef _WSPIAPI_H_ # undef ASIO_WSPIAPI_H_DEFINED # endif // defined(ASIO_WSPIAPI_H_DEFINED) # if !defined(ASIO_NO_DEFAULT_LINKED_LIBS) # if defined(UNDER_CE) # pragma comment(lib, "ws2.lib") # elif defined(_MSC_VER) || defined(__BORLANDC__) # pragma comment(lib, "ws2_32.lib") # if !defined(ASIO_WINDOWS_APP) # pragma comment(lib, "mswsock.lib") # endif // !defined(ASIO_WINDOWS_APP) # endif // defined(_MSC_VER) || defined(__BORLANDC__) # endif // !defined(ASIO_NO_DEFAULT_LINKED_LIBS) # include "asio/detail/old_win_sdk_compat.hpp" #else # include # if !defined(__SYMBIAN32__) # include # endif # include # include # include # if defined(__hpux) # include # endif # if !defined(__hpux) || defined(__SELECT) # include # endif # include # include # include # include # if !defined(__SYMBIAN32__) # include # endif # include # include # include # include # if defined(__sun) # include # include # endif #endif #include "asio/detail/push_options.hpp" namespace asio { namespace detail { #if defined(ASIO_WINDOWS_RUNTIME) const int max_addr_v4_str_len = 256; const int max_addr_v6_str_len = 256; typedef unsigned __int32 u_long_type; typedef unsigned __int16 u_short_type; struct in4_addr_type { u_long_type s_addr; }; struct in4_mreq_type { in4_addr_type 
imr_multiaddr, imr_interface; }; struct in6_addr_type { unsigned char s6_addr[16]; }; struct in6_mreq_type { in6_addr_type ipv6mr_multiaddr; unsigned long ipv6mr_interface; }; struct socket_addr_type { int sa_family; }; struct sockaddr_in4_type { int sin_family; in4_addr_type sin_addr; u_short_type sin_port; }; struct sockaddr_in6_type { int sin6_family; in6_addr_type sin6_addr; u_short_type sin6_port; u_long_type sin6_flowinfo; u_long_type sin6_scope_id; }; struct sockaddr_storage_type { int ss_family; unsigned char ss_bytes[128 - sizeof(int)]; }; struct addrinfo_type { int ai_flags; int ai_family, ai_socktype, ai_protocol; int ai_addrlen; const void* ai_addr; const char* ai_canonname; addrinfo_type* ai_next; }; struct linger_type { u_short_type l_onoff, l_linger; }; typedef u_long_type ioctl_arg_type; typedef int signed_size_type; # define ASIO_OS_DEF(c) ASIO_OS_DEF_##c # define ASIO_OS_DEF_AF_UNSPEC 0 # define ASIO_OS_DEF_AF_INET 2 # define ASIO_OS_DEF_AF_INET6 23 # define ASIO_OS_DEF_SOCK_STREAM 1 # define ASIO_OS_DEF_SOCK_DGRAM 2 # define ASIO_OS_DEF_SOCK_RAW 3 # define ASIO_OS_DEF_SOCK_SEQPACKET 5 # define ASIO_OS_DEF_IPPROTO_IP 0 # define ASIO_OS_DEF_IPPROTO_IPV6 41 # define ASIO_OS_DEF_IPPROTO_TCP 6 # define ASIO_OS_DEF_IPPROTO_UDP 17 # define ASIO_OS_DEF_IPPROTO_ICMP 1 # define ASIO_OS_DEF_IPPROTO_ICMPV6 58 # define ASIO_OS_DEF_FIONBIO 1 # define ASIO_OS_DEF_FIONREAD 2 # define ASIO_OS_DEF_INADDR_ANY 0 # define ASIO_OS_DEF_MSG_OOB 0x1 # define ASIO_OS_DEF_MSG_PEEK 0x2 # define ASIO_OS_DEF_MSG_DONTROUTE 0x4 # define ASIO_OS_DEF_MSG_EOR 0 // Not supported. 
# define ASIO_OS_DEF_SHUT_RD 0x0 # define ASIO_OS_DEF_SHUT_WR 0x1 # define ASIO_OS_DEF_SHUT_RDWR 0x2 # define ASIO_OS_DEF_SOMAXCONN 0x7fffffff # define ASIO_OS_DEF_SOL_SOCKET 0xffff # define ASIO_OS_DEF_SO_BROADCAST 0x20 # define ASIO_OS_DEF_SO_DEBUG 0x1 # define ASIO_OS_DEF_SO_DONTROUTE 0x10 # define ASIO_OS_DEF_SO_KEEPALIVE 0x8 # define ASIO_OS_DEF_SO_LINGER 0x80 # define ASIO_OS_DEF_SO_SNDBUF 0x1001 # define ASIO_OS_DEF_SO_RCVBUF 0x1002 # define ASIO_OS_DEF_SO_SNDLOWAT 0x1003 # define ASIO_OS_DEF_SO_RCVLOWAT 0x1004 # define ASIO_OS_DEF_SO_REUSEADDR 0x4 # define ASIO_OS_DEF_TCP_NODELAY 0x1 # define ASIO_OS_DEF_IP_MULTICAST_IF 2 # define ASIO_OS_DEF_IP_MULTICAST_TTL 3 # define ASIO_OS_DEF_IP_MULTICAST_LOOP 4 # define ASIO_OS_DEF_IP_ADD_MEMBERSHIP 5 # define ASIO_OS_DEF_IP_DROP_MEMBERSHIP 6 # define ASIO_OS_DEF_IP_TTL 7 # define ASIO_OS_DEF_IPV6_UNICAST_HOPS 4 # define ASIO_OS_DEF_IPV6_MULTICAST_IF 9 # define ASIO_OS_DEF_IPV6_MULTICAST_HOPS 10 # define ASIO_OS_DEF_IPV6_MULTICAST_LOOP 11 # define ASIO_OS_DEF_IPV6_JOIN_GROUP 12 # define ASIO_OS_DEF_IPV6_LEAVE_GROUP 13 # define ASIO_OS_DEF_AI_CANONNAME 0x2 # define ASIO_OS_DEF_AI_PASSIVE 0x1 # define ASIO_OS_DEF_AI_NUMERICHOST 0x4 # define ASIO_OS_DEF_AI_NUMERICSERV 0x8 # define ASIO_OS_DEF_AI_V4MAPPED 0x800 # define ASIO_OS_DEF_AI_ALL 0x100 # define ASIO_OS_DEF_AI_ADDRCONFIG 0x400 #elif defined(ASIO_WINDOWS) || defined(__CYGWIN__) typedef SOCKET socket_type; const SOCKET invalid_socket = INVALID_SOCKET; const int socket_error_retval = SOCKET_ERROR; const int max_addr_v4_str_len = 256; const int max_addr_v6_str_len = 256; typedef sockaddr socket_addr_type; typedef in_addr in4_addr_type; typedef ip_mreq in4_mreq_type; typedef sockaddr_in sockaddr_in4_type; # if defined(ASIO_HAS_OLD_WIN_SDK) typedef in6_addr_emulation in6_addr_type; typedef ipv6_mreq_emulation in6_mreq_type; typedef sockaddr_in6_emulation sockaddr_in6_type; typedef sockaddr_storage_emulation sockaddr_storage_type; typedef addrinfo_emulation 
addrinfo_type; # else typedef in6_addr in6_addr_type; typedef ipv6_mreq in6_mreq_type; typedef sockaddr_in6 sockaddr_in6_type; typedef sockaddr_storage sockaddr_storage_type; typedef addrinfo addrinfo_type; # endif typedef ::linger linger_type; typedef unsigned long ioctl_arg_type; typedef u_long u_long_type; typedef u_short u_short_type; typedef int signed_size_type; # define ASIO_OS_DEF(c) ASIO_OS_DEF_##c # define ASIO_OS_DEF_AF_UNSPEC AF_UNSPEC # define ASIO_OS_DEF_AF_INET AF_INET # define ASIO_OS_DEF_AF_INET6 AF_INET6 # define ASIO_OS_DEF_SOCK_STREAM SOCK_STREAM # define ASIO_OS_DEF_SOCK_DGRAM SOCK_DGRAM # define ASIO_OS_DEF_SOCK_RAW SOCK_RAW # define ASIO_OS_DEF_SOCK_SEQPACKET SOCK_SEQPACKET # define ASIO_OS_DEF_IPPROTO_IP IPPROTO_IP # define ASIO_OS_DEF_IPPROTO_IPV6 IPPROTO_IPV6 # define ASIO_OS_DEF_IPPROTO_TCP IPPROTO_TCP # define ASIO_OS_DEF_IPPROTO_UDP IPPROTO_UDP # define ASIO_OS_DEF_IPPROTO_ICMP IPPROTO_ICMP # define ASIO_OS_DEF_IPPROTO_ICMPV6 IPPROTO_ICMPV6 # define ASIO_OS_DEF_FIONBIO FIONBIO # define ASIO_OS_DEF_FIONREAD FIONREAD # define ASIO_OS_DEF_INADDR_ANY INADDR_ANY # define ASIO_OS_DEF_MSG_OOB MSG_OOB # define ASIO_OS_DEF_MSG_PEEK MSG_PEEK # define ASIO_OS_DEF_MSG_DONTROUTE MSG_DONTROUTE # define ASIO_OS_DEF_MSG_EOR 0 // Not supported on Windows. 
# define ASIO_OS_DEF_SHUT_RD SD_RECEIVE # define ASIO_OS_DEF_SHUT_WR SD_SEND # define ASIO_OS_DEF_SHUT_RDWR SD_BOTH # define ASIO_OS_DEF_SOMAXCONN SOMAXCONN # define ASIO_OS_DEF_SOL_SOCKET SOL_SOCKET # define ASIO_OS_DEF_SO_BROADCAST SO_BROADCAST # define ASIO_OS_DEF_SO_DEBUG SO_DEBUG # define ASIO_OS_DEF_SO_DONTROUTE SO_DONTROUTE # define ASIO_OS_DEF_SO_KEEPALIVE SO_KEEPALIVE # define ASIO_OS_DEF_SO_LINGER SO_LINGER # define ASIO_OS_DEF_SO_SNDBUF SO_SNDBUF # define ASIO_OS_DEF_SO_RCVBUF SO_RCVBUF # define ASIO_OS_DEF_SO_SNDLOWAT SO_SNDLOWAT # define ASIO_OS_DEF_SO_RCVLOWAT SO_RCVLOWAT # define ASIO_OS_DEF_SO_REUSEADDR SO_REUSEADDR # define ASIO_OS_DEF_TCP_NODELAY TCP_NODELAY # define ASIO_OS_DEF_IP_MULTICAST_IF IP_MULTICAST_IF # define ASIO_OS_DEF_IP_MULTICAST_TTL IP_MULTICAST_TTL # define ASIO_OS_DEF_IP_MULTICAST_LOOP IP_MULTICAST_LOOP # define ASIO_OS_DEF_IP_ADD_MEMBERSHIP IP_ADD_MEMBERSHIP # define ASIO_OS_DEF_IP_DROP_MEMBERSHIP IP_DROP_MEMBERSHIP # define ASIO_OS_DEF_IP_TTL IP_TTL # define ASIO_OS_DEF_IPV6_UNICAST_HOPS IPV6_UNICAST_HOPS # define ASIO_OS_DEF_IPV6_MULTICAST_IF IPV6_MULTICAST_IF # define ASIO_OS_DEF_IPV6_MULTICAST_HOPS IPV6_MULTICAST_HOPS # define ASIO_OS_DEF_IPV6_MULTICAST_LOOP IPV6_MULTICAST_LOOP # define ASIO_OS_DEF_IPV6_JOIN_GROUP IPV6_JOIN_GROUP # define ASIO_OS_DEF_IPV6_LEAVE_GROUP IPV6_LEAVE_GROUP # define ASIO_OS_DEF_AI_CANONNAME AI_CANONNAME # define ASIO_OS_DEF_AI_PASSIVE AI_PASSIVE # define ASIO_OS_DEF_AI_NUMERICHOST AI_NUMERICHOST # if defined(AI_NUMERICSERV) # define ASIO_OS_DEF_AI_NUMERICSERV AI_NUMERICSERV # else # define ASIO_OS_DEF_AI_NUMERICSERV 0 # endif # if defined(AI_V4MAPPED) # define ASIO_OS_DEF_AI_V4MAPPED AI_V4MAPPED # else # define ASIO_OS_DEF_AI_V4MAPPED 0 # endif # if defined(AI_ALL) # define ASIO_OS_DEF_AI_ALL AI_ALL # else # define ASIO_OS_DEF_AI_ALL 0 # endif # if defined(AI_ADDRCONFIG) # define ASIO_OS_DEF_AI_ADDRCONFIG AI_ADDRCONFIG # else # define ASIO_OS_DEF_AI_ADDRCONFIG 0 # endif # if defined (_WIN32_WINNT) 
const int max_iov_len = 64; # else const int max_iov_len = 16; # endif #else typedef int socket_type; const int invalid_socket = -1; const int socket_error_retval = -1; const int max_addr_v4_str_len = INET_ADDRSTRLEN; #if defined(INET6_ADDRSTRLEN) const int max_addr_v6_str_len = INET6_ADDRSTRLEN + 1 + IF_NAMESIZE; #else // defined(INET6_ADDRSTRLEN) const int max_addr_v6_str_len = 256; #endif // defined(INET6_ADDRSTRLEN) typedef sockaddr socket_addr_type; typedef in_addr in4_addr_type; # if defined(__hpux) // HP-UX doesn't provide ip_mreq when _XOPEN_SOURCE_EXTENDED is defined. struct in4_mreq_type { struct in_addr imr_multiaddr; struct in_addr imr_interface; }; # else typedef ip_mreq in4_mreq_type; # endif typedef sockaddr_in sockaddr_in4_type; typedef in6_addr in6_addr_type; typedef ipv6_mreq in6_mreq_type; typedef sockaddr_in6 sockaddr_in6_type; typedef sockaddr_storage sockaddr_storage_type; typedef sockaddr_un sockaddr_un_type; typedef addrinfo addrinfo_type; typedef ::linger linger_type; typedef int ioctl_arg_type; typedef uint32_t u_long_type; typedef uint16_t u_short_type; #if defined(ASIO_HAS_SSIZE_T) typedef ssize_t signed_size_type; #else // defined(ASIO_HAS_SSIZE_T) typedef int signed_size_type; #endif // defined(ASIO_HAS_SSIZE_T) # define ASIO_OS_DEF(c) ASIO_OS_DEF_##c # define ASIO_OS_DEF_AF_UNSPEC AF_UNSPEC # define ASIO_OS_DEF_AF_INET AF_INET # define ASIO_OS_DEF_AF_INET6 AF_INET6 # define ASIO_OS_DEF_SOCK_STREAM SOCK_STREAM # define ASIO_OS_DEF_SOCK_DGRAM SOCK_DGRAM # define ASIO_OS_DEF_SOCK_RAW SOCK_RAW # define ASIO_OS_DEF_SOCK_SEQPACKET SOCK_SEQPACKET # define ASIO_OS_DEF_IPPROTO_IP IPPROTO_IP # define ASIO_OS_DEF_IPPROTO_IPV6 IPPROTO_IPV6 # define ASIO_OS_DEF_IPPROTO_TCP IPPROTO_TCP # define ASIO_OS_DEF_IPPROTO_UDP IPPROTO_UDP # define ASIO_OS_DEF_IPPROTO_ICMP IPPROTO_ICMP # define ASIO_OS_DEF_IPPROTO_ICMPV6 IPPROTO_ICMPV6 # define ASIO_OS_DEF_FIONBIO FIONBIO # define ASIO_OS_DEF_FIONREAD FIONREAD # define ASIO_OS_DEF_INADDR_ANY INADDR_ANY # 
define ASIO_OS_DEF_MSG_OOB MSG_OOB # define ASIO_OS_DEF_MSG_PEEK MSG_PEEK # define ASIO_OS_DEF_MSG_DONTROUTE MSG_DONTROUTE # define ASIO_OS_DEF_MSG_EOR MSG_EOR # define ASIO_OS_DEF_SHUT_RD SHUT_RD # define ASIO_OS_DEF_SHUT_WR SHUT_WR # define ASIO_OS_DEF_SHUT_RDWR SHUT_RDWR # define ASIO_OS_DEF_SOMAXCONN SOMAXCONN # define ASIO_OS_DEF_SOL_SOCKET SOL_SOCKET # define ASIO_OS_DEF_SO_BROADCAST SO_BROADCAST # define ASIO_OS_DEF_SO_DEBUG SO_DEBUG # define ASIO_OS_DEF_SO_DONTROUTE SO_DONTROUTE # define ASIO_OS_DEF_SO_KEEPALIVE SO_KEEPALIVE # define ASIO_OS_DEF_SO_LINGER SO_LINGER # define ASIO_OS_DEF_SO_SNDBUF SO_SNDBUF # define ASIO_OS_DEF_SO_RCVBUF SO_RCVBUF # define ASIO_OS_DEF_SO_SNDLOWAT SO_SNDLOWAT # define ASIO_OS_DEF_SO_RCVLOWAT SO_RCVLOWAT # define ASIO_OS_DEF_SO_REUSEADDR SO_REUSEADDR # define ASIO_OS_DEF_TCP_NODELAY TCP_NODELAY # define ASIO_OS_DEF_IP_MULTICAST_IF IP_MULTICAST_IF # define ASIO_OS_DEF_IP_MULTICAST_TTL IP_MULTICAST_TTL # define ASIO_OS_DEF_IP_MULTICAST_LOOP IP_MULTICAST_LOOP # define ASIO_OS_DEF_IP_ADD_MEMBERSHIP IP_ADD_MEMBERSHIP # define ASIO_OS_DEF_IP_DROP_MEMBERSHIP IP_DROP_MEMBERSHIP # define ASIO_OS_DEF_IP_TTL IP_TTL # define ASIO_OS_DEF_IPV6_UNICAST_HOPS IPV6_UNICAST_HOPS # define ASIO_OS_DEF_IPV6_MULTICAST_IF IPV6_MULTICAST_IF # define ASIO_OS_DEF_IPV6_MULTICAST_HOPS IPV6_MULTICAST_HOPS # define ASIO_OS_DEF_IPV6_MULTICAST_LOOP IPV6_MULTICAST_LOOP # define ASIO_OS_DEF_IPV6_JOIN_GROUP IPV6_JOIN_GROUP # define ASIO_OS_DEF_IPV6_LEAVE_GROUP IPV6_LEAVE_GROUP # define ASIO_OS_DEF_AI_CANONNAME AI_CANONNAME # define ASIO_OS_DEF_AI_PASSIVE AI_PASSIVE # define ASIO_OS_DEF_AI_NUMERICHOST AI_NUMERICHOST # if defined(AI_NUMERICSERV) # define ASIO_OS_DEF_AI_NUMERICSERV AI_NUMERICSERV # else # define ASIO_OS_DEF_AI_NUMERICSERV 0 # endif // Note: QNX Neutrino 6.3 defines AI_V4MAPPED, AI_ALL and AI_ADDRCONFIG but // does not implement them. Therefore they are specifically excluded here. 
# if defined(AI_V4MAPPED) && !defined(__QNXNTO__) # define ASIO_OS_DEF_AI_V4MAPPED AI_V4MAPPED # else # define ASIO_OS_DEF_AI_V4MAPPED 0 # endif # if defined(AI_ALL) && !defined(__QNXNTO__) # define ASIO_OS_DEF_AI_ALL AI_ALL # else # define ASIO_OS_DEF_AI_ALL 0 # endif # if defined(AI_ADDRCONFIG) && !defined(__QNXNTO__) # define ASIO_OS_DEF_AI_ADDRCONFIG AI_ADDRCONFIG # else # define ASIO_OS_DEF_AI_ADDRCONFIG 0 # endif # if defined(IOV_MAX) const int max_iov_len = IOV_MAX; # else // POSIX platforms are not required to define IOV_MAX. const int max_iov_len = 16; # endif #endif const int custom_socket_option_level = 0xA5100000; const int enable_connection_aborted_option = 1; const int always_fail_option = 2; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_SOCKET_TYPES_HPP galera-26.4.3/asio/asio/detail/function.hpp0000664000177500017540000000166513540715002017054 0ustar dbartmy// // detail/function.hpp // ~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_FUNCTION_HPP #define ASIO_DETAIL_FUNCTION_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_FUNCTION) # include #else // defined(ASIO_HAS_STD_FUNCTION) # include #endif // defined(ASIO_HAS_STD_FUNCTION) namespace asio { namespace detail { #if defined(ASIO_HAS_STD_FUNCTION) using std::function; #else // defined(ASIO_HAS_STD_FUNCTION) using boost::function; #endif // defined(ASIO_HAS_STD_FUNCTION) } // namespace detail } // namespace asio #endif // ASIO_DETAIL_FUNCTION_HPP galera-26.4.3/asio/asio/detail/winrt_socket_send_op.hpp0000664000177500017540000000607713540715002021453 0ustar dbartmy// // detail/winrt_socket_send_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_SOCKET_SEND_OP_HPP #define ASIO_DETAIL_WINRT_SOCKET_SEND_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/winrt_async_op.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class winrt_socket_send_op : public winrt_async_op { public: ASIO_DEFINE_HANDLER_PTR(winrt_socket_send_op); winrt_socket_send_op(const ConstBufferSequence& buffers, Handler& handler) : winrt_async_op(&winrt_socket_send_op::do_complete), buffers_(buffers), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code&, std::size_t) { // Take ownership of the operation object. winrt_socket_send_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) // Check whether buffers are still valid. if (owner) { buffer_sequence_adapter::validate(o->buffers_); } #endif // defined(ASIO_ENABLE_BUFFER_DEBUGGING) // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. 
detail::binder2 handler(o->handler_, o->ec_, o->result_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: ConstBufferSequence buffers_; Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_WINRT_SOCKET_SEND_OP_HPP galera-26.4.3/asio/asio/detail/winrt_resolver_service.hpp0000664000177500017540000001172613540715002022032 0ustar dbartmy// // detail/winrt_resolver_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_RESOLVER_SERVICE_HPP #define ASIO_DETAIL_WINRT_RESOLVER_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include "asio/ip/basic_resolver_iterator.hpp" #include "asio/ip/basic_resolver_query.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/winrt_async_manager.hpp" #include "asio/detail/winrt_resolve_op.hpp" #include "asio/detail/winrt_utils.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class winrt_resolver_service { public: // The implementation type of the resolver. A cancellation token is used to // indicate to the asynchronous operation that the operation has been // cancelled. 
typedef socket_ops::shared_cancel_token_type implementation_type; // The endpoint type. typedef typename Protocol::endpoint endpoint_type; // The query type. typedef asio::ip::basic_resolver_query query_type; // The iterator type. typedef asio::ip::basic_resolver_iterator iterator_type; // Constructor. winrt_resolver_service(asio::io_service& io_service) : io_service_(use_service(io_service)), async_manager_(use_service(io_service)) { } // Destructor. ~winrt_resolver_service() { } // Destroy all user-defined handler objects owned by the service. void shutdown_service() { } // Perform any fork-related housekeeping. void fork_service(asio::io_service::fork_event) { } // Construct a new resolver implementation. void construct(implementation_type&) { } // Destroy a resolver implementation. void destroy(implementation_type&) { } // Cancel pending asynchronous operations. void cancel(implementation_type&) { } // Resolve a query to a list of entries. iterator_type resolve(implementation_type&, const query_type& query, asio::error_code& ec) { try { using namespace Windows::Networking::Sockets; auto endpoint_pairs = async_manager_.sync( DatagramSocket::GetEndpointPairsAsync( winrt_utils::host_name(query.host_name()), winrt_utils::string(query.service_name())), ec); if (ec) return iterator_type(); return iterator_type::create( endpoint_pairs, query.hints(), query.host_name(), query.service_name()); } catch (Platform::Exception^ e) { ec = asio::error_code(e->HResult, asio::system_category()); return iterator_type(); } } // Asynchronously resolve a query to a list of entries. template void async_resolve(implementation_type&, const query_type& query, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. 
typedef winrt_resolve_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(query, handler); ASIO_HANDLER_CREATION((p.p, "resolver", &impl, "async_resolve")); try { using namespace Windows::Networking::Sockets; async_manager_.async(DatagramSocket::GetEndpointPairsAsync( winrt_utils::host_name(query.host_name()), winrt_utils::string(query.service_name())), p.p); p.v = p.p = 0; } catch (Platform::Exception^ e) { p.p->ec_ = asio::error_code( e->HResult, asio::system_category()); io_service_.post_immediate_completion(p.p, is_continuation); p.v = p.p = 0; } } // Resolve an endpoint to a list of entries. iterator_type resolve(implementation_type&, const endpoint_type&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return iterator_type(); } // Asynchronously resolve an endpoint to a list of entries. template void async_resolve(implementation_type&, const endpoint_type&, Handler& handler) { asio::error_code ec = asio::error::operation_not_supported; const iterator_type iterator; io_service_.get_io_service().post( detail::bind_handler(handler, ec, iterator)); } private: io_service_impl& io_service_; winrt_async_manager& async_manager_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_WINRT_RESOLVER_SERVICE_HPP galera-26.4.3/asio/asio/detail/null_thread.hpp0000664000177500017540000000231713540715002017523 0ustar dbartmy// // detail/null_thread.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_NULL_THREAD_HPP #define ASIO_DETAIL_NULL_THREAD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) #include "asio/detail/noncopyable.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class null_thread : private noncopyable { public: // Constructor. template null_thread(Function, unsigned int = 0) { asio::detail::throw_error( asio::error::operation_not_supported, "thread"); } // Destructor. ~null_thread() { } // Wait for the thread to exit. void join() { } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_HAS_THREADS) #endif // ASIO_DETAIL_NULL_THREAD_HPP galera-26.4.3/asio/asio/detail/win_iocp_socket_connect_op.hpp0000664000177500017540000000674013540715002022614 0ustar dbartmy// // detail/win_iocp_socket_connect_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_SOCKET_CONNECT_OP_HPP #define ASIO_DETAIL_WIN_IOCP_SOCKET_CONNECT_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class win_iocp_socket_connect_op_base : public reactor_op { public: win_iocp_socket_connect_op_base(socket_type socket, func_type complete_func) : reactor_op(&win_iocp_socket_connect_op_base::do_perform, complete_func), socket_(socket), connect_ex_(false) { } static bool do_perform(reactor_op* base) { win_iocp_socket_connect_op_base* o( static_cast(base)); return socket_ops::non_blocking_connect(o->socket_, o->ec_); } socket_type socket_; bool connect_ex_; }; template class win_iocp_socket_connect_op : public win_iocp_socket_connect_op_base { public: ASIO_DEFINE_HANDLER_PTR(win_iocp_socket_connect_op); win_iocp_socket_connect_op(socket_type socket, Handler& handler) : win_iocp_socket_connect_op_base(socket, &win_iocp_socket_connect_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& result_ec, std::size_t /*bytes_transferred*/) { asio::error_code ec(result_ec); // Take ownership of the operation object. 
win_iocp_socket_connect_op* o( static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; if (owner) { if (o->connect_ex_) socket_ops::complete_iocp_connect(o->socket_, ec); else ec = o->ec_; } ASIO_HANDLER_COMPLETION((o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder1 handler(o->handler_, ec); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_SOCKET_CONNECT_OP_HPP galera-26.4.3/asio/asio/detail/throw_exception.hpp0000664000177500017540000000257713540715002020453 0ustar dbartmy// // detail/throw_exception.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_THROW_EXCEPTION_HPP #define ASIO_DETAIL_THROW_EXCEPTION_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_BOOST_THROW_EXCEPTION) # include #endif // defined(ASIO_BOOST_THROW_EXCEPTION) namespace asio { namespace detail { #if defined(ASIO_HAS_BOOST_THROW_EXCEPTION) using boost::throw_exception; #else // defined(ASIO_HAS_BOOST_THROW_EXCEPTION) // Declare the throw_exception function for all targets. template void throw_exception(const Exception& e); // Only define the throw_exception function when exceptions are enabled. // Otherwise, it is up to the application to provide a definition of this // function. # if !defined(ASIO_NO_EXCEPTIONS) template void throw_exception(const Exception& e) { throw e; } # endif // !defined(ASIO_NO_EXCEPTIONS) #endif // defined(ASIO_HAS_BOOST_THROW_EXCEPTION) } // namespace detail } // namespace asio #endif // ASIO_DETAIL_THROW_EXCEPTION_HPP galera-26.4.3/asio/asio/detail/posix_event.hpp0000664000177500017540000000517613540715002017573 0ustar dbartmy// // detail/posix_event.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_POSIX_EVENT_HPP #define ASIO_DETAIL_POSIX_EVENT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_PTHREADS) #include #include "asio/detail/assert.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class posix_event : private noncopyable { public: // Constructor. 
ASIO_DECL posix_event(); // Destructor. ~posix_event() { ::pthread_cond_destroy(&cond_); } // Signal the event. (Retained for backward compatibility.) template void signal(Lock& lock) { this->signal_all(lock); } // Signal all waiters. template void signal_all(Lock& lock) { ASIO_ASSERT(lock.locked()); (void)lock; state_ |= 1; ::pthread_cond_broadcast(&cond_); // Ignore EINVAL. } // Unlock the mutex and signal one waiter. template void unlock_and_signal_one(Lock& lock) { ASIO_ASSERT(lock.locked()); state_ |= 1; bool have_waiters = (state_ > 1); lock.unlock(); if (have_waiters) ::pthread_cond_signal(&cond_); // Ignore EINVAL. } // If there's a waiter, unlock the mutex and signal it. template bool maybe_unlock_and_signal_one(Lock& lock) { ASIO_ASSERT(lock.locked()); state_ |= 1; if (state_ > 1) { lock.unlock(); ::pthread_cond_signal(&cond_); // Ignore EINVAL. return true; } return false; } // Reset the event. template void clear(Lock& lock) { ASIO_ASSERT(lock.locked()); (void)lock; state_ &= ~std::size_t(1); } // Wait for the event to become signalled. template void wait(Lock& lock) { ASIO_ASSERT(lock.locked()); while ((state_ & 1) == 0) { state_ += 2; ::pthread_cond_wait(&cond_, &lock.mutex().mutex_); // Ignore EINVAL. state_ -= 2; } } private: ::pthread_cond_t cond_; std::size_t state_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/posix_event.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_PTHREADS) #endif // ASIO_DETAIL_POSIX_EVENT_HPP galera-26.4.3/asio/asio/detail/wrapped_handler.hpp0000664000177500017540000001770513540715002020370 0ustar dbartmy// // detail/wrapped_handler.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WRAPPED_HANDLER_HPP #define ASIO_DETAIL_WRAPPED_HANDLER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/bind_handler.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_cont_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct is_continuation_delegated { template bool operator()(Dispatcher&, Handler& handler) const { return asio_handler_cont_helpers::is_continuation(handler); } }; struct is_continuation_if_running { template bool operator()(Dispatcher& dispatcher, Handler&) const { return dispatcher.running_in_this_thread(); } }; template class wrapped_handler { public: typedef void result_type; wrapped_handler(Dispatcher dispatcher, Handler& handler) : dispatcher_(dispatcher), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } #if defined(ASIO_HAS_MOVE) wrapped_handler(const wrapped_handler& other) : dispatcher_(other.dispatcher_), handler_(other.handler_) { } wrapped_handler(wrapped_handler&& other) : dispatcher_(other.dispatcher_), handler_(ASIO_MOVE_CAST(Handler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()() { dispatcher_.dispatch(ASIO_MOVE_CAST(Handler)(handler_)); } void operator()() const { dispatcher_.dispatch(handler_); } template void operator()(const Arg1& arg1) { dispatcher_.dispatch(detail::bind_handler(handler_, arg1)); } template void operator()(const Arg1& arg1) const { dispatcher_.dispatch(detail::bind_handler(handler_, arg1)); } template void operator()(const Arg1& arg1, const Arg2& arg2) { dispatcher_.dispatch(detail::bind_handler(handler_, arg1, arg2)); } template void operator()(const Arg1& arg1, const Arg2& arg2) const { dispatcher_.dispatch(detail::bind_handler(handler_, arg1, arg2)); } template void 
operator()(const Arg1& arg1, const Arg2& arg2, const Arg3& arg3) { dispatcher_.dispatch(detail::bind_handler(handler_, arg1, arg2, arg3)); } template void operator()(const Arg1& arg1, const Arg2& arg2, const Arg3& arg3) const { dispatcher_.dispatch(detail::bind_handler(handler_, arg1, arg2, arg3)); } template void operator()(const Arg1& arg1, const Arg2& arg2, const Arg3& arg3, const Arg4& arg4) { dispatcher_.dispatch( detail::bind_handler(handler_, arg1, arg2, arg3, arg4)); } template void operator()(const Arg1& arg1, const Arg2& arg2, const Arg3& arg3, const Arg4& arg4) const { dispatcher_.dispatch( detail::bind_handler(handler_, arg1, arg2, arg3, arg4)); } template void operator()(const Arg1& arg1, const Arg2& arg2, const Arg3& arg3, const Arg4& arg4, const Arg5& arg5) { dispatcher_.dispatch( detail::bind_handler(handler_, arg1, arg2, arg3, arg4, arg5)); } template void operator()(const Arg1& arg1, const Arg2& arg2, const Arg3& arg3, const Arg4& arg4, const Arg5& arg5) const { dispatcher_.dispatch( detail::bind_handler(handler_, arg1, arg2, arg3, arg4, arg5)); } //private: Dispatcher dispatcher_; Handler handler_; }; template class rewrapped_handler { public: explicit rewrapped_handler(Handler& handler, const Context& context) : context_(context), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } explicit rewrapped_handler(const Handler& handler, const Context& context) : context_(context), handler_(handler) { } #if defined(ASIO_HAS_MOVE) rewrapped_handler(const rewrapped_handler& other) : context_(other.context_), handler_(other.handler_) { } rewrapped_handler(rewrapped_handler&& other) : context_(ASIO_MOVE_CAST(Context)(other.context_)), handler_(ASIO_MOVE_CAST(Handler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()() { handler_(); } void operator()() const { handler_(); } //private: Context context_; Handler handler_; }; template inline void* asio_handler_allocate(std::size_t size, wrapped_handler* this_handler) { return 
asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, wrapped_handler* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( wrapped_handler* this_handler) { return IsContinuation()(this_handler->dispatcher_, this_handler->handler_); } template inline void asio_handler_invoke(Function& function, wrapped_handler* this_handler) { this_handler->dispatcher_.dispatch( rewrapped_handler( function, this_handler->handler_)); } template inline void asio_handler_invoke(const Function& function, wrapped_handler* this_handler) { this_handler->dispatcher_.dispatch( rewrapped_handler( function, this_handler->handler_)); } template inline void* asio_handler_allocate(std::size_t size, rewrapped_handler* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->context_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, rewrapped_handler* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->context_); } template inline bool asio_handler_is_continuation( rewrapped_handler* this_handler) { return asio_handler_cont_helpers::is_continuation( this_handler->context_); } template inline void asio_handler_invoke(Function& function, rewrapped_handler* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->context_); } template inline void asio_handler_invoke(const Function& function, rewrapped_handler* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->context_); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_WRAPPED_HANDLER_HPP galera-26.4.3/asio/asio/detail/fd_set_adapter.hpp0000664000177500017540000000171313540715002020165 0ustar dbartmy// // detail/fd_set_adapter.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright 
(c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_FD_SET_ADAPTER_HPP #define ASIO_DETAIL_FD_SET_ADAPTER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS_RUNTIME) #include "asio/detail/posix_fd_set_adapter.hpp" #include "asio/detail/win_fd_set_adapter.hpp" namespace asio { namespace detail { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) typedef win_fd_set_adapter fd_set_adapter; #else typedef posix_fd_set_adapter fd_set_adapter; #endif } // namespace detail } // namespace asio #endif // !defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_FD_SET_ADAPTER_HPP galera-26.4.3/asio/asio/detail/signal_blocker.hpp0000664000177500017540000000235313540715002020200 0ustar dbartmy// // detail/signal_blocker.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SIGNAL_BLOCKER_HPP #define ASIO_DETAIL_SIGNAL_BLOCKER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) || defined(ASIO_WINDOWS) \ || defined(ASIO_WINDOWS_RUNTIME) \ || defined(__CYGWIN__) || defined(__SYMBIAN32__) # include "asio/detail/null_signal_blocker.hpp" #elif defined(ASIO_HAS_PTHREADS) # include "asio/detail/posix_signal_blocker.hpp" #else # error Only Windows and POSIX are supported! 
#endif namespace asio { namespace detail { #if !defined(ASIO_HAS_THREADS) || defined(ASIO_WINDOWS) \ || defined(ASIO_WINDOWS_RUNTIME) \ || defined(__CYGWIN__) || defined(__SYMBIAN32__) typedef null_signal_blocker signal_blocker; #elif defined(ASIO_HAS_PTHREADS) typedef posix_signal_blocker signal_blocker; #endif } // namespace detail } // namespace asio #endif // ASIO_DETAIL_SIGNAL_BLOCKER_HPP galera-26.4.3/asio/asio/detail/winsock_init.hpp0000664000177500017540000000602213540715002017717 0ustar dbartmy// // detail/winsock_init.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINSOCK_INIT_HPP #define ASIO_DETAIL_WINSOCK_INIT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class winsock_init_base { protected: // Structure to track result of initialisation and number of uses. POD is used // to ensure that the values are zero-initialised prior to any code being run. 
struct data { long init_count_; long result_; }; ASIO_DECL static void startup(data& d, unsigned char major, unsigned char minor); ASIO_DECL static void manual_startup(data& d); ASIO_DECL static void cleanup(data& d); ASIO_DECL static void manual_cleanup(data& d); ASIO_DECL static void throw_on_error(data& d); }; template class winsock_init : private winsock_init_base { public: winsock_init(bool allow_throw = true) { startup(data_, Major, Minor); if (allow_throw) throw_on_error(data_); } winsock_init(const winsock_init&) { startup(data_, Major, Minor); throw_on_error(data_); } ~winsock_init() { cleanup(data_); } // This class may be used to indicate that user code will manage Winsock // initialisation and cleanup. This may be required in the case of a DLL, for // example, where it is not safe to initialise Winsock from global object // constructors. // // To prevent asio from initialising Winsock, the object must be constructed // before any Asio's own global objects. With MSVC, this may be accomplished // by adding the following code to the DLL: // // #pragma warning(push) // #pragma warning(disable:4073) // #pragma init_seg(lib) // asio::detail::winsock_init<>::manual manual_winsock_init; // #pragma warning(pop) class manual { public: manual() { manual_startup(data_); } manual(const manual&) { manual_startup(data_); } ~manual() { manual_cleanup(data_); } }; private: friend class manual; static data data_; }; template winsock_init_base::data winsock_init::data_; // Static variable to ensure that winsock is initialised before main, and // therefore before any other threads can get started. 
static const winsock_init<>& winsock_init_instance = winsock_init<>(false); } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/winsock_init.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) #endif // ASIO_DETAIL_WINSOCK_INIT_HPP galera-26.4.3/asio/asio/detail/scoped_ptr.hpp0000664000177500017540000000236113540715002017363 0ustar dbartmy// // detail/scoped_ptr.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SCOPED_PTR_HPP #define ASIO_DETAIL_SCOPED_PTR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class scoped_ptr { public: // Constructor. explicit scoped_ptr(T* p = 0) : p_(p) { } // Destructor. ~scoped_ptr() { delete p_; } // Access. T* get() { return p_; } // Access. T* operator->() { return p_; } // Dereference. T& operator*() { return *p_; } // Reset pointer. void reset(T* p = 0) { delete p_; p_ = p; } private: // Disallow copying and assignment. scoped_ptr(const scoped_ptr&); scoped_ptr& operator=(const scoped_ptr&); T* p_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_SCOPED_PTR_HPP galera-26.4.3/asio/asio/detail/win_object_handle_service.hpp0000664000177500017540000001275513540715002022407 0ustar dbartmy// // detail/win_object_handle_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2011 Boris Schaeling (boris@highscore.de) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_OBJECT_HANDLE_SERVICE_HPP #define ASIO_DETAIL_WIN_OBJECT_HANDLE_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) #include "asio/detail/addressof.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/wait_handler.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class win_object_handle_service { public: // The native type of an object handle. typedef HANDLE native_handle_type; // The implementation type of the object handle. class implementation_type { public: // Default constructor. implementation_type() : handle_(INVALID_HANDLE_VALUE), wait_handle_(INVALID_HANDLE_VALUE), owner_(0), next_(0), prev_(0) { } private: // Only this service will have access to the internal values. friend class win_object_handle_service; // The native object handle representation. May be accessed or modified // without locking the mutex. native_handle_type handle_; // The handle used to unregister the wait operation. The mutex must be // locked when accessing or modifying this member. HANDLE wait_handle_; // The operations waiting on the object handle. If there is a registered // wait then the mutex must be locked when accessing or modifying this // member op_queue op_queue_; // The service instance that owns the object handle implementation. win_object_handle_service* owner_; // Pointers to adjacent handle implementations in linked list. The mutex // must be locked when accessing or modifying these members. 
implementation_type* next_; implementation_type* prev_; }; // Constructor. ASIO_DECL win_object_handle_service( asio::io_service& io_service); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Construct a new handle implementation. ASIO_DECL void construct(implementation_type& impl); // Move-construct a new handle implementation. ASIO_DECL void move_construct(implementation_type& impl, implementation_type& other_impl); // Move-assign from another handle implementation. ASIO_DECL void move_assign(implementation_type& impl, win_object_handle_service& other_service, implementation_type& other_impl); // Destroy a handle implementation. ASIO_DECL void destroy(implementation_type& impl); // Assign a native handle to a handle implementation. ASIO_DECL asio::error_code assign(implementation_type& impl, const native_handle_type& handle, asio::error_code& ec); // Determine whether the handle is open. bool is_open(const implementation_type& impl) const { return impl.handle_ != INVALID_HANDLE_VALUE && impl.handle_ != 0; } // Destroy a handle implementation. ASIO_DECL asio::error_code close(implementation_type& impl, asio::error_code& ec); // Get the native handle representation. native_handle_type native_handle(const implementation_type& impl) const { return impl.handle_; } // Cancel all operations associated with the handle. ASIO_DECL asio::error_code cancel(implementation_type& impl, asio::error_code& ec); // Perform a synchronous wait for the object to enter a signalled state. ASIO_DECL void wait(implementation_type& impl, asio::error_code& ec); /// Start an asynchronous wait. template void async_wait(implementation_type& impl, Handler& handler) { // Allocate and construct an operation to wrap the handler. 
typedef wait_handler op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "object_handle", &impl, "async_wait")); start_wait_op(impl, p.p); p.v = p.p = 0; } private: // Helper function to start an asynchronous wait operation. ASIO_DECL void start_wait_op(implementation_type& impl, wait_op* op); // Helper function to register a wait operation. ASIO_DECL void register_wait_callback( implementation_type& impl, mutex::scoped_lock& lock); // Callback function invoked when the registered wait completes. static ASIO_DECL VOID CALLBACK wait_callback( PVOID param, BOOLEAN timeout); // The io_service implementation used to post completions. io_service_impl& io_service_; // Mutex to protect access to internal state. mutex mutex_; // The head of a linked list of all implementations. implementation_type* impl_list_; // Flag to indicate that the dispatcher has been shut down. bool shutdown_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/win_object_handle_service.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) #endif // ASIO_DETAIL_WIN_OBJECT_HANDLE_SERVICE_HPP galera-26.4.3/asio/asio/detail/winrt_async_op.hpp0000664000177500017540000000255413540715002020263 0ustar dbartmy// // detail/winrt_async_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_ASYNC_OP_HPP #define ASIO_DETAIL_WINRT_ASYNC_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class winrt_async_op : public operation { public: // The error code to be passed to the completion handler. asio::error_code ec_; // The result of the operation, to be passed to the completion handler. TResult result_; protected: winrt_async_op(func_type complete_func) : operation(complete_func), result_() { } }; template <> class winrt_async_op : public operation { public: // The error code to be passed to the completion handler. asio::error_code ec_; protected: winrt_async_op(func_type complete_func) : operation(complete_func) { } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_WINRT_ASYNC_OP_HPP galera-26.4.3/asio/asio/detail/handler_tracking.hpp0000664000177500017540000001111413540715002020514 0ustar dbartmy// // detail/handler_tracking.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_HANDLER_TRACKING_HPP #define ASIO_DETAIL_HANDLER_TRACKING_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_ENABLE_HANDLER_TRACKING) # include "asio/error_code.hpp" # include "asio/detail/cstdint.hpp" # include "asio/detail/static_mutex.hpp" # include "asio/detail/tss_ptr.hpp" #endif // defined(ASIO_ENABLE_HANDLER_TRACKING) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { #if defined(ASIO_ENABLE_HANDLER_TRACKING) class handler_tracking { public: class completion; // Base class for objects containing tracked handlers. class tracked_handler { private: // Only the handler_tracking class will have access to the id. friend class handler_tracking; friend class completion; uint64_t id_; protected: // Constructor initialises with no id. tracked_handler() : id_(0) {} // Prevent deletion through this type. ~tracked_handler() {} }; // Initialise the tracking system. ASIO_DECL static void init(); // Record the creation of a tracked handler. ASIO_DECL static void creation(tracked_handler* h, const char* object_type, void* object, const char* op_name); class completion { public: // Constructor records that handler is to be invoked with no arguments. ASIO_DECL explicit completion(tracked_handler* h); // Destructor records only when an exception is thrown from the handler, or // if the memory is being freed without the handler having been invoked. ASIO_DECL ~completion(); // Records that handler is to be invoked with no arguments. ASIO_DECL void invocation_begin(); // Records that handler is to be invoked with one arguments. ASIO_DECL void invocation_begin(const asio::error_code& ec); // Constructor records that handler is to be invoked with two arguments. 
ASIO_DECL void invocation_begin( const asio::error_code& ec, std::size_t bytes_transferred); // Constructor records that handler is to be invoked with two arguments. ASIO_DECL void invocation_begin( const asio::error_code& ec, int signal_number); // Constructor records that handler is to be invoked with two arguments. ASIO_DECL void invocation_begin( const asio::error_code& ec, const char* arg); // Record that handler invocation has ended. ASIO_DECL void invocation_end(); private: friend class handler_tracking; uint64_t id_; bool invoked_; completion* next_; }; // Record an operation that affects pending handlers. ASIO_DECL static void operation(const char* object_type, void* object, const char* op_name); // Write a line of output. ASIO_DECL static void write_line(const char* format, ...); private: struct tracking_state; ASIO_DECL static tracking_state* get_state(); }; # define ASIO_INHERIT_TRACKED_HANDLER \ : public asio::detail::handler_tracking::tracked_handler # define ASIO_ALSO_INHERIT_TRACKED_HANDLER \ , public asio::detail::handler_tracking::tracked_handler # define ASIO_HANDLER_TRACKING_INIT \ asio::detail::handler_tracking::init() # define ASIO_HANDLER_CREATION(args) \ asio::detail::handler_tracking::creation args # define ASIO_HANDLER_COMPLETION(args) \ asio::detail::handler_tracking::completion tracked_completion args # define ASIO_HANDLER_INVOCATION_BEGIN(args) \ tracked_completion.invocation_begin args # define ASIO_HANDLER_INVOCATION_END \ tracked_completion.invocation_end() # define ASIO_HANDLER_OPERATION(args) \ asio::detail::handler_tracking::operation args #else // defined(ASIO_ENABLE_HANDLER_TRACKING) # define ASIO_INHERIT_TRACKED_HANDLER # define ASIO_ALSO_INHERIT_TRACKED_HANDLER # define ASIO_HANDLER_TRACKING_INIT (void)0 # define ASIO_HANDLER_CREATION(args) (void)0 # define ASIO_HANDLER_COMPLETION(args) (void)0 # define ASIO_HANDLER_INVOCATION_BEGIN(args) (void)0 # define ASIO_HANDLER_INVOCATION_END (void)0 # define 
ASIO_HANDLER_OPERATION(args) (void)0 #endif // defined(ASIO_ENABLE_HANDLER_TRACKING) } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/handler_tracking.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_DETAIL_HANDLER_TRACKING_HPP galera-26.4.3/asio/asio/detail/null_event.hpp0000664000177500017540000000311213540715002017367 0ustar dbartmy// // detail/null_event.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_NULL_EVENT_HPP #define ASIO_DETAIL_NULL_EVENT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class null_event : private noncopyable { public: // Constructor. null_event() { } // Destructor. ~null_event() { } // Signal the event. (Retained for backward compatibility.) template void signal(Lock&) { } // Signal all waiters. template void signal_all(Lock&) { } // Unlock the mutex and signal one waiter. template void unlock_and_signal_one(Lock&) { } // If there's a waiter, unlock the mutex and signal it. template bool maybe_unlock_and_signal_one(Lock&) { return false; } // Reset the event. template void clear(Lock&) { } // Wait for the event to become signalled. 
template void wait(Lock&) { } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_HAS_THREADS) #endif // ASIO_DETAIL_NULL_EVENT_HPP galera-26.4.3/asio/asio/detail/chrono_time_traits.hpp0000664000177500017540000001045213540715002021115 0ustar dbartmy// // detail/chrono_time_traits.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_CHRONO_TIME_TRAITS_HPP #define ASIO_DETAIL_CHRONO_TIME_TRAITS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/cstdint.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Helper template to compute the greatest common divisor. template struct gcd { enum { value = gcd::value }; }; template struct gcd { enum { value = v1 }; }; // Adapts std::chrono clocks for use with a deadline timer. template struct chrono_time_traits { // The clock type. typedef Clock clock_type; // The duration type of the clock. typedef typename clock_type::duration duration_type; // The time point type of the clock. typedef typename clock_type::time_point time_type; // The period of the clock. typedef typename duration_type::period period_type; // Get the current time. static time_type now() { return clock_type::now(); } // Add a duration to a time. static time_type add(const time_type& t, const duration_type& d) { const time_type epoch; if (t >= epoch) { if ((time_type::max)() - t < d) return (time_type::max)(); } else // t < epoch { if (-(t - (time_type::min)()) > d) return (time_type::min)(); } return t + d; } // Subtract one time from another. 
static duration_type subtract(const time_type& t1, const time_type& t2) { const time_type epoch; if (t1 >= epoch) { if (t2 >= epoch) { return t1 - t2; } else if (t2 == (time_type::min)()) { return (duration_type::max)(); } else if ((time_type::max)() - t1 < epoch - t2) { return (duration_type::max)(); } else { return t1 - t2; } } else // t1 < epoch { if (t2 < epoch) { return t1 - t2; } else if (t1 == (time_type::min)()) { return (duration_type::min)(); } else if ((time_type::max)() - t2 < epoch - t1) { return (duration_type::min)(); } else { return -(t2 - t1); } } } // Test whether one time is less than another. static bool less_than(const time_type& t1, const time_type& t2) { return t1 < t2; } // Implement just enough of the posix_time::time_duration interface to supply // what the timer_queue requires. class posix_time_duration { public: explicit posix_time_duration(const duration_type& d) : d_(d) { } int64_t ticks() const { return d_.count(); } int64_t total_seconds() const { return duration_cast<1, 1>(); } int64_t total_milliseconds() const { return duration_cast<1, 1000>(); } int64_t total_microseconds() const { return duration_cast<1, 1000000>(); } private: template int64_t duration_cast() const { const int64_t num1 = period_type::num / gcd::value; const int64_t num2 = Num / gcd::value; const int64_t den1 = period_type::den / gcd::value; const int64_t den2 = Den / gcd::value; const int64_t num = num1 * den2; const int64_t den = num2 * den1; if (num == 1 && den == 1) return ticks(); else if (num != 1 && den == 1) return ticks() * num; else if (num == 1 && period_type::den != 1) return ticks() / den; else return ticks() * num / den; } duration_type d_; }; // Convert to POSIX duration type. 
static posix_time_duration to_posix_duration(const duration_type& d) { return posix_time_duration(WaitTraits::to_wait_duration(d)); } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_CHRONO_TIME_TRAITS_HPP galera-26.4.3/asio/asio/detail/signal_set_service.hpp0000664000177500017540000001402313540715002021067 0ustar dbartmy// // detail/signal_set_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SIGNAL_SET_SERVICE_HPP #define ASIO_DETAIL_SIGNAL_SET_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/signal_handler.hpp" #include "asio/detail/signal_op.hpp" #include "asio/detail/socket_types.hpp" #if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) # include "asio/detail/reactor.hpp" #endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { #if defined(NSIG) && (NSIG > 0) enum { max_signal_number = NSIG }; #else enum { max_signal_number = 128 }; #endif extern ASIO_DECL struct signal_state* get_signal_state(); extern "C" ASIO_DECL void asio_signal_handler(int signal_number); class signal_set_service { public: // Type used for tracking an individual signal registration. class registration { public: // Default constructor. 
registration() : signal_number_(0), queue_(0), undelivered_(0), next_in_table_(0), prev_in_table_(0), next_in_set_(0) { } private: // Only this service will have access to the internal values. friend class signal_set_service; // The signal number that is registered. int signal_number_; // The waiting signal handlers. op_queue* queue_; // The number of undelivered signals. std::size_t undelivered_; // Pointers to adjacent registrations in the registrations_ table. registration* next_in_table_; registration* prev_in_table_; // Link to next registration in the signal set. registration* next_in_set_; }; // The implementation type of the signal_set. class implementation_type { public: // Default constructor. implementation_type() : signals_(0) { } private: // Only this service will have access to the internal values. friend class signal_set_service; // The pending signal handlers. op_queue queue_; // Linked list of registered signals. registration* signals_; }; // Constructor. ASIO_DECL signal_set_service(asio::io_service& io_service); // Destructor. ASIO_DECL ~signal_set_service(); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Perform fork-related housekeeping. ASIO_DECL void fork_service( asio::io_service::fork_event fork_ev); // Construct a new signal_set implementation. ASIO_DECL void construct(implementation_type& impl); // Destroy a signal_set implementation. ASIO_DECL void destroy(implementation_type& impl); // Add a signal to a signal_set. ASIO_DECL asio::error_code add(implementation_type& impl, int signal_number, asio::error_code& ec); // Remove a signal to a signal_set. ASIO_DECL asio::error_code remove(implementation_type& impl, int signal_number, asio::error_code& ec); // Remove all signals from a signal_set. ASIO_DECL asio::error_code clear(implementation_type& impl, asio::error_code& ec); // Cancel all operations associated with the signal set. 
ASIO_DECL asio::error_code cancel(implementation_type& impl, asio::error_code& ec); // Start an asynchronous operation to wait for a signal to be delivered. template void async_wait(implementation_type& impl, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef signal_handler op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "signal_set", &impl, "async_wait")); start_wait_op(impl, p.p); p.v = p.p = 0; } // Deliver notification that a particular signal occurred. ASIO_DECL static void deliver_signal(int signal_number); private: // Helper function to add a service to the global signal state. ASIO_DECL static void add_service(signal_set_service* service); // Helper function to remove a service from the global signal state. ASIO_DECL static void remove_service(signal_set_service* service); // Helper function to create the pipe descriptors. ASIO_DECL static void open_descriptors(); // Helper function to close the pipe descriptors. ASIO_DECL static void close_descriptors(); // Helper function to start a wait operation. ASIO_DECL void start_wait_op(implementation_type& impl, signal_op* op); // The io_service instance used for dispatching handlers. io_service_impl& io_service_; #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) // The type used for registering for pipe reactor notifications. class pipe_read_op; // The reactor used for waiting for pipe readiness. reactor& reactor_; // The per-descriptor reactor data used for the pipe. reactor::per_descriptor_data reactor_data_; #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) // A mapping from signal number to the registered signal sets. registration* registrations_[max_signal_number]; // Pointers to adjacent services in linked list. 
signal_set_service* next_; signal_set_service* prev_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/signal_set_service.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_DETAIL_SIGNAL_SET_SERVICE_HPP galera-26.4.3/asio/asio/detail/posix_mutex.hpp0000664000177500017540000000302313540715002017601 0ustar dbartmy// // detail/posix_mutex.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_POSIX_MUTEX_HPP #define ASIO_DETAIL_POSIX_MUTEX_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_PTHREADS) #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/scoped_lock.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class posix_event; class posix_mutex : private noncopyable { public: typedef asio::detail::scoped_lock scoped_lock; // Constructor. ASIO_DECL posix_mutex(); // Destructor. ~posix_mutex() { ::pthread_mutex_destroy(&mutex_); // Ignore EBUSY. } // Lock the mutex. void lock() { (void)::pthread_mutex_lock(&mutex_); // Ignore EINVAL. } // Unlock the mutex. void unlock() { (void)::pthread_mutex_unlock(&mutex_); // Ignore EINVAL. 
} private: friend class posix_event; ::pthread_mutex_t mutex_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/posix_mutex.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_PTHREADS) #endif // ASIO_DETAIL_POSIX_MUTEX_HPP galera-26.4.3/asio/asio/detail/gcc_x86_fenced_block.hpp0000664000177500017540000000444313540715002021143 0ustar dbartmy// // detail/gcc_x86_fenced_block.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_GCC_X86_FENCED_BLOCK_HPP #define ASIO_DETAIL_GCC_X86_FENCED_BLOCK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class gcc_x86_fenced_block : private noncopyable { public: enum half_t { half }; enum full_t { full }; // Constructor for a half fenced block. explicit gcc_x86_fenced_block(half_t) { } // Constructor for a full fenced block. explicit gcc_x86_fenced_block(full_t) { lbarrier(); } // Destructor. 
~gcc_x86_fenced_block() { sbarrier(); } private: static int barrier() { int r = 0, m = 1; __asm__ __volatile__ ( "xchgl %0, %1" : "=r"(r), "=m"(m) : "0"(1), "m"(m) : "memory", "cc"); return r; } static void lbarrier() { #if defined(__SSE2__) # if (__GNUC__ >= 4) && !defined(__INTEL_COMPILER) && !defined(__ICL) __builtin_ia32_lfence(); # else // (__GNUC__ >= 4) && !defined(__INTEL_COMPILER) && !defined(__ICL) __asm__ __volatile__ ("lfence" ::: "memory"); # endif // (__GNUC__ >= 4) && !defined(__INTEL_COMPILER) && !defined(__ICL) #else // defined(__SSE2__) barrier(); #endif // defined(__SSE2__) } static void sbarrier() { #if defined(__SSE2__) # if (__GNUC__ >= 4) && !defined(__INTEL_COMPILER) && !defined(__ICL) __builtin_ia32_sfence(); # else // (__GNUC__ >= 4) && !defined(__INTEL_COMPILER) && !defined(__ICL) __asm__ __volatile__ ("sfence" ::: "memory"); # endif // (__GNUC__ >= 4) && !defined(__INTEL_COMPILER) && !defined(__ICL) #else // defined(__SSE2__) barrier(); #endif // defined(__SSE2__) } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) #endif // ASIO_DETAIL_GCC_X86_FENCED_BLOCK_HPP galera-26.4.3/asio/asio/detail/thread.hpp0000664000177500017540000000261413540715002016471 0ustar dbartmy// // detail/thread.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_THREAD_HPP #define ASIO_DETAIL_THREAD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) # include "asio/detail/null_thread.hpp" #elif defined(ASIO_WINDOWS) # if defined(ASIO_WINDOWS_APP) || defined(UNDER_CE) # include "asio/detail/winapi_thread.hpp" # else # include "asio/detail/win_thread.hpp" # endif #elif defined(ASIO_HAS_PTHREADS) # include "asio/detail/posix_thread.hpp" #elif defined(ASIO_HAS_STD_THREAD) # include "asio/detail/std_thread.hpp" #else # error Only Windows, POSIX and std::thread are supported! #endif namespace asio { namespace detail { #if !defined(ASIO_HAS_THREADS) typedef null_thread thread; #elif defined(ASIO_WINDOWS) # if defined(ASIO_WINDOWS_APP) || defined(UNDER_CE) typedef winapi_thread thread; # else typedef win_thread thread; # endif #elif defined(ASIO_HAS_PTHREADS) typedef posix_thread thread; #elif defined(ASIO_HAS_STD_THREAD) typedef std_thread thread; #endif } // namespace detail } // namespace asio #endif // ASIO_DETAIL_THREAD_HPP galera-26.4.3/asio/asio/detail/dependent_type.hpp0000664000177500017540000000146013540715002020227 0ustar dbartmy// // detail/dependent_type.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_DEPENDENT_TYPE_HPP #define ASIO_DETAIL_DEPENDENT_TYPE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template struct dependent_type { typedef T type; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_DEPENDENT_TYPE_HPP galera-26.4.3/asio/asio/detail/winrt_socket_recv_op.hpp0000664000177500017540000000663613540715002021462 0ustar dbartmy// // detail/winrt_socket_recv_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_SOCKET_RECV_OP_HPP #define ASIO_DETAIL_WINRT_SOCKET_RECV_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/winrt_async_op.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class winrt_socket_recv_op : public winrt_async_op { public: ASIO_DEFINE_HANDLER_PTR(winrt_socket_recv_op); winrt_socket_recv_op(const MutableBufferSequence& buffers, Handler& handler) : winrt_async_op( &winrt_socket_recv_op::do_complete), buffers_(buffers), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static 
void do_complete(io_service_impl* owner, operation* base, const asio::error_code&, std::size_t) { // Take ownership of the operation object. winrt_socket_recv_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) // Check whether buffers are still valid. if (owner) { buffer_sequence_adapter::validate(o->buffers_); } #endif // defined(ASIO_ENABLE_BUFFER_DEBUGGING) std::size_t bytes_transferred = o->result_ ? o->result_->Length : 0; if (bytes_transferred == 0 && !o->ec_ && !buffer_sequence_adapter::all_empty(o->buffers_)) { o->ec_ = asio::error::eof; } // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, bytes_transferred); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: MutableBufferSequence buffers_; Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_WINRT_SOCKET_RECV_OP_HPP galera-26.4.3/asio/asio/detail/win_iocp_handle_write_op.hpp0000664000177500017540000000622413540715002022255 0ustar dbartmy// // detail/win_iocp_handle_write_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Rep Invariant Systems, Inc. 
(info@repinvariant.com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_HANDLE_WRITE_OP_HPP #define ASIO_DETAIL_WIN_IOCP_HANDLE_WRITE_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/error.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class win_iocp_handle_write_op : public operation { public: ASIO_DEFINE_HANDLER_PTR(win_iocp_handle_write_op); win_iocp_handle_write_op(const ConstBufferSequence& buffers, Handler& handler) : operation(&win_iocp_handle_write_op::do_complete), buffers_(buffers), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& ec, std::size_t bytes_transferred) { // Take ownership of the operation object. win_iocp_handle_write_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) if (owner) { // Check whether buffers are still valid. buffer_sequence_adapter::validate(o->buffers_); } #endif // defined(ASIO_ENABLE_BUFFER_DEBUGGING) // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. 
Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, ec, bytes_transferred); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: ConstBufferSequence buffers_; Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_HANDLE_WRITE_OP_HPP galera-26.4.3/asio/asio/detail/winrt_timer_scheduler.hpp0000664000177500017540000000757613540715002021637 0ustar dbartmy// // detail/winrt_timer_scheduler.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_TIMER_SCHEDULER_HPP #define ASIO_DETAIL_WINRT_TIMER_SCHEDULER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include #include "asio/detail/event.hpp" #include "asio/detail/limits.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/thread.hpp" #include "asio/detail/timer_queue_base.hpp" #include "asio/detail/timer_queue_set.hpp" #include "asio/detail/wait_op.hpp" #include "asio/io_service.hpp" #if defined(ASIO_HAS_IOCP) # include "asio/detail/thread.hpp" #endif // defined(ASIO_HAS_IOCP) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class winrt_timer_scheduler : public asio::detail::service_base { public: // Constructor. ASIO_DECL winrt_timer_scheduler(asio::io_service& io_service); // Destructor. ASIO_DECL ~winrt_timer_scheduler(); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Recreate internal descriptors following a fork. ASIO_DECL void fork_service( asio::io_service::fork_event fork_ev); // Initialise the task. No effect as this class uses its own thread. ASIO_DECL void init_task(); // Add a new timer queue to the reactor. template void add_timer_queue(timer_queue& queue); // Remove a timer queue from the reactor. template void remove_timer_queue(timer_queue& queue); // Schedule a new operation in the given timer queue to expire at the // specified absolute time. template void schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op); // Cancel the timer operations associated with the given token. Returns the // number of operations that have been posted or dispatched. 
template std::size_t cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled = (std::numeric_limits::max)()); private: // Run the select loop in the thread. ASIO_DECL void run_thread(); // Entry point for the select loop thread. ASIO_DECL static void call_run_thread(winrt_timer_scheduler* reactor); // Helper function to add a new timer queue. ASIO_DECL void do_add_timer_queue(timer_queue_base& queue); // Helper function to remove a timer queue. ASIO_DECL void do_remove_timer_queue(timer_queue_base& queue); // The io_service implementation used to post completions. io_service_impl& io_service_; // Mutex used to protect internal variables. asio::detail::mutex mutex_; // Event used to wake up background thread. asio::detail::event event_; // The timer queues. timer_queue_set timer_queues_; // The background thread that is waiting for timers to expire. asio::detail::thread* thread_; // Does the background thread need to stop. bool stop_thread_; // Whether the service has been shut down. bool shutdown_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/detail/impl/winrt_timer_scheduler.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/winrt_timer_scheduler.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_WINRT_TIMER_SCHEDULER_HPP galera-26.4.3/asio/asio/detail/task_io_service_thread_info.hpp0000664000177500017540000000203413540715002022731 0ustar dbartmy// // detail/task_io_service_thread_info.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_TASK_IO_SERVICE_THREAD_INFO_HPP #define ASIO_DETAIL_TASK_IO_SERVICE_THREAD_INFO_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/op_queue.hpp" #include "asio/detail/thread_info_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class task_io_service; class task_io_service_operation; struct task_io_service_thread_info : public thread_info_base { op_queue private_op_queue; long private_outstanding_work; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_TASK_IO_SERVICE_THREAD_INFO_HPP galera-26.4.3/asio/asio/detail/hash_map.hpp0000664000177500017540000001777213540715002017015 0ustar dbartmy// // detail/hash_map.hpp // ~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_HASH_MAP_HPP #define ASIO_DETAIL_HASH_MAP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/detail/assert.hpp" #include "asio/detail/noncopyable.hpp" #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) # include "asio/detail/socket_types.hpp" #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { inline std::size_t calculate_hash_value(int i) { return static_cast(i); } inline std::size_t calculate_hash_value(void* p) { return reinterpret_cast(p) + (reinterpret_cast(p) >> 3); } #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) inline std::size_t calculate_hash_value(SOCKET s) { return static_cast(s); } #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Note: assumes K and V are POD types. template class hash_map : private noncopyable { public: // The type of a value in the map. typedef std::pair value_type; // The type of a non-const iterator over the hash map. typedef typename std::list::iterator iterator; // The type of a const iterator over the hash map. typedef typename std::list::const_iterator const_iterator; // Constructor. hash_map() : size_(0), buckets_(0), num_buckets_(0) { } // Destructor. ~hash_map() { delete[] buckets_; } // Get an iterator for the beginning of the map. iterator begin() { return values_.begin(); } // Get an iterator for the beginning of the map. const_iterator begin() const { return values_.begin(); } // Get an iterator for the end of the map. iterator end() { return values_.end(); } // Get an iterator for the end of the map. const_iterator end() const { return values_.end(); } // Check whether the map is empty. bool empty() const { return values_.empty(); } // Find an entry in the map. 
iterator find(const K& k) { if (num_buckets_) { size_t bucket = calculate_hash_value(k) % num_buckets_; iterator it = buckets_[bucket].first; if (it == values_.end()) return values_.end(); iterator end_it = buckets_[bucket].last; ++end_it; while (it != end_it) { if (it->first == k) return it; ++it; } } return values_.end(); } // Find an entry in the map. const_iterator find(const K& k) const { if (num_buckets_) { size_t bucket = calculate_hash_value(k) % num_buckets_; const_iterator it = buckets_[bucket].first; if (it == values_.end()) return it; const_iterator end_it = buckets_[bucket].last; ++end_it; while (it != end_it) { if (it->first == k) return it; ++it; } } return values_.end(); } // Insert a new entry into the map. std::pair insert(const value_type& v) { if (size_ + 1 >= num_buckets_) rehash(hash_size(size_ + 1)); size_t bucket = calculate_hash_value(v.first) % num_buckets_; iterator it = buckets_[bucket].first; if (it == values_.end()) { buckets_[bucket].first = buckets_[bucket].last = values_insert(values_.end(), v); ++size_; return std::pair(buckets_[bucket].last, true); } iterator end_it = buckets_[bucket].last; ++end_it; while (it != end_it) { if (it->first == v.first) return std::pair(it, false); ++it; } buckets_[bucket].last = values_insert(end_it, v); ++size_; return std::pair(buckets_[bucket].last, true); } // Erase an entry from the map. void erase(iterator it) { ASIO_ASSERT(it != values_.end()); ASIO_ASSERT(num_buckets_ != 0); size_t bucket = calculate_hash_value(it->first) % num_buckets_; bool is_first = (it == buckets_[bucket].first); bool is_last = (it == buckets_[bucket].last); if (is_first && is_last) buckets_[bucket].first = buckets_[bucket].last = values_.end(); else if (is_first) ++buckets_[bucket].first; else if (is_last) --buckets_[bucket].last; values_erase(it); --size_; } // Erase a key from the map. void erase(const K& k) { iterator it = find(k); if (it != values_.end()) erase(it); } // Remove all entries from the map. 
void clear() { // Clear the values. values_.clear(); size_ = 0; // Initialise all buckets to empty. iterator end_it = values_.end(); for (size_t i = 0; i < num_buckets_; ++i) buckets_[i].first = buckets_[i].last = end_it; } private: // Calculate the hash size for the specified number of elements. static std::size_t hash_size(std::size_t num_elems) { static std::size_t sizes[] = { #if defined(ASIO_HASH_MAP_BUCKETS) ASIO_HASH_MAP_BUCKETS #else // ASIO_HASH_MAP_BUCKETS 3, 13, 23, 53, 97, 193, 389, 769, 1543, 3079, 6151, 12289, 24593, 49157, 98317, 196613, 393241, 786433, 1572869, 3145739, 6291469, 12582917, 25165843 #endif // ASIO_HASH_MAP_BUCKETS }; const std::size_t nth_size = sizeof(sizes) / sizeof(std::size_t) - 1; for (std::size_t i = 0; i < nth_size; ++i) if (num_elems < sizes[i]) return sizes[i]; return sizes[nth_size]; } // Re-initialise the hash from the values already contained in the list. void rehash(std::size_t num_buckets) { if (num_buckets == num_buckets_) return; num_buckets_ = num_buckets; ASIO_ASSERT(num_buckets_ != 0); iterator end_iter = values_.end(); // Update number of buckets and initialise all buckets to empty. bucket_type* tmp = new bucket_type[num_buckets_]; delete[] buckets_; buckets_ = tmp; for (std::size_t i = 0; i < num_buckets_; ++i) buckets_[i].first = buckets_[i].last = end_iter; // Put all values back into the hash. iterator iter = values_.begin(); while (iter != end_iter) { std::size_t bucket = calculate_hash_value(iter->first) % num_buckets_; if (buckets_[bucket].last == end_iter) { buckets_[bucket].first = buckets_[bucket].last = iter++; } else if (++buckets_[bucket].last == iter) { ++iter; } else { values_.splice(buckets_[bucket].last, values_, iter++); --buckets_[bucket].last; } } } // Insert an element into the values list by splicing from the spares list, // if a spare is available, and otherwise by inserting a new element. 
iterator values_insert(iterator it, const value_type& v) { if (spares_.empty()) { return values_.insert(it, v); } else { spares_.front() = v; values_.splice(it, spares_, spares_.begin()); return --it; } } // Erase an element from the values list by splicing it to the spares list. void values_erase(iterator it) { *it = value_type(); spares_.splice(spares_.begin(), values_, it); } // The number of elements in the hash. std::size_t size_; // The list of all values in the hash map. std::list values_; // The list of spare nodes waiting to be recycled. Assumes that POD types only // are stored in the hash map. std::list spares_; // The type for a bucket in the hash table. struct bucket_type { iterator first; iterator last; }; // The buckets in the hash. bucket_type* buckets_; // The number of buckets in the hash. std::size_t num_buckets_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_HASH_MAP_HPP galera-26.4.3/asio/asio/detail/object_pool.hpp0000664000177500017540000000546013540715002017523 0ustar dbartmy// // detail/object_pool.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_OBJECT_POOL_HPP #define ASIO_DETAIL_OBJECT_POOL_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class object_pool; class object_pool_access { public: template static Object* create() { return new Object; } template static void destroy(Object* o) { delete o; } template static Object*& next(Object* o) { return o->next_; } template static Object*& prev(Object* o) { return o->prev_; } }; template class object_pool : private noncopyable { public: // Constructor. object_pool() : live_list_(0), free_list_(0) { } // Destructor destroys all objects. ~object_pool() { destroy_list(live_list_); destroy_list(free_list_); } // Get the object at the start of the live list. Object* first() { return live_list_; } // Allocate a new object. Object* alloc() { Object* o = free_list_; if (o) free_list_ = object_pool_access::next(free_list_); else o = object_pool_access::create(); object_pool_access::next(o) = live_list_; object_pool_access::prev(o) = 0; if (live_list_) object_pool_access::prev(live_list_) = o; live_list_ = o; return o; } // Free an object. Moves it to the free list. No destructors are run. void free(Object* o) { if (live_list_ == o) live_list_ = object_pool_access::next(o); if (object_pool_access::prev(o)) { object_pool_access::next(object_pool_access::prev(o)) = object_pool_access::next(o); } if (object_pool_access::next(o)) { object_pool_access::prev(object_pool_access::next(o)) = object_pool_access::prev(o); } object_pool_access::next(o) = free_list_; object_pool_access::prev(o) = 0; free_list_ = o; } private: // Helper function to destroy all elements in a list. 
void destroy_list(Object* list) { while (list) { Object* o = list; list = object_pool_access::next(o); object_pool_access::destroy(o); } } // The list of live objects. Object* live_list_; // The free list. Object* free_list_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_OBJECT_POOL_HPP galera-26.4.3/asio/asio/detail/std_thread.hpp0000664000177500017540000000226613540715002017346 0ustar dbartmy// // detail/std_thread.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_STD_THREAD_HPP #define ASIO_DETAIL_STD_THREAD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_THREAD) #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class std_thread : private noncopyable { public: // Constructor. template std_thread(Function f, unsigned int = 0) : thread_(f) { } // Destructor. ~std_thread() { join(); } // Wait for the thread to exit. void join() { if (thread_.joinable()) thread_.join(); } private: std::thread thread_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_STD_THREAD) #endif // ASIO_DETAIL_STD_THREAD_HPP galera-26.4.3/asio/asio/detail/win_iocp_null_buffers_op.hpp0000664000177500017540000000675713540715002022311 0ustar dbartmy// // detail/win_iocp_null_buffers_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_NULL_BUFFERS_OP_HPP #define ASIO_DETAIL_WIN_IOCP_NULL_BUFFERS_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class win_iocp_null_buffers_op : public reactor_op { public: ASIO_DEFINE_HANDLER_PTR(win_iocp_null_buffers_op); win_iocp_null_buffers_op(socket_ops::weak_cancel_token_type cancel_token, Handler& handler) : reactor_op(&win_iocp_null_buffers_op::do_perform, &win_iocp_null_buffers_op::do_complete), cancel_token_(cancel_token), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static bool do_perform(reactor_op*) { return true; } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& result_ec, std::size_t bytes_transferred) { asio::error_code ec(result_ec); // Take ownership of the operation object. win_iocp_null_buffers_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); // The reactor may have stored a result in the operation object. if (o->ec_) ec = o->ec_; // Map non-portable errors to their portable counterparts. 
if (ec.value() == ERROR_NETNAME_DELETED) { if (o->cancel_token_.expired()) ec = asio::error::operation_aborted; else ec = asio::error::connection_reset; } else if (ec.value() == ERROR_PORT_UNREACHABLE) { ec = asio::error::connection_refused; } // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, ec, bytes_transferred); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: socket_ops::weak_cancel_token_type cancel_token_; Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_NULL_BUFFERS_OP_HPP galera-26.4.3/asio/asio/detail/timer_queue_set.hpp0000664000177500017540000000317213540715002020421 0ustar dbartmy// // detail/timer_queue_set.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_TIMER_QUEUE_SET_HPP #define ASIO_DETAIL_TIMER_QUEUE_SET_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/timer_queue_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class timer_queue_set { public: // Constructor. ASIO_DECL timer_queue_set(); // Add a timer queue to the set. ASIO_DECL void insert(timer_queue_base* q); // Remove a timer queue from the set. ASIO_DECL void erase(timer_queue_base* q); // Determine whether all queues are empty. ASIO_DECL bool all_empty() const; // Get the wait duration in milliseconds. ASIO_DECL long wait_duration_msec(long max_duration) const; // Get the wait duration in microseconds. ASIO_DECL long wait_duration_usec(long max_duration) const; // Dequeue all ready timers. ASIO_DECL void get_ready_timers(op_queue& ops); // Dequeue all timers. ASIO_DECL void get_all_timers(op_queue& ops); private: timer_queue_base* first_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/timer_queue_set.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_DETAIL_TIMER_QUEUE_SET_HPP galera-26.4.3/asio/asio/detail/posix_fd_set_adapter.hpp0000664000177500017540000000546213540715002021414 0ustar dbartmy// // detail/posix_fd_set_adapter.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_POSIX_FD_SET_ADAPTER_HPP #define ASIO_DETAIL_POSIX_FD_SET_ADAPTER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS) \ && !defined(__CYGWIN__) \ && !defined(ASIO_WINDOWS_RUNTIME) #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/reactor_op_queue.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Adapts the FD_SET type to meet the Descriptor_Set concept's requirements. class posix_fd_set_adapter : noncopyable { public: posix_fd_set_adapter() : max_descriptor_(invalid_socket) { using namespace std; // Needed for memset on Solaris. FD_ZERO(&fd_set_); } void reset() { using namespace std; // Needed for memset on Solaris. FD_ZERO(&fd_set_); } bool set(socket_type descriptor) { if (descriptor < (socket_type)FD_SETSIZE) { if (max_descriptor_ == invalid_socket || descriptor > max_descriptor_) max_descriptor_ = descriptor; FD_SET(descriptor, &fd_set_); return true; } return false; } void set(reactor_op_queue& operations, op_queue& ops) { reactor_op_queue::iterator i = operations.begin(); while (i != operations.end()) { reactor_op_queue::iterator op_iter = i++; if (!set(op_iter->first)) { asio::error_code ec(error::fd_set_failure); operations.cancel_operations(op_iter, ops, ec); } } } bool is_set(socket_type descriptor) const { return FD_ISSET(descriptor, &fd_set_) != 0; } operator fd_set*() { return &fd_set_; } socket_type max_descriptor() const { return max_descriptor_; } void perform(reactor_op_queue& operations, op_queue& ops) const { reactor_op_queue::iterator i = operations.begin(); while (i != operations.end()) { reactor_op_queue::iterator op_iter = i++; if (is_set(op_iter->first)) operations.perform_operations(op_iter, ops); } } private: 
mutable fd_set fd_set_; socket_type max_descriptor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_WINDOWS) // && !defined(__CYGWIN__) // && !defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_POSIX_FD_SET_ADAPTER_HPP galera-26.4.3/asio/asio/detail/win_iocp_serial_port_service.hpp0000664000177500017540000001630413540715002023155 0ustar dbartmy// // detail/win_iocp_serial_port_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_SERIAL_PORT_SERVICE_HPP #define ASIO_DETAIL_WIN_IOCP_SERIAL_PORT_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) && defined(ASIO_HAS_SERIAL_PORT) #include #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/detail/win_iocp_handle_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Extend win_iocp_handle_service to provide serial port support. class win_iocp_serial_port_service { public: // The native type of a serial port. typedef win_iocp_handle_service::native_handle_type native_handle_type; // The implementation type of the serial port. typedef win_iocp_handle_service::implementation_type implementation_type; // Constructor. ASIO_DECL win_iocp_serial_port_service( asio::io_service& io_service); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Construct a new serial port implementation. 
void construct(implementation_type& impl) { handle_service_.construct(impl); } // Move-construct a new serial port implementation. void move_construct(implementation_type& impl, implementation_type& other_impl) { handle_service_.move_construct(impl, other_impl); } // Move-assign from another serial port implementation. void move_assign(implementation_type& impl, win_iocp_serial_port_service& other_service, implementation_type& other_impl) { handle_service_.move_assign(impl, other_service.handle_service_, other_impl); } // Destroy a serial port implementation. void destroy(implementation_type& impl) { handle_service_.destroy(impl); } // Open the serial port using the specified device name. ASIO_DECL asio::error_code open(implementation_type& impl, const std::string& device, asio::error_code& ec); // Assign a native handle to a serial port implementation. asio::error_code assign(implementation_type& impl, const native_handle_type& handle, asio::error_code& ec) { return handle_service_.assign(impl, handle, ec); } // Determine whether the serial port is open. bool is_open(const implementation_type& impl) const { return handle_service_.is_open(impl); } // Destroy a serial port implementation. asio::error_code close(implementation_type& impl, asio::error_code& ec) { return handle_service_.close(impl, ec); } // Get the native serial port representation. native_handle_type native_handle(implementation_type& impl) { return handle_service_.native_handle(impl); } // Cancel all operations associated with the handle. asio::error_code cancel(implementation_type& impl, asio::error_code& ec) { return handle_service_.cancel(impl, ec); } // Set an option on the serial port. template asio::error_code set_option(implementation_type& impl, const SettableSerialPortOption& option, asio::error_code& ec) { return do_set_option(impl, &win_iocp_serial_port_service::store_option, &option, ec); } // Get an option from the serial port. 
template asio::error_code get_option(const implementation_type& impl, GettableSerialPortOption& option, asio::error_code& ec) const { return do_get_option(impl, &win_iocp_serial_port_service::load_option, &option, ec); } // Send a break sequence to the serial port. asio::error_code send_break(implementation_type&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Write the given data. Returns the number of bytes sent. template size_t write_some(implementation_type& impl, const ConstBufferSequence& buffers, asio::error_code& ec) { return handle_service_.write_some(impl, buffers, ec); } // Start an asynchronous write. The data being written must be valid for the // lifetime of the asynchronous operation. template void async_write_some(implementation_type& impl, const ConstBufferSequence& buffers, Handler& handler) { handle_service_.async_write_some(impl, buffers, handler); } // Read some data. Returns the number of bytes received. template size_t read_some(implementation_type& impl, const MutableBufferSequence& buffers, asio::error_code& ec) { return handle_service_.read_some(impl, buffers, ec); } // Start an asynchronous read. The buffer for the data being received must be // valid for the lifetime of the asynchronous operation. template void async_read_some(implementation_type& impl, const MutableBufferSequence& buffers, Handler& handler) { handle_service_.async_read_some(impl, buffers, handler); } private: // Function pointer type for storing a serial port option. typedef asio::error_code (*store_function_type)( const void*, ::DCB&, asio::error_code&); // Helper function template to store a serial port option. template static asio::error_code store_option(const void* option, ::DCB& storage, asio::error_code& ec) { return static_cast(option)->store( storage, ec); } // Helper function to set a serial port option. 
ASIO_DECL asio::error_code do_set_option( implementation_type& impl, store_function_type store, const void* option, asio::error_code& ec); // Function pointer type for loading a serial port option. typedef asio::error_code (*load_function_type)( void*, const ::DCB&, asio::error_code&); // Helper function template to load a serial port option. template static asio::error_code load_option(void* option, const ::DCB& storage, asio::error_code& ec) { return static_cast(option)->load(storage, ec); } // Helper function to get a serial port option. ASIO_DECL asio::error_code do_get_option( const implementation_type& impl, load_function_type load, void* option, asio::error_code& ec) const; // The implementation used for initiating asynchronous operations. win_iocp_handle_service handle_service_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/win_iocp_serial_port_service.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_IOCP) && defined(ASIO_HAS_SERIAL_PORT) #endif // ASIO_DETAIL_WIN_IOCP_SERIAL_PORT_SERVICE_HPP galera-26.4.3/asio/asio/detail/type_traits.hpp0000664000177500017540000000325213540715002017570 0ustar dbartmy// // detail/type_traits.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_TYPE_TRAITS_HPP #define ASIO_DETAIL_TYPE_TRAITS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_TYPE_TRAITS) # include #else // defined(ASIO_HAS_TYPE_TRAITS) # include # include # include # include # include # include # include # include #endif // defined(ASIO_HAS_TYPE_TRAITS) namespace asio { #if defined(ASIO_HAS_STD_TYPE_TRAITS) using std::add_const; using std::enable_if; using std::is_const; using std::is_convertible; using std::is_function; using std::is_same; using std::remove_pointer; using std::remove_reference; #else // defined(ASIO_HAS_STD_TYPE_TRAITS) using boost::add_const; template struct enable_if : boost::enable_if_c {}; using boost::is_const; using boost::is_convertible; using boost::is_function; using boost::is_same; using boost::remove_pointer; using boost::remove_reference; #endif // defined(ASIO_HAS_STD_TYPE_TRAITS) } // namespace asio #endif // ASIO_DETAIL_TYPE_TRAITS_HPP galera-26.4.3/asio/asio/detail/array_fwd.hpp0000664000177500017540000000162213540715002017176 0ustar dbartmy// // detail/array_fwd.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_ARRAY_FWD_HPP #define ASIO_DETAIL_ARRAY_FWD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" namespace boost { template class array; } // namespace boost // Standard library components can't be forward declared, so we'll have to // include the array header. 
Fortunately, it's fairly lightweight and doesn't // add significantly to the compile time. #if defined(ASIO_HAS_STD_ARRAY) # include #endif // defined(ASIO_HAS_STD_ARRAY) #endif // ASIO_DETAIL_ARRAY_FWD_HPP galera-26.4.3/asio/asio/detail/resolve_op.hpp0000664000177500017540000001006013540715002017371 0ustar dbartmy// // detail/resolve_op.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_RESOLVE_OP_HPP #define ASIO_DETAIL_RESOLVE_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/ip/basic_resolver_iterator.hpp" #include "asio/ip/basic_resolver_query.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class resolve_op : public operation { public: ASIO_DEFINE_HANDLER_PTR(resolve_op); typedef asio::ip::basic_resolver_query query_type; typedef asio::ip::basic_resolver_iterator iterator_type; resolve_op(socket_ops::weak_cancel_token_type cancel_token, const query_type& query, io_service_impl& ios, Handler& handler) : operation(&resolve_op::do_complete), cancel_token_(cancel_token), query_(query), io_service_impl_(ios), handler_(ASIO_MOVE_CAST(Handler)(handler)), addrinfo_(0) { } ~resolve_op() { if (addrinfo_) socket_ops::freeaddrinfo(addrinfo_); } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& /*ec*/, 
std::size_t /*bytes_transferred*/) { // Take ownership of the operation object. resolve_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; if (owner && owner != &o->io_service_impl_) { // The operation is being run on the worker io_service. Time to perform // the resolver operation. // Perform the blocking host resolution operation. socket_ops::background_getaddrinfo(o->cancel_token_, o->query_.host_name().c_str(), o->query_.service_name().c_str(), o->query_.hints(), &o->addrinfo_, o->ec_); // Pass operation back to main io_service for completion. o->io_service_impl_.post_deferred_completion(o); p.v = p.p = 0; } else { // The operation has been returned to the main io_service. The completion // handler is ready to be delivered. ASIO_HANDLER_COMPLETION((o)); // Make a copy of the handler so that the memory can be deallocated // before the upcall is made. Even if we're not about to make an upcall, // a sub-object of the handler may be the true owner of the memory // associated with the handler. Consequently, a local copy of the handler // is required to ensure that any owning sub-object remains valid until // after we have deallocated the memory here. 
detail::binder2 handler(o->handler_, o->ec_, iterator_type()); p.h = asio::detail::addressof(handler.handler_); if (o->addrinfo_) { handler.arg2_ = iterator_type::create(o->addrinfo_, o->query_.host_name(), o->query_.service_name()); } p.reset(); if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, "...")); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } } private: socket_ops::weak_cancel_token_type cancel_token_; query_type query_; io_service_impl& io_service_impl_; Handler handler_; asio::error_code ec_; asio::detail::addrinfo_type* addrinfo_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_RESOLVE_OP_HPP galera-26.4.3/asio/asio/detail/win_iocp_socket_recvmsg_op.hpp0000664000177500017540000000710413540715002022624 0ustar dbartmy// // detail/win_iocp_socket_recvmsg_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_SOCKET_RECVMSG_OP_HPP #define ASIO_DETAIL_WIN_IOCP_SOCKET_RECVMSG_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/error.hpp" #include "asio/socket_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class win_iocp_socket_recvmsg_op : public operation { public: ASIO_DEFINE_HANDLER_PTR(win_iocp_socket_recvmsg_op); win_iocp_socket_recvmsg_op( socket_ops::weak_cancel_token_type cancel_token, const MutableBufferSequence& buffers, socket_base::message_flags& out_flags, Handler& handler) : operation(&win_iocp_socket_recvmsg_op::do_complete), cancel_token_(cancel_token), buffers_(buffers), out_flags_(out_flags), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& result_ec, std::size_t bytes_transferred) { asio::error_code ec(result_ec); // Take ownership of the operation object. win_iocp_socket_recvmsg_op* o( static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) // Check whether buffers are still valid. 
if (owner) { buffer_sequence_adapter::validate(o->buffers_); } #endif // defined(ASIO_ENABLE_BUFFER_DEBUGGING) socket_ops::complete_iocp_recvmsg(o->cancel_token_, ec); o->out_flags_ = 0; // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, ec, bytes_transferred); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: socket_ops::weak_cancel_token_type cancel_token_; MutableBufferSequence buffers_; socket_base::message_flags& out_flags_; Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_SOCKET_RECVMSG_OP_HPP galera-26.4.3/asio/asio/detail/wait_handler.hpp0000664000177500017540000000456613540715002017673 0ustar dbartmy// // detail/wait_handler.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WAIT_HANDLER_HPP #define ASIO_DETAIL_WAIT_HANDLER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/wait_op.hpp" #include "asio/io_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class wait_handler : public wait_op { public: ASIO_DEFINE_HANDLER_PTR(wait_handler); wait_handler(Handler& h) : wait_op(&wait_handler::do_complete), handler_(ASIO_MOVE_CAST(Handler)(h)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. wait_handler* h(static_cast(base)); ptr p = { asio::detail::addressof(h->handler_), h, h }; ASIO_HANDLER_COMPLETION((h)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder1 handler(h->handler_, h->ec_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. 
if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_WAIT_HANDLER_HPP galera-26.4.3/asio/asio/detail/base_from_completion_cond.hpp0000664000177500017540000000315313540715002022412 0ustar dbartmy// // detail/base_from_completion_cond.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_BASE_FROM_COMPLETION_COND_HPP #define ASIO_DETAIL_BASE_FROM_COMPLETION_COND_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/completion_condition.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class base_from_completion_cond { protected: explicit base_from_completion_cond(CompletionCondition completion_condition) : completion_condition_(completion_condition) { } std::size_t check_for_completion( const asio::error_code& ec, std::size_t total_transferred) { return detail::adapt_completion_condition_result( completion_condition_(ec, total_transferred)); } private: CompletionCondition completion_condition_; }; template <> class base_from_completion_cond { protected: explicit base_from_completion_cond(transfer_all_t) { } static std::size_t check_for_completion( const asio::error_code& ec, std::size_t total_transferred) { return transfer_all_t()(ec, total_transferred); } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_BASE_FROM_COMPLETION_COND_HPP 
galera-26.4.3/asio/asio/detail/win_iocp_socket_recvfrom_op.hpp0000664000177500017540000000737513540715002023013 0ustar dbartmy// // detail/win_iocp_socket_recvfrom_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_SOCKET_RECVFROM_OP_HPP #define ASIO_DETAIL_WIN_IOCP_SOCKET_RECVFROM_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class win_iocp_socket_recvfrom_op : public operation { public: ASIO_DEFINE_HANDLER_PTR(win_iocp_socket_recvfrom_op); win_iocp_socket_recvfrom_op(Endpoint& endpoint, socket_ops::weak_cancel_token_type cancel_token, const MutableBufferSequence& buffers, Handler& handler) : operation(&win_iocp_socket_recvfrom_op::do_complete), endpoint_(endpoint), endpoint_size_(static_cast(endpoint.capacity())), cancel_token_(cancel_token), buffers_(buffers), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } int& endpoint_size() { return endpoint_size_; } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& result_ec, std::size_t bytes_transferred) { asio::error_code ec(result_ec); // Take ownership of the operation object. 
win_iocp_socket_recvfrom_op* o( static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) // Check whether buffers are still valid. if (owner) { buffer_sequence_adapter::validate(o->buffers_); } #endif // defined(ASIO_ENABLE_BUFFER_DEBUGGING) socket_ops::complete_iocp_recvfrom(o->cancel_token_, ec); // Record the size of the endpoint returned by the operation. o->endpoint_.resize(o->endpoint_size_); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, ec, bytes_transferred); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Endpoint& endpoint_; int endpoint_size_; socket_ops::weak_cancel_token_type cancel_token_; MutableBufferSequence buffers_; Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_SOCKET_RECVFROM_OP_HPP galera-26.4.3/asio/asio/detail/fenced_block.hpp0000664000177500017540000000526513540715002017625 0ustar dbartmy// // detail/fenced_block.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_FENCED_BLOCK_HPP #define ASIO_DETAIL_FENCED_BLOCK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) \ || defined(ASIO_DISABLE_FENCED_BLOCK) # include "asio/detail/null_fenced_block.hpp" #elif defined(__MACH__) && defined(__APPLE__) # include "asio/detail/macos_fenced_block.hpp" #elif defined(__sun) # include "asio/detail/solaris_fenced_block.hpp" #elif defined(__GNUC__) && defined(__arm__) \ && !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) # include "asio/detail/gcc_arm_fenced_block.hpp" #elif defined(__GNUC__) && (defined(__hppa) || defined(__hppa__)) # include "asio/detail/gcc_hppa_fenced_block.hpp" #elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) # include "asio/detail/gcc_x86_fenced_block.hpp" #elif defined(__GNUC__) \ && ((__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)) \ && !defined(__INTEL_COMPILER) && !defined(__ICL) \ && !defined(__ICC) && !defined(__ECC) && !defined(__PATHSCALE__) # include "asio/detail/gcc_sync_fenced_block.hpp" #elif defined(ASIO_WINDOWS) && !defined(UNDER_CE) # include "asio/detail/win_fenced_block.hpp" #else # include "asio/detail/null_fenced_block.hpp" #endif namespace asio { namespace detail { #if !defined(ASIO_HAS_THREADS) \ || defined(ASIO_DISABLE_FENCED_BLOCK) typedef null_fenced_block fenced_block; #elif defined(__MACH__) && defined(__APPLE__) typedef macos_fenced_block fenced_block; #elif defined(__sun) typedef solaris_fenced_block fenced_block; #elif defined(__GNUC__) && defined(__arm__) \ && !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) typedef gcc_arm_fenced_block fenced_block; #elif defined(__GNUC__) && (defined(__hppa) || defined(__hppa__)) typedef gcc_hppa_fenced_block fenced_block; #elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) 
typedef gcc_x86_fenced_block fenced_block; #elif defined(__GNUC__) \ && ((__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)) \ && !defined(__INTEL_COMPILER) && !defined(__ICL) \ && !defined(__ICC) && !defined(__ECC) && !defined(__PATHSCALE__) typedef gcc_sync_fenced_block fenced_block; #elif defined(ASIO_WINDOWS) && !defined(UNDER_CE) typedef win_fenced_block fenced_block; #else typedef null_fenced_block fenced_block; #endif } // namespace detail } // namespace asio #endif // ASIO_DETAIL_FENCED_BLOCK_HPP galera-26.4.3/asio/asio/detail/win_thread.hpp0000664000177500017540000000560013540715002017344 0ustar dbartmy// // detail/win_thread.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_THREAD_HPP #define ASIO_DETAIL_WIN_THREAD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_APP) \ && !defined(UNDER_CE) #include "asio/detail/noncopyable.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { ASIO_DECL unsigned int __stdcall win_thread_function(void* arg); #if defined(WINVER) && (WINVER < 0x0500) ASIO_DECL void __stdcall apc_function(ULONG data); #else ASIO_DECL void __stdcall apc_function(ULONG_PTR data); #endif template class win_thread_base { public: static bool terminate_threads() { return ::InterlockedExchangeAdd(&terminate_threads_, 0) != 0; } static void set_terminate_threads(bool b) { ::InterlockedExchange(&terminate_threads_, b ? 
1 : 0); } private: static long terminate_threads_; }; template long win_thread_base::terminate_threads_ = 0; class win_thread : private noncopyable, public win_thread_base { public: // Constructor. template win_thread(Function f, unsigned int stack_size = 0) : thread_(0), exit_event_(0) { start_thread(new func(f), stack_size); } // Destructor. ASIO_DECL ~win_thread(); // Wait for the thread to exit. ASIO_DECL void join(); private: friend ASIO_DECL unsigned int __stdcall win_thread_function(void* arg); #if defined(WINVER) && (WINVER < 0x0500) friend ASIO_DECL void __stdcall apc_function(ULONG); #else friend ASIO_DECL void __stdcall apc_function(ULONG_PTR); #endif class func_base { public: virtual ~func_base() {} virtual void run() = 0; ::HANDLE entry_event_; ::HANDLE exit_event_; }; struct auto_func_base_ptr { func_base* ptr; ~auto_func_base_ptr() { delete ptr; } }; template class func : public func_base { public: func(Function f) : f_(f) { } virtual void run() { f_(); } private: Function f_; }; ASIO_DECL void start_thread(func_base* arg, unsigned int stack_size); ::HANDLE thread_; ::HANDLE exit_event_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/win_thread.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_APP) // && !defined(UNDER_CE) #endif // ASIO_DETAIL_WIN_THREAD_HPP galera-26.4.3/asio/asio/detail/select_interrupter.hpp0000664000177500017540000000243513540715002021145 0ustar dbartmy// // detail/select_interrupter.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SELECT_INTERRUPTER_HPP #define ASIO_DETAIL_SELECT_INTERRUPTER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS_RUNTIME) #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) || defined(__SYMBIAN32__) # include "asio/detail/socket_select_interrupter.hpp" #elif defined(ASIO_HAS_EVENTFD) # include "asio/detail/eventfd_select_interrupter.hpp" #else # include "asio/detail/pipe_select_interrupter.hpp" #endif namespace asio { namespace detail { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) || defined(__SYMBIAN32__) typedef socket_select_interrupter select_interrupter; #elif defined(ASIO_HAS_EVENTFD) typedef eventfd_select_interrupter select_interrupter; #else typedef pipe_select_interrupter select_interrupter; #endif } // namespace detail } // namespace asio #endif // !defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_SELECT_INTERRUPTER_HPP galera-26.4.3/asio/asio/detail/win_fenced_block.hpp0000664000177500017540000000367013540715002020500 0ustar dbartmy// // detail/win_fenced_block.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_FENCED_BLOCK_HPP #define ASIO_DETAIL_WIN_FENCED_BLOCK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) && !defined(UNDER_CE) #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class win_fenced_block : private noncopyable { public: enum half_t { half }; enum full_t { full }; // Constructor for a half fenced block. explicit win_fenced_block(half_t) { } // Constructor for a full fenced block. explicit win_fenced_block(full_t) { #if defined(__BORLANDC__) LONG barrier = 0; ::InterlockedExchange(&barrier, 1); #elif defined(ASIO_MSVC) \ && ((ASIO_MSVC < 1400) || !defined(MemoryBarrier)) # if defined(_M_IX86) # pragma warning(push) # pragma warning(disable:4793) LONG barrier; __asm { xchg barrier, eax } # pragma warning(pop) # endif // defined(_M_IX86) #else MemoryBarrier(); #endif } // Destructor. ~win_fenced_block() { #if defined(__BORLANDC__) LONG barrier = 0; ::InterlockedExchange(&barrier, 1); #elif defined(ASIO_MSVC) \ && ((ASIO_MSVC < 1400) || !defined(MemoryBarrier)) # if defined(_M_IX86) # pragma warning(push) # pragma warning(disable:4793) LONG barrier; __asm { xchg barrier, eax } # pragma warning(pop) # endif // defined(_M_IX86) #else MemoryBarrier(); #endif } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS) && !defined(UNDER_CE) #endif // ASIO_DETAIL_WIN_FENCED_BLOCK_HPP galera-26.4.3/asio/asio/detail/call_stack.hpp0000664000177500017540000000554413540715002017327 0ustar dbartmy// // detail/call_stack.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_CALL_STACK_HPP #define ASIO_DETAIL_CALL_STACK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/tss_ptr.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Helper class to determine whether or not the current thread is inside an // invocation of io_service::run() for a specified io_service object. template class call_stack { public: // Context class automatically pushes the key/value pair on to the stack. class context : private noncopyable { public: // Push the key on to the stack. explicit context(Key* k) : key_(k), next_(call_stack::top_) { value_ = reinterpret_cast(this); call_stack::top_ = this; } // Push the key/value pair on to the stack. context(Key* k, Value& v) : key_(k), value_(&v), next_(call_stack::top_) { call_stack::top_ = this; } // Pop the key/value pair from the stack. ~context() { call_stack::top_ = next_; } // Find the next context with the same key. Value* next_by_key() const { context* elem = next_; while (elem) { if (elem->key_ == key_) return elem->value_; elem = elem->next_; } return 0; } private: friend class call_stack; // The key associated with the context. Key* key_; // The value associated with the context. Value* value_; // The next element in the stack. context* next_; }; friend class context; // Determine whether the specified owner is on the stack. Returns address of // key if present, 0 otherwise. static Value* contains(Key* k) { context* elem = top_; while (elem) { if (elem->key_ == k) return elem->value_; elem = elem->next_; } return 0; } // Obtain the value at the top of the stack. static Value* top() { context* elem = top_; return elem ? elem->value_ : 0; } private: // The top of the stack of calls for the current thread. 
static tss_ptr top_; }; template tss_ptr::context> call_stack::top_; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_CALL_STACK_HPP galera-26.4.3/asio/asio/detail/wait_op.hpp0000664000177500017540000000164313540715002016665 0ustar dbartmy// // detail/wait_op.hpp // ~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WAIT_OP_HPP #define ASIO_DETAIL_WAIT_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class wait_op : public operation { public: // The error code to be passed to the completion handler. asio::error_code ec_; protected: wait_op(func_type func) : operation(func) { } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_WAIT_OP_HPP galera-26.4.3/asio/asio/detail/handler_type_requirements.hpp0000664000177500017540000004037713540715002022513 0ustar dbartmy// // detail/handler_type_requirements.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_HANDLER_TYPE_REQUIREMENTS_HPP #define ASIO_DETAIL_HANDLER_TYPE_REQUIREMENTS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" // Older versions of gcc have difficulty compiling the sizeof expressions where // we test the handler type requirements. We'll disable checking of handler type // requirements for those compilers, but otherwise enable it by default. #if !defined(ASIO_DISABLE_HANDLER_TYPE_REQUIREMENTS) # if !defined(__GNUC__) || (__GNUC__ >= 4) # define ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS 1 # endif // !defined(__GNUC__) || (__GNUC__ >= 4) #endif // !defined(ASIO_DISABLE_HANDLER_TYPE_REQUIREMENTS) // With C++0x we can use a combination of enhanced SFINAE and static_assert to // generate better template error messages. As this technique is not yet widely // portable, we'll only enable it for tested compilers. 
#if !defined(ASIO_DISABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1600) # define ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT 1 # endif // (_MSC_VER >= 1600) # endif // defined(ASIO_MSVC) # if defined(__clang__) # if __has_feature(__cxx_static_assert__) # define ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT 1 # endif // __has_feature(cxx_static_assert) # endif // defined(__clang__) #endif // !defined(ASIO_DISABLE_HANDLER_TYPE_REQUIREMENTS) #if defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS) # include "asio/handler_type.hpp" #endif // defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS) // Newer gcc, clang need special treatment to suppress unused typedef warnings. 
#if defined(__clang__) # if defined(__apple_build_version__) # if (__clang_major__ >= 7) # define ASIO_UNUSED_TYPEDEF __attribute__((__unused__)) # endif // (__clang_major__ >= 7) # elif ((__clang_major__ == 3) && (__clang_minor__ >= 6)) \ || (__clang_major__ > 3) # define ASIO_UNUSED_TYPEDEF __attribute__((__unused__)) # endif // ((__clang_major__ == 3) && (__clang_minor__ >= 6)) // || (__clang_major__ > 3) #elif defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 8)) || (__GNUC__ > 4) # define ASIO_UNUSED_TYPEDEF __attribute__((__unused__)) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 8)) || (__GNUC__ > 4) #endif // defined(__GNUC__) #if !defined(ASIO_UNUSED_TYPEDEF) # define ASIO_UNUSED_TYPEDEF #endif // !defined(ASIO_UNUSED_TYPEDEF) namespace asio { namespace detail { #if defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS) # if defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT) template auto zero_arg_handler_test(Handler h, void*) -> decltype( sizeof(Handler(static_cast(h))), ((h)()), char(0)); template char (&zero_arg_handler_test(Handler, ...))[2]; template auto one_arg_handler_test(Handler h, Arg1* a1) -> decltype( sizeof(Handler(static_cast(h))), ((h)(*a1)), char(0)); template char (&one_arg_handler_test(Handler h, ...))[2]; template auto two_arg_handler_test(Handler h, Arg1* a1, Arg2* a2) -> decltype( sizeof(Handler(static_cast(h))), ((h)(*a1, *a2)), char(0)); template char (&two_arg_handler_test(Handler, ...))[2]; # define ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT(expr, msg) \ static_assert(expr, msg); # else // defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT) # define ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT(expr, msg) # endif // defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT) template T& lvref(); template T& lvref(T); template const T& clvref(); template const T& clvref(T); template char argbyv(T); template struct handler_type_requirements { }; #define ASIO_COMPLETION_HANDLER_CHECK( \ handler_type, handler) \ \ typedef 
ASIO_HANDLER_TYPE(handler_type, \ void()) asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::zero_arg_handler_test( \ asio::detail::clvref< \ asio_true_handler_type>(), 0)) == 1, \ "CompletionHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::clvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()(), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_READ_HANDLER_CHECK( \ handler_type, handler) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void(asio::error_code, std::size_t)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::two_arg_handler_test( \ asio::detail::clvref< \ asio_true_handler_type>(), \ static_cast(0), \ static_cast(0))) == 1, \ "ReadHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::clvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref(), \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_WRITE_HANDLER_CHECK( \ handler_type, handler) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void(asio::error_code, std::size_t)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::two_arg_handler_test( \ asio::detail::clvref< \ asio_true_handler_type>(), \ static_cast(0), \ static_cast(0))) == 1, \ "WriteHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::clvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref(), \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_ACCEPT_HANDLER_CHECK( \ handler_type, handler) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ 
void(asio::error_code)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::one_arg_handler_test( \ asio::detail::clvref< \ asio_true_handler_type>(), \ static_cast(0))) == 1, \ "AcceptHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::clvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_CONNECT_HANDLER_CHECK( \ handler_type, handler) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void(asio::error_code)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::one_arg_handler_test( \ asio::detail::clvref< \ asio_true_handler_type>(), \ static_cast(0))) == 1, \ "ConnectHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::clvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_COMPOSED_CONNECT_HANDLER_CHECK( \ handler_type, handler, iter_type) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void(asio::error_code, iter_type)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::two_arg_handler_test( \ asio::detail::clvref< \ asio_true_handler_type>(), \ static_cast(0), \ static_cast(0))) == 1, \ "ComposedConnectHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::clvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref(), \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_RESOLVE_HANDLER_CHECK( \ handler_type, handler, iter_type) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ 
void(asio::error_code, iter_type)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::two_arg_handler_test( \ asio::detail::clvref< \ asio_true_handler_type>(), \ static_cast(0), \ static_cast(0))) == 1, \ "ResolveHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::clvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref(), \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_WAIT_HANDLER_CHECK( \ handler_type, handler) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void(asio::error_code)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::one_arg_handler_test( \ asio::detail::clvref< \ asio_true_handler_type>(), \ static_cast(0))) == 1, \ "WaitHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::clvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_SIGNAL_HANDLER_CHECK( \ handler_type, handler) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void(asio::error_code, int)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::two_arg_handler_test( \ asio::detail::clvref< \ asio_true_handler_type>(), \ static_cast(0), \ static_cast(0))) == 1, \ "SignalHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::clvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref(), \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_HANDSHAKE_HANDLER_CHECK( \ handler_type, handler) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ 
void(asio::error_code)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::one_arg_handler_test( \ asio::detail::clvref< \ asio_true_handler_type>(), \ static_cast(0))) == 1, \ "HandshakeHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::clvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_BUFFERED_HANDSHAKE_HANDLER_CHECK( \ handler_type, handler) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void(asio::error_code, std::size_t)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::two_arg_handler_test( \ asio::detail::clvref< \ asio_true_handler_type>(), \ static_cast(0), \ static_cast(0))) == 1, \ "BufferedHandshakeHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::clvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref(), \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_SHUTDOWN_HANDLER_CHECK( \ handler_type, handler) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void(asio::error_code)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::one_arg_handler_test( \ asio::detail::clvref< \ asio_true_handler_type>(), \ static_cast(0))) == 1, \ "ShutdownHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::clvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #else // !defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS) #define ASIO_COMPLETION_HANDLER_CHECK( \ handler_type, handler) \ 
typedef int ASIO_UNUSED_TYPEDEF #define ASIO_READ_HANDLER_CHECK( \ handler_type, handler) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_WRITE_HANDLER_CHECK( \ handler_type, handler) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_ACCEPT_HANDLER_CHECK( \ handler_type, handler) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_CONNECT_HANDLER_CHECK( \ handler_type, handler) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_COMPOSED_CONNECT_HANDLER_CHECK( \ handler_type, handler, iter_type) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_RESOLVE_HANDLER_CHECK( \ handler_type, handler, iter_type) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_WAIT_HANDLER_CHECK( \ handler_type, handler) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_SIGNAL_HANDLER_CHECK( \ handler_type, handler) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_HANDSHAKE_HANDLER_CHECK( \ handler_type, handler) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_BUFFERED_HANDSHAKE_HANDLER_CHECK( \ handler_type, handler) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_SHUTDOWN_HANDLER_CHECK( \ handler_type, handler) \ typedef int ASIO_UNUSED_TYPEDEF #endif // !defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS) } // namespace detail } // namespace asio #endif // ASIO_DETAIL_HANDLER_TYPE_REQUIREMENTS_HPP galera-26.4.3/asio/asio/detail/variadic_templates.hpp0000664000177500017540000000375213540715002021066 0ustar dbartmy// // detail/variadic_templates.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_VARIADIC_TEMPLATES_HPP #define ASIO_DETAIL_VARIADIC_TEMPLATES_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_VARIADIC_TEMPLATES) # define ASIO_VARIADIC_TPARAMS(n) ASIO_VARIADIC_TPARAMS_##n # define ASIO_VARIADIC_TPARAMS_1 \ typename T1 # define ASIO_VARIADIC_TPARAMS_2 \ typename T1, typename T2 # define ASIO_VARIADIC_TPARAMS_3 \ typename T1, typename T2, typename T3 # define ASIO_VARIADIC_TPARAMS_4 \ typename T1, typename T2, typename T3, typename T4 # define ASIO_VARIADIC_TPARAMS_5 \ typename T1, typename T2, typename T3, typename T4, typename T5 # define ASIO_VARIADIC_TARGS(n) ASIO_VARIADIC_TARGS_##n # define ASIO_VARIADIC_TARGS_1 x1 # define ASIO_VARIADIC_TARGS_2 x1, x2 # define ASIO_VARIADIC_TARGS_3 x1, x2, x3 # define ASIO_VARIADIC_TARGS_4 x1, x2, x3, x4 # define ASIO_VARIADIC_TARGS_5 x1, x2, x3, x4, x5 # define ASIO_VARIADIC_PARAMS(n) ASIO_VARIADIC_PARAMS_##n # define ASIO_VARIADIC_PARAMS_1 T1 x1 # define ASIO_VARIADIC_PARAMS_2 T1 x1, T2 x2 # define ASIO_VARIADIC_PARAMS_3 T1 x1, T2 x2, T3 x3 # define ASIO_VARIADIC_PARAMS_4 T1 x1, T2 x2, T3 x3, T4 x4 # define ASIO_VARIADIC_PARAMS_5 T1 x1, T2 x2, T3 x3, T4 x4, T5 x5 # define ASIO_VARIADIC_ARGS(n) ASIO_VARIADIC_ARGS_##n # define ASIO_VARIADIC_ARGS_1 x1 # define ASIO_VARIADIC_ARGS_2 x1, x2 # define ASIO_VARIADIC_ARGS_3 x1, x2, x3 # define ASIO_VARIADIC_ARGS_4 x1, x2, x3, x4 # define ASIO_VARIADIC_ARGS_5 x1, x2, x3, x4, x5 # define ASIO_VARIADIC_GENERATE(m) m(1) m(2) m(3) m(4) m(5) #endif // !defined(ASIO_HAS_VARIADIC_TEMPLATES) #endif // ASIO_DETAIL_VARIADIC_TEMPLATES_HPP galera-26.4.3/asio/asio/detail/win_iocp_overlapped_op.hpp0000664000177500017540000000516713540715002021756 0ustar dbartmy// // detail/win_iocp_overlapped_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright 
(c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_OVERLAPPED_OP_HPP #define ASIO_DETAIL_WIN_IOCP_OVERLAPPED_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/error.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class win_iocp_overlapped_op : public operation { public: ASIO_DEFINE_HANDLER_PTR(win_iocp_overlapped_op); win_iocp_overlapped_op(Handler& handler) : operation(&win_iocp_overlapped_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& ec, std::size_t bytes_transferred) { // Take ownership of the operation object. win_iocp_overlapped_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, ec, bytes_transferred); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. 
if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_OVERLAPPED_OP_HPP galera-26.4.3/asio/asio/detail/win_iocp_io_service.hpp0000664000177500017540000002430513540715002021241 0ustar dbartmy// // detail/win_iocp_io_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_IO_SERVICE_HPP #define ASIO_DETAIL_WIN_IOCP_IO_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/io_service.hpp" #include "asio/detail/call_stack.hpp" #include "asio/detail/limits.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/scoped_ptr.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/thread.hpp" #include "asio/detail/timer_queue_base.hpp" #include "asio/detail/timer_queue_set.hpp" #include "asio/detail/wait_op.hpp" #include "asio/detail/win_iocp_operation.hpp" #include "asio/detail/win_iocp_thread_info.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class wait_op; class win_iocp_io_service : public asio::detail::service_base { public: // Constructor. Specifies a concurrency hint that is passed through to the // underlying I/O completion port. 
ASIO_DECL win_iocp_io_service(asio::io_service& io_service, size_t concurrency_hint = 0); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Initialise the task. Nothing to do here. void init_task() { } // Register a handle with the IO completion port. ASIO_DECL asio::error_code register_handle( HANDLE handle, asio::error_code& ec); // Run the event loop until stopped or no more work. ASIO_DECL size_t run(asio::error_code& ec); // Run until stopped or one operation is performed. ASIO_DECL size_t run_one(asio::error_code& ec); // Poll for operations without blocking. ASIO_DECL size_t poll(asio::error_code& ec); // Poll for one operation without blocking. ASIO_DECL size_t poll_one(asio::error_code& ec); // Stop the event processing loop. ASIO_DECL void stop(); // Determine whether the io_service is stopped. bool stopped() const { return ::InterlockedExchangeAdd(&stopped_, 0) != 0; } // Reset in preparation for a subsequent run invocation. void reset() { ::InterlockedExchange(&stopped_, 0); } // Notify that some work has started. void work_started() { ::InterlockedIncrement(&outstanding_work_); } // Notify that some work has finished. void work_finished() { if (::InterlockedDecrement(&outstanding_work_) == 0) stop(); } // Return whether a handler can be dispatched immediately. bool can_dispatch() { return thread_call_stack::contains(this) != 0; } // Request invocation of the given handler. template void dispatch(Handler& handler); // Request invocation of the given handler and return immediately. template void post(Handler& handler); // Request invocation of the given operation and return immediately. Assumes // that work_started() has not yet been called for the operation. void post_immediate_completion(win_iocp_operation* op, bool) { work_started(); post_deferred_completion(op); } // Request invocation of the given operation and return immediately. 
Assumes // that work_started() was previously called for the operation. ASIO_DECL void post_deferred_completion(win_iocp_operation* op); // Request invocation of the given operation and return immediately. Assumes // that work_started() was previously called for the operations. ASIO_DECL void post_deferred_completions( op_queue& ops); // Request invocation of the given operation using the thread-private queue // and return immediately. Assumes that work_started() has not yet been // called for the operation. void post_private_immediate_completion(win_iocp_operation* op) { post_immediate_completion(op, false); } // Request invocation of the given operation using the thread-private queue // and return immediately. Assumes that work_started() was previously called // for the operation. void post_private_deferred_completion(win_iocp_operation* op) { post_deferred_completion(op); } // Process unfinished operations as part of a shutdown_service operation. // Assumes that work_started() was previously called for the operations. ASIO_DECL void abandon_operations(op_queue& ops); // Called after starting an overlapped I/O operation that did not complete // immediately. The caller must have already called work_started() prior to // starting the operation. ASIO_DECL void on_pending(win_iocp_operation* op); // Called after starting an overlapped I/O operation that completed // immediately. The caller must have already called work_started() prior to // starting the operation. ASIO_DECL void on_completion(win_iocp_operation* op, DWORD last_error = 0, DWORD bytes_transferred = 0); // Called after starting an overlapped I/O operation that completed // immediately. The caller must have already called work_started() prior to // starting the operation. ASIO_DECL void on_completion(win_iocp_operation* op, const asio::error_code& ec, DWORD bytes_transferred = 0); // Add a new timer queue to the service. 
template void add_timer_queue(timer_queue& timer_queue); // Remove a timer queue from the service. template void remove_timer_queue(timer_queue& timer_queue); // Schedule a new operation in the given timer queue to expire at the // specified absolute time. template void schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op); // Cancel the timer associated with the given token. Returns the number of // handlers that have been posted or dispatched. template std::size_t cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled = (std::numeric_limits::max)()); private: #if defined(WINVER) && (WINVER < 0x0500) typedef DWORD dword_ptr_t; typedef ULONG ulong_ptr_t; #else // defined(WINVER) && (WINVER < 0x0500) typedef DWORD_PTR dword_ptr_t; typedef ULONG_PTR ulong_ptr_t; #endif // defined(WINVER) && (WINVER < 0x0500) // Dequeues at most one operation from the I/O completion port, and then // executes it. Returns the number of operations that were dequeued (i.e. // either 0 or 1). ASIO_DECL size_t do_one(bool block, asio::error_code& ec); // Helper to calculate the GetQueuedCompletionStatus timeout. ASIO_DECL static DWORD get_gqcs_timeout(); // Helper function to add a new timer queue. ASIO_DECL void do_add_timer_queue(timer_queue_base& queue); // Helper function to remove a timer queue. ASIO_DECL void do_remove_timer_queue(timer_queue_base& queue); // Called to recalculate and update the timeout. ASIO_DECL void update_timeout(); // Helper class to call work_finished() on block exit. struct work_finished_on_block_exit; // Helper class for managing a HANDLE. struct auto_handle { HANDLE handle; auto_handle() : handle(0) {} ~auto_handle() { if (handle) ::CloseHandle(handle); } }; // The IO completion port used for queueing operations. auto_handle iocp_; // The count of unfinished work. 
long outstanding_work_; // Flag to indicate whether the event loop has been stopped. mutable long stopped_; // Flag to indicate whether there is an in-flight stop event. Every event // posted using PostQueuedCompletionStatus consumes non-paged pool, so to // avoid exhausting this resouce we limit the number of outstanding events. long stop_event_posted_; // Flag to indicate whether the service has been shut down. long shutdown_; enum { // Timeout to use with GetQueuedCompletionStatus on older versions of // Windows. Some versions of windows have a "bug" where a call to // GetQueuedCompletionStatus can appear stuck even though there are events // waiting on the queue. Using a timeout helps to work around the issue. default_gqcs_timeout = 500, // Maximum waitable timer timeout, in milliseconds. max_timeout_msec = 5 * 60 * 1000, // Maximum waitable timer timeout, in microseconds. max_timeout_usec = max_timeout_msec * 1000, // Completion key value used to wake up a thread to dispatch timers or // completed operations. wake_for_dispatch = 1, // Completion key value to indicate that an operation has posted with the // original last_error and bytes_transferred values stored in the fields of // the OVERLAPPED structure. overlapped_contains_result = 2 }; // Timeout to use with GetQueuedCompletionStatus. const DWORD gqcs_timeout_; // Function object for processing timeouts in a background thread. struct timer_thread_function; friend struct timer_thread_function; // Background thread used for processing timeouts. scoped_ptr timer_thread_; // A waitable timer object used for waiting for timeouts. auto_handle waitable_timer_; // Non-zero if timers or completed operations need to be dispatched. long dispatch_required_; // Mutex for protecting access to the timer queues and completed operations. mutex dispatch_mutex_; // The timer queues. timer_queue_set timer_queues_; // The operations that are ready to dispatch. 
op_queue completed_ops_; // Per-thread call stack to track the state of each thread in the io_service. typedef call_stack thread_call_stack; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/detail/impl/win_iocp_io_service.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/win_iocp_io_service.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_IO_SERVICE_HPP galera-26.4.3/asio/asio/detail/winrt_resolve_op.hpp0000664000177500017540000000672213540715002020626 0ustar dbartmy// // detail/winrt_resolve_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_RESOLVE_OP_HPP #define ASIO_DETAIL_WINRT_RESOLVE_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/winrt_async_op.hpp" #include "asio/ip/basic_resolver_iterator.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class winrt_resolve_op : public winrt_async_op< Windows::Foundation::Collections::IVectorView< Windows::Networking::EndpointPair^>^> { public: ASIO_DEFINE_HANDLER_PTR(winrt_resolve_op); typedef typename Protocol::endpoint endpoint_type; typedef asio::ip::basic_resolver_query query_type; typedef asio::ip::basic_resolver_iterator iterator_type; winrt_resolve_op(const query_type& query, Handler& handler) : winrt_async_op< 
Windows::Foundation::Collections::IVectorView< Windows::Networking::EndpointPair^>^>( &winrt_resolve_op::do_complete), query_(query), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code&, std::size_t) { // Take ownership of the operation object. winrt_resolve_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); iterator_type iterator = iterator_type(); if (!o->ec_) { try { iterator = iterator_type::create( o->result_, o->query_.hints(), o->query_.host_name(), o->query_.service_name()); } catch (Platform::Exception^ e) { o->ec_ = asio::error_code(e->HResult, asio::system_category()); } } // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, iterator); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: query_type query_; Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_WINRT_RESOLVE_OP_HPP galera-26.4.3/asio/asio/detail/date_time_fwd.hpp0000664000177500017540000000140013540715002020005 0ustar dbartmy// // detail/date_time_fwd.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_DATE_TIME_FWD_HPP #define ASIO_DETAIL_DATE_TIME_FWD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" namespace boost { namespace date_time { template class base_time; } // namespace date_time namespace posix_time { class ptime; } // namespace posix_time } // namespace boost #endif // ASIO_DETAIL_DATE_TIME_FWD_HPP galera-26.4.3/asio/asio/detail/completion_handler.hpp0000664000177500017540000000455013540715002021071 0ustar dbartmy// // detail/completion_handler.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_COMPLETION_HANDLER_HPP #define ASIO_DETAIL_COMPLETION_HANDLER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/addressof.hpp" #include "asio/detail/config.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class completion_handler : public operation { public: ASIO_DEFINE_HANDLER_PTR(completion_handler); completion_handler(Handler& h) : operation(&completion_handler::do_complete), handler_(ASIO_MOVE_CAST(Handler)(h)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. 
completion_handler* h(static_cast(base)); ptr p = { asio::detail::addressof(h->handler_), h, h }; ASIO_HANDLER_COMPLETION((h)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. Handler handler(ASIO_MOVE_CAST(Handler)(h->handler_)); p.h = asio::detail::addressof(handler); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN(()); asio_handler_invoke_helpers::invoke(handler, handler); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_COMPLETION_HANDLER_HPP galera-26.4.3/asio/asio/detail/bind_handler.hpp0000664000177500017540000003163413540715002017637 0ustar dbartmy// // detail/bind_handler.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_BIND_HANDLER_HPP #define ASIO_DETAIL_BIND_HANDLER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_cont_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class binder1 { public: binder1(const Handler& handler, const Arg1& arg1) : handler_(handler), arg1_(arg1) { } binder1(Handler& handler, const Arg1& arg1) : handler_(ASIO_MOVE_CAST(Handler)(handler)), arg1_(arg1) { } void operator()() { handler_(static_cast(arg1_)); } void operator()() const { handler_(arg1_); } //private: Handler handler_; Arg1 arg1_; }; template inline void* asio_handler_allocate(std::size_t size, binder1* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, binder1* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( binder1* this_handler) { return asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, binder1* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, binder1* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline binder1 bind_handler(Handler handler, const Arg1& arg1) { return binder1(handler, arg1); } template class binder2 { public: binder2(const Handler& handler, const Arg1& arg1, const Arg2& arg2) : handler_(handler), arg1_(arg1), arg2_(arg2) { } 
binder2(Handler& handler, const Arg1& arg1, const Arg2& arg2) : handler_(ASIO_MOVE_CAST(Handler)(handler)), arg1_(arg1), arg2_(arg2) { } void operator()() { handler_(static_cast(arg1_), static_cast(arg2_)); } void operator()() const { handler_(arg1_, arg2_); } //private: Handler handler_; Arg1 arg1_; Arg2 arg2_; }; template inline void* asio_handler_allocate(std::size_t size, binder2* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, binder2* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( binder2* this_handler) { return asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, binder2* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, binder2* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline binder2 bind_handler(Handler handler, const Arg1& arg1, const Arg2& arg2) { return binder2(handler, arg1, arg2); } template class binder3 { public: binder3(const Handler& handler, const Arg1& arg1, const Arg2& arg2, const Arg3& arg3) : handler_(handler), arg1_(arg1), arg2_(arg2), arg3_(arg3) { } binder3(Handler& handler, const Arg1& arg1, const Arg2& arg2, const Arg3& arg3) : handler_(ASIO_MOVE_CAST(Handler)(handler)), arg1_(arg1), arg2_(arg2), arg3_(arg3) { } void operator()() { handler_(static_cast(arg1_), static_cast(arg2_), static_cast(arg3_)); } void operator()() const { handler_(arg1_, arg2_, arg3_); } //private: Handler handler_; Arg1 arg1_; Arg2 arg2_; Arg3 arg3_; }; template inline void* asio_handler_allocate(std::size_t size, binder3* this_handler) { return asio_handler_alloc_helpers::allocate( size, 
this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, binder3* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( binder3* this_handler) { return asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, binder3* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, binder3* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline binder3 bind_handler(Handler handler, const Arg1& arg1, const Arg2& arg2, const Arg3& arg3) { return binder3(handler, arg1, arg2, arg3); } template class binder4 { public: binder4(const Handler& handler, const Arg1& arg1, const Arg2& arg2, const Arg3& arg3, const Arg4& arg4) : handler_(handler), arg1_(arg1), arg2_(arg2), arg3_(arg3), arg4_(arg4) { } binder4(Handler& handler, const Arg1& arg1, const Arg2& arg2, const Arg3& arg3, const Arg4& arg4) : handler_(ASIO_MOVE_CAST(Handler)(handler)), arg1_(arg1), arg2_(arg2), arg3_(arg3), arg4_(arg4) { } void operator()() { handler_(static_cast(arg1_), static_cast(arg2_), static_cast(arg3_), static_cast(arg4_)); } void operator()() const { handler_(arg1_, arg2_, arg3_, arg4_); } //private: Handler handler_; Arg1 arg1_; Arg2 arg2_; Arg3 arg3_; Arg4 arg4_; }; template inline void* asio_handler_allocate(std::size_t size, binder4* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, binder4* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( binder4* this_handler) { return asio_handler_cont_helpers::is_continuation( 
this_handler->handler_); } template inline void asio_handler_invoke(Function& function, binder4* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, binder4* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline binder4 bind_handler( Handler handler, const Arg1& arg1, const Arg2& arg2, const Arg3& arg3, const Arg4& arg4) { return binder4(handler, arg1, arg2, arg3, arg4); } template class binder5 { public: binder5(const Handler& handler, const Arg1& arg1, const Arg2& arg2, const Arg3& arg3, const Arg4& arg4, const Arg5& arg5) : handler_(handler), arg1_(arg1), arg2_(arg2), arg3_(arg3), arg4_(arg4), arg5_(arg5) { } binder5(Handler& handler, const Arg1& arg1, const Arg2& arg2, const Arg3& arg3, const Arg4& arg4, const Arg5& arg5) : handler_(ASIO_MOVE_CAST(Handler)(handler)), arg1_(arg1), arg2_(arg2), arg3_(arg3), arg4_(arg4), arg5_(arg5) { } void operator()() { handler_(static_cast(arg1_), static_cast(arg2_), static_cast(arg3_), static_cast(arg4_), static_cast(arg5_)); } void operator()() const { handler_(arg1_, arg2_, arg3_, arg4_, arg5_); } //private: Handler handler_; Arg1 arg1_; Arg2 arg2_; Arg3 arg3_; Arg4 arg4_; Arg5 arg5_; }; template inline void* asio_handler_allocate(std::size_t size, binder5* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, binder5* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( binder5* this_handler) { return asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, binder5* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void 
asio_handler_invoke(const Function& function, binder5* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline binder5 bind_handler( Handler handler, const Arg1& arg1, const Arg2& arg2, const Arg3& arg3, const Arg4& arg4, const Arg5& arg5) { return binder5(handler, arg1, arg2, arg3, arg4, arg5); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_BIND_HANDLER_HPP galera-26.4.3/asio/asio/detail/null_static_mutex.hpp0000664000177500017540000000220113540715002020755 0ustar dbartmy// // detail/null_static_mutex.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_NULL_STATIC_MUTEX_HPP #define ASIO_DETAIL_NULL_STATIC_MUTEX_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) #include "asio/detail/scoped_lock.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct null_static_mutex { typedef asio::detail::scoped_lock scoped_lock; // Initialise the mutex. void init() { } // Lock the mutex. void lock() { } // Unlock the mutex. void unlock() { } int unused_; }; #define ASIO_NULL_STATIC_MUTEX_INIT { 0 } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_HAS_THREADS) #endif // ASIO_DETAIL_NULL_STATIC_MUTEX_HPP galera-26.4.3/asio/asio/detail/solaris_fenced_block.hpp0000664000177500017540000000230213540715002021346 0ustar dbartmy// // detail/solaris_fenced_block.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SOLARIS_FENCED_BLOCK_HPP #define ASIO_DETAIL_SOLARIS_FENCED_BLOCK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(__sun) #include #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class solaris_fenced_block : private noncopyable { public: enum half_t { half }; enum full_t { full }; // Constructor for a half fenced block. explicit solaris_fenced_block(half_t) { } // Constructor for a full fenced block. explicit solaris_fenced_block(full_t) { membar_consumer(); } // Destructor. ~solaris_fenced_block() { membar_producer(); } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(__sun) #endif // ASIO_DETAIL_SOLARIS_FENCED_BLOCK_HPP galera-26.4.3/asio/asio/detail/buffer_resize_guard.hpp0000664000177500017540000000275013540715002021237 0ustar dbartmy// // detail/buffer_resize_guard.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_BUFFER_RESIZE_GUARD_HPP #define ASIO_DETAIL_BUFFER_RESIZE_GUARD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/limits.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Helper class to manage buffer resizing in an exception safe way. template class buffer_resize_guard { public: // Constructor. 
buffer_resize_guard(Buffer& buffer) : buffer_(buffer), old_size_(buffer.size()) { } // Destructor rolls back the buffer resize unless commit was called. ~buffer_resize_guard() { if (old_size_ != (std::numeric_limits::max)()) { buffer_.resize(old_size_); } } // Commit the resize transaction. void commit() { old_size_ = (std::numeric_limits::max)(); } private: // The buffer being managed. Buffer& buffer_; // The size of the buffer at the time the guard was constructed. size_t old_size_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_BUFFER_RESIZE_GUARD_HPP galera-26.4.3/asio/asio/detail/config.hpp0000664000177500017540000011223013540715002016463 0ustar dbartmy// // detail/config.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_CONFIG_HPP #define ASIO_DETAIL_CONFIG_HPP #if defined(ASIO_STANDALONE) # define ASIO_DISABLE_BOOST_ARRAY 1 # define ASIO_DISABLE_BOOST_ASSERT 1 # define ASIO_DISABLE_BOOST_BIND 1 # define ASIO_DISABLE_BOOST_CHRONO 1 # define ASIO_DISABLE_BOOST_DATE_TIME 1 # define ASIO_DISABLE_BOOST_LIMITS 1 # define ASIO_DISABLE_BOOST_REGEX 1 # define ASIO_DISABLE_BOOST_STATIC_CONSTANT 1 # define ASIO_DISABLE_BOOST_THROW_EXCEPTION 1 # define ASIO_DISABLE_BOOST_WORKAROUND 1 #else // defined(ASIO_STANDALONE) # include # include # define ASIO_HAS_BOOST_CONFIG 1 #endif // defined(ASIO_STANDALONE) // Default to a header-only implementation. The user must specifically request // separate compilation by defining either ASIO_SEPARATE_COMPILATION or // ASIO_DYN_LINK (as a DLL/shared library implies separate compilation). 
#if !defined(ASIO_HEADER_ONLY) # if !defined(ASIO_SEPARATE_COMPILATION) # if !defined(ASIO_DYN_LINK) # define ASIO_HEADER_ONLY 1 # endif // !defined(ASIO_DYN_LINK) # endif // !defined(ASIO_SEPARATE_COMPILATION) #endif // !defined(ASIO_HEADER_ONLY) #if defined(ASIO_HEADER_ONLY) # define ASIO_DECL inline #else // defined(ASIO_HEADER_ONLY) # if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CODEGEARC__) // We need to import/export our code only if the user has specifically asked // for it by defining ASIO_DYN_LINK. # if defined(ASIO_DYN_LINK) // Export if this is our own source, otherwise import. # if defined(ASIO_SOURCE) # define ASIO_DECL __declspec(dllexport) # else // defined(ASIO_SOURCE) # define ASIO_DECL __declspec(dllimport) # endif // defined(ASIO_SOURCE) # endif // defined(ASIO_DYN_LINK) # endif // defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CODEGEARC__) #endif // defined(ASIO_HEADER_ONLY) // If ASIO_DECL isn't defined yet define it now. #if !defined(ASIO_DECL) # define ASIO_DECL #endif // !defined(ASIO_DECL) // Microsoft Visual C++ detection. #if !defined(ASIO_MSVC) # if defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_MSVC) # define ASIO_MSVC BOOST_MSVC # elif defined(_MSC_VER) && !defined(__MWERKS__) && !defined(__EDG_VERSION__) # define ASIO_MSVC _MSC_VER # endif // defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_MSVC) #endif // defined(ASIO_MSVC) // Clang / libc++ detection. #if defined(__clang__) # if (__cplusplus >= 201103) # if __has_include(<__config>) # include <__config> # if defined(_LIBCPP_VERSION) # define ASIO_HAS_CLANG_LIBCXX 1 # endif // defined(_LIBCPP_VERSION) # endif // __has_include(<__config>) # endif // (__cplusplus >= 201103) #endif // defined(__clang__) // Support move construction and assignment on compilers known to allow it. 
#if !defined(ASIO_HAS_MOVE) # if !defined(ASIO_DISABLE_MOVE) # if defined(__clang__) # if __has_feature(__cxx_rvalue_references__) # define ASIO_HAS_MOVE 1 # endif // __has_feature(__cxx_rvalue_references__) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_MOVE 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_MOVE 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_MOVE) #endif // !defined(ASIO_HAS_MOVE) // If ASIO_MOVE_CAST isn't defined, and move support is available, define // ASIO_MOVE_ARG and ASIO_MOVE_CAST to take advantage of rvalue // references and perfect forwarding. #if defined(ASIO_HAS_MOVE) && !defined(ASIO_MOVE_CAST) # define ASIO_MOVE_ARG(type) type&& # define ASIO_MOVE_CAST(type) static_cast # define ASIO_MOVE_CAST2(type1, type2) static_cast #endif // defined(ASIO_HAS_MOVE) && !defined(ASIO_MOVE_CAST) // If ASIO_MOVE_CAST still isn't defined, default to a C++03-compatible // implementation. Note that older g++ and MSVC versions don't like it when you // pass a non-member function through a const reference, so for most compilers // we'll play it safe and stick with the old approach of passing the handler by // value. 
#if !defined(ASIO_MOVE_CAST) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)) || (__GNUC__ > 4) # define ASIO_MOVE_ARG(type) const type& # else // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)) || (__GNUC__ > 4) # define ASIO_MOVE_ARG(type) type # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)) || (__GNUC__ > 4) # elif defined(ASIO_MSVC) # if (_MSC_VER >= 1400) # define ASIO_MOVE_ARG(type) const type& # else // (_MSC_VER >= 1400) # define ASIO_MOVE_ARG(type) type # endif // (_MSC_VER >= 1400) # else # define ASIO_MOVE_ARG(type) type # endif # define ASIO_MOVE_CAST(type) static_cast # define ASIO_MOVE_CAST2(type1, type2) static_cast #endif // !defined(ASIO_MOVE_CAST) // Support variadic templates on compilers known to allow it. #if !defined(ASIO_HAS_VARIADIC_TEMPLATES) # if !defined(ASIO_DISABLE_VARIADIC_TEMPLATES) # if defined(__clang__) # if __has_feature(__cxx_variadic_templates__) # define ASIO_HAS_VARIADIC_TEMPLATES 1 # endif // __has_feature(__cxx_variadic_templates__) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_VARIADIC_TEMPLATES 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # endif // !defined(ASIO_DISABLE_VARIADIC_TEMPLATES) #endif // !defined(ASIO_HAS_VARIADIC_TEMPLATES) // Support constexpr on compilers known to allow it. 
#if !defined(ASIO_HAS_CONSTEXPR) # if !defined(ASIO_DISABLE_CONSTEXPR) # if defined(__clang__) # if __has_feature(__cxx_constexpr__) # define ASIO_HAS_CONSTEXPR 1 # endif // __has_feature(__cxx_constexr__) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_CONSTEXPR 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # endif // !defined(ASIO_DISABLE_CONSTEXPR) #endif // !defined(ASIO_HAS_CONSTEXPR) #if !defined(ASIO_CONSTEXPR) # if defined(ASIO_HAS_CONSTEXPR) # define ASIO_CONSTEXPR constexpr # else // defined(ASIO_HAS_CONSTEXPR) # define ASIO_CONSTEXPR # endif // defined(ASIO_HAS_CONSTEXPR) #endif // !defined(ASIO_CONSTEXPR) // Standard library support for system errors. #if !defined(ASIO_HAS_STD_SYSTEM_ERROR) # if !defined(ASIO_DISABLE_STD_SYSTEM_ERROR) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_SYSTEM_ERROR 1 # elif (__cplusplus >= 201103) # if __has_include() # define ASIO_HAS_STD_SYSTEM_ERROR 1 # endif // __has_include() # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_SYSTEM_ERROR 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_STD_SYSTEM_ERROR 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_SYSTEM_ERROR) #endif // !defined(ASIO_HAS_STD_SYSTEM_ERROR) // Compliant C++11 compilers put noexcept specifiers on error_category members. 
#if !defined(ASIO_ERROR_CATEGORY_NOEXCEPT) # if (BOOST_VERSION >= 105300) # define ASIO_ERROR_CATEGORY_NOEXCEPT BOOST_NOEXCEPT # elif defined(__clang__) # if __has_feature(__cxx_noexcept__) # define ASIO_ERROR_CATEGORY_NOEXCEPT noexcept(true) # endif // __has_feature(__cxx_noexcept__) # elif defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_ERROR_CATEGORY_NOEXCEPT noexcept(true) # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4) # elif defined(ASIO_MSVC) # if (_MSC_VER >= 1900) # define ASIO_ERROR_CATEGORY_NOEXCEPT noexcept(true) # endif // (_MSC_VER >= 1900) # endif // defined(ASIO_MSVC) # if !defined(ASIO_ERROR_CATEGORY_NOEXCEPT) # define ASIO_ERROR_CATEGORY_NOEXCEPT # endif // !defined(ASIO_ERROR_CATEGORY_NOEXCEPT) #endif // !defined(ASIO_ERROR_CATEGORY_NOEXCEPT) // Standard library support for arrays. #if !defined(ASIO_HAS_STD_ARRAY) # if !defined(ASIO_DISABLE_STD_ARRAY) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_ARRAY 1 # elif (__cplusplus >= 201103) # if __has_include() # define ASIO_HAS_STD_ARRAY 1 # endif // __has_include() # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_ARRAY 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1600) # define ASIO_HAS_STD_ARRAY 1 # endif // (_MSC_VER >= 1600) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_ARRAY) #endif // !defined(ASIO_HAS_STD_ARRAY) // Standard library support for shared_ptr and weak_ptr. 
#if !defined(ASIO_HAS_STD_SHARED_PTR) # if !defined(ASIO_DISABLE_STD_SHARED_PTR) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_SHARED_PTR 1 # elif (__cplusplus >= 201103) # define ASIO_HAS_STD_SHARED_PTR 1 # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_SHARED_PTR 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1600) # define ASIO_HAS_STD_SHARED_PTR 1 # endif // (_MSC_VER >= 1600) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_SHARED_PTR) #endif // !defined(ASIO_HAS_STD_SHARED_PTR) // Standard library support for atomic operations. #if !defined(ASIO_HAS_STD_ATOMIC) # if !defined(ASIO_DISABLE_STD_ATOMIC) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_ATOMIC 1 # elif (__cplusplus >= 201103) # if __has_include() # define ASIO_HAS_STD_ATOMIC 1 # endif // __has_include() # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_ATOMIC 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_STD_ATOMIC 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_ATOMIC) #endif // !defined(ASIO_HAS_STD_ATOMIC) // Standard library support for chrono. 
Some standard libraries (such as the // libstdc++ shipped with gcc 4.6) provide monotonic_clock as per early C++0x // drafts, rather than the eventually standardised name of steady_clock. #if !defined(ASIO_HAS_STD_CHRONO) # if !defined(ASIO_DISABLE_STD_CHRONO) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_CHRONO 1 # elif (__cplusplus >= 201103) # if __has_include() # define ASIO_HAS_STD_CHRONO 1 # endif // __has_include() # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_CHRONO 1 # if ((__GNUC__ == 4) && (__GNUC_MINOR__ == 6)) # define ASIO_HAS_STD_CHRONO_MONOTONIC_CLOCK 1 # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ == 6)) # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_STD_CHRONO 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_CHRONO) #endif // !defined(ASIO_HAS_STD_CHRONO) // Boost support for chrono. #if !defined(ASIO_HAS_BOOST_CHRONO) # if !defined(ASIO_DISABLE_BOOST_CHRONO) # if (BOOST_VERSION >= 104700) # define ASIO_HAS_BOOST_CHRONO 1 # endif // (BOOST_VERSION >= 104700) # endif // !defined(ASIO_DISABLE_BOOST_CHRONO) #endif // !defined(ASIO_HAS_BOOST_CHRONO) // Boost support for the DateTime library. #if !defined(ASIO_HAS_BOOST_DATE_TIME) # if !defined(ASIO_DISABLE_BOOST_DATE_TIME) # define ASIO_HAS_BOOST_DATE_TIME 1 # endif // !defined(ASIO_DISABLE_BOOST_DATE_TIME) #endif // !defined(ASIO_HAS_BOOST_DATE_TIME) // Standard library support for addressof. 
#if !defined(ASIO_HAS_STD_ADDRESSOF) # if !defined(ASIO_DISABLE_STD_ADDRESSOF) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_ADDRESSOF 1 # elif (__cplusplus >= 201103) # define ASIO_HAS_STD_ADDRESSOF 1 # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_ADDRESSOF 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_STD_ADDRESSOF 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_ADDRESSOF) #endif // !defined(ASIO_HAS_STD_ADDRESSOF) // Standard library support for the function class. #if !defined(ASIO_HAS_STD_FUNCTION) # if !defined(ASIO_DISABLE_STD_FUNCTION) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_FUNCTION 1 # elif (__cplusplus >= 201103) # define ASIO_HAS_STD_FUNCTION 1 # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_FUNCTION 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_STD_FUNCTION 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_FUNCTION) #endif // !defined(ASIO_HAS_STD_FUNCTION) // Standard library support for type traits. 
#if !defined(ASIO_HAS_STD_TYPE_TRAITS) # if !defined(ASIO_DISABLE_STD_TYPE_TRAITS) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_TYPE_TRAITS 1 # elif (__cplusplus >= 201103) # if __has_include() # define ASIO_HAS_STD_TYPE_TRAITS 1 # endif // __has_include() # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_TYPE_TRAITS 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_STD_TYPE_TRAITS 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_TYPE_TRAITS) #endif // !defined(ASIO_HAS_STD_TYPE_TRAITS) // Standard library support for the cstdint header. #if !defined(ASIO_HAS_CSTDINT) # if !defined(ASIO_DISABLE_CSTDINT) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_CSTDINT 1 # elif (__cplusplus >= 201103) # define ASIO_HAS_CSTDINT 1 # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_CSTDINT 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_CSTDINT 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_CSTDINT) #endif // !defined(ASIO_HAS_CSTDINT) // Standard library support for the thread class. 
#if !defined(ASIO_HAS_STD_THREAD) # if !defined(ASIO_DISABLE_STD_THREAD) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_THREAD 1 # elif (__cplusplus >= 201103) # if __has_include() # define ASIO_HAS_STD_THREAD 1 # endif // __has_include() # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_THREAD 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_STD_THREAD 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_THREAD) #endif // !defined(ASIO_HAS_STD_THREAD) // Standard library support for the mutex and condition variable classes. #if !defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) # if !defined(ASIO_DISABLE_STD_MUTEX_AND_CONDVAR) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_MUTEX_AND_CONDVAR 1 # elif (__cplusplus >= 201103) # if __has_include() # define ASIO_HAS_STD_MUTEX_AND_CONDVAR 1 # endif // __has_include() # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_MUTEX_AND_CONDVAR 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_STD_MUTEX_AND_CONDVAR 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_MUTEX_AND_CONDVAR) #endif // !defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) // Windows App target. Windows but with a limited API. 
#if !defined(ASIO_WINDOWS_APP) # if defined(_WIN32_WINNT) && (_WIN32_WINNT >= 0x0603) # include # if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) \ && !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) # define ASIO_WINDOWS_APP 1 # endif // WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) // && !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) # endif // defined(_WIN32_WINNT) && (_WIN32_WINNT >= 0x0603) #endif // !defined(ASIO_WINDOWS_APP) // Legacy WinRT target. Windows App is preferred. #if !defined(ASIO_WINDOWS_RUNTIME) # if !defined(ASIO_WINDOWS_APP) # if defined(__cplusplus_winrt) # include # if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) \ && !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) # define ASIO_WINDOWS_RUNTIME 1 # endif // WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) // && !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) # endif // defined(__cplusplus_winrt) # endif // !defined(ASIO_WINDOWS_APP) #endif // !defined(ASIO_WINDOWS_RUNTIME) // Windows target. Excludes WinRT but includes Windows App targets. #if !defined(ASIO_WINDOWS) # if !defined(ASIO_WINDOWS_RUNTIME) # if defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_WINDOWS) # define ASIO_WINDOWS 1 # elif defined(WIN32) || defined(_WIN32) || defined(__WIN32__) # define ASIO_WINDOWS 1 # elif defined(ASIO_WINDOWS_APP) # define ASIO_WINDOWS 1 # endif // defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_WINDOWS) # endif // !defined(ASIO_WINDOWS_RUNTIME) #endif // !defined(ASIO_WINDOWS) // Windows: target OS version. #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) # if !defined(_WIN32_WINNT) && !defined(_WIN32_WINDOWS) # if defined(_MSC_VER) || defined(__BORLANDC__) # pragma message( \ "Please define _WIN32_WINNT or _WIN32_WINDOWS appropriately. For example:\n"\ "- add -D_WIN32_WINNT=0x0501 to the compiler command line; or\n"\ "- add _WIN32_WINNT=0x0501 to your project's Preprocessor Definitions.\n"\ "Assuming _WIN32_WINNT=0x0501 (i.e. 
Windows XP target).") # else // defined(_MSC_VER) || defined(__BORLANDC__) # warning Please define _WIN32_WINNT or _WIN32_WINDOWS appropriately. # warning For example, add -D_WIN32_WINNT=0x0501 to the compiler command line. # warning Assuming _WIN32_WINNT=0x0501 (i.e. Windows XP target). # endif // defined(_MSC_VER) || defined(__BORLANDC__) # define _WIN32_WINNT 0x0501 # endif // !defined(_WIN32_WINNT) && !defined(_WIN32_WINDOWS) # if defined(_MSC_VER) # if defined(_WIN32) && !defined(WIN32) # if !defined(_WINSOCK2API_) # define WIN32 // Needed for correct types in winsock2.h # else // !defined(_WINSOCK2API_) # error Please define the macro WIN32 in your compiler options # endif // !defined(_WINSOCK2API_) # endif // defined(_WIN32) && !defined(WIN32) # endif // defined(_MSC_VER) # if defined(__BORLANDC__) # if defined(__WIN32__) && !defined(WIN32) # if !defined(_WINSOCK2API_) # define WIN32 // Needed for correct types in winsock2.h # else // !defined(_WINSOCK2API_) # error Please define the macro WIN32 in your compiler options # endif // !defined(_WINSOCK2API_) # endif // defined(__WIN32__) && !defined(WIN32) # endif // defined(__BORLANDC__) # if defined(__CYGWIN__) # if !defined(__USE_W32_SOCKETS) # error You must add -D__USE_W32_SOCKETS to your compiler options. # endif // !defined(__USE_W32_SOCKETS) # endif // defined(__CYGWIN__) #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Windows: minimise header inclusion. #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) # if !defined(ASIO_NO_WIN32_LEAN_AND_MEAN) # if !defined(WIN32_LEAN_AND_MEAN) # define WIN32_LEAN_AND_MEAN # endif // !defined(WIN32_LEAN_AND_MEAN) # endif // !defined(ASIO_NO_WIN32_LEAN_AND_MEAN) #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Windows: suppress definition of "min" and "max" macros. 
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) # if !defined(ASIO_NO_NOMINMAX) # if !defined(NOMINMAX) # define NOMINMAX 1 # endif // !defined(NOMINMAX) # endif // !defined(ASIO_NO_NOMINMAX) #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Windows: IO Completion Ports. #if !defined(ASIO_HAS_IOCP) # if defined(ASIO_WINDOWS) || defined(__CYGWIN__) # if defined(_WIN32_WINNT) && (_WIN32_WINNT >= 0x0400) # if !defined(UNDER_CE) && !defined(ASIO_WINDOWS_APP) # if !defined(ASIO_DISABLE_IOCP) # define ASIO_HAS_IOCP 1 # endif // !defined(ASIO_DISABLE_IOCP) # endif // !defined(UNDER_CE) && !defined(ASIO_WINDOWS_APP) # endif // defined(_WIN32_WINNT) && (_WIN32_WINNT >= 0x0400) # endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) #endif // !defined(ASIO_HAS_IOCP) // On POSIX (and POSIX-like) platforms we need to include unistd.h in order to // get access to the various platform feature macros, e.g. to be able to test // for threads support. #if !defined(ASIO_HAS_UNISTD_H) # if !defined(ASIO_HAS_BOOST_CONFIG) # if defined(unix) \ || defined(__unix) \ || defined(_XOPEN_SOURCE) \ || defined(_POSIX_SOURCE) \ || (defined(__MACH__) && defined(__APPLE__)) \ || defined(__FreeBSD__) \ || defined(__NetBSD__) \ || defined(__OpenBSD__) \ || defined(__linux__) # define ASIO_HAS_UNISTD_H 1 # endif # endif // !defined(ASIO_HAS_BOOST_CONFIG) #endif // !defined(ASIO_HAS_UNISTD_H) #if defined(ASIO_HAS_UNISTD_H) # include #endif // defined(ASIO_HAS_UNISTD_H) // Linux: epoll, eventfd and timerfd. 
#if defined(__linux__) # include # if !defined(ASIO_HAS_EPOLL) # if !defined(ASIO_DISABLE_EPOLL) # if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,45) # define ASIO_HAS_EPOLL 1 # endif // LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,45) # endif // !defined(ASIO_DISABLE_EPOLL) # endif // !defined(ASIO_HAS_EPOLL) # if !defined(ASIO_HAS_EVENTFD) # if !defined(ASIO_DISABLE_EVENTFD) # if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) # define ASIO_HAS_EVENTFD 1 # endif // LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) # endif // !defined(ASIO_DISABLE_EVENTFD) # endif // !defined(ASIO_HAS_EVENTFD) # if !defined(ASIO_HAS_TIMERFD) # if defined(ASIO_HAS_EPOLL) # if (__GLIBC__ > 2) || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 8) # define ASIO_HAS_TIMERFD 1 # endif // (__GLIBC__ > 2) || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 8) # endif // defined(ASIO_HAS_EPOLL) # endif // !defined(ASIO_HAS_TIMERFD) #endif // defined(__linux__) // Mac OS X, FreeBSD, NetBSD, OpenBSD: kqueue. #if (defined(__MACH__) && defined(__APPLE__)) \ || defined(__FreeBSD__) \ || defined(__NetBSD__) \ || defined(__OpenBSD__) # if !defined(ASIO_HAS_KQUEUE) # if !defined(ASIO_DISABLE_KQUEUE) # define ASIO_HAS_KQUEUE 1 # endif // !defined(ASIO_DISABLE_KQUEUE) # endif // !defined(ASIO_HAS_KQUEUE) #endif // (defined(__MACH__) && defined(__APPLE__)) // || defined(__FreeBSD__) // || defined(__NetBSD__) // || defined(__OpenBSD__) // Solaris: /dev/poll. #if defined(__sun) # if !defined(ASIO_HAS_DEV_POLL) # if !defined(ASIO_DISABLE_DEV_POLL) # define ASIO_HAS_DEV_POLL 1 # endif // !defined(ASIO_DISABLE_DEV_POLL) # endif // !defined(ASIO_HAS_DEV_POLL) #endif // defined(__sun) // Serial ports. 
#if !defined(ASIO_HAS_SERIAL_PORT) # if defined(ASIO_HAS_IOCP) \ || !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) # if !defined(__SYMBIAN32__) # if !defined(ASIO_DISABLE_SERIAL_PORT) # define ASIO_HAS_SERIAL_PORT 1 # endif // !defined(ASIO_DISABLE_SERIAL_PORT) # endif // !defined(__SYMBIAN32__) # endif // defined(ASIO_HAS_IOCP) // || !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) #endif // !defined(ASIO_HAS_SERIAL_PORT) // Windows: stream handles. #if !defined(ASIO_HAS_WINDOWS_STREAM_HANDLE) # if !defined(ASIO_DISABLE_WINDOWS_STREAM_HANDLE) # if defined(ASIO_HAS_IOCP) # define ASIO_HAS_WINDOWS_STREAM_HANDLE 1 # endif // defined(ASIO_HAS_IOCP) # endif // !defined(ASIO_DISABLE_WINDOWS_STREAM_HANDLE) #endif // !defined(ASIO_HAS_WINDOWS_STREAM_HANDLE) // Windows: random access handles. #if !defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE) # if !defined(ASIO_DISABLE_WINDOWS_RANDOM_ACCESS_HANDLE) # if defined(ASIO_HAS_IOCP) # define ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE 1 # endif // defined(ASIO_HAS_IOCP) # endif // !defined(ASIO_DISABLE_WINDOWS_RANDOM_ACCESS_HANDLE) #endif // !defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE) // Windows: object handles. #if !defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) # if !defined(ASIO_DISABLE_WINDOWS_OBJECT_HANDLE) # if defined(ASIO_WINDOWS) || defined(__CYGWIN__) # if !defined(UNDER_CE) && !defined(ASIO_WINDOWS_APP) # define ASIO_HAS_WINDOWS_OBJECT_HANDLE 1 # endif // !defined(UNDER_CE) && !defined(ASIO_WINDOWS_APP) # endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) # endif // !defined(ASIO_DISABLE_WINDOWS_OBJECT_HANDLE) #endif // !defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) // Windows: OVERLAPPED wrapper. 
#if !defined(ASIO_HAS_WINDOWS_OVERLAPPED_PTR) # if !defined(ASIO_DISABLE_WINDOWS_OVERLAPPED_PTR) # if defined(ASIO_HAS_IOCP) # define ASIO_HAS_WINDOWS_OVERLAPPED_PTR 1 # endif // defined(ASIO_HAS_IOCP) # endif // !defined(ASIO_DISABLE_WINDOWS_OVERLAPPED_PTR) #endif // !defined(ASIO_HAS_WINDOWS_OVERLAPPED_PTR) // POSIX: stream-oriented file descriptors. #if !defined(ASIO_HAS_POSIX_STREAM_DESCRIPTOR) # if !defined(ASIO_DISABLE_POSIX_STREAM_DESCRIPTOR) # if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) # define ASIO_HAS_POSIX_STREAM_DESCRIPTOR 1 # endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) # endif // !defined(ASIO_DISABLE_POSIX_STREAM_DESCRIPTOR) #endif // !defined(ASIO_HAS_POSIX_STREAM_DESCRIPTOR) // UNIX domain sockets. #if !defined(ASIO_HAS_LOCAL_SOCKETS) # if !defined(ASIO_DISABLE_LOCAL_SOCKETS) # if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) # define ASIO_HAS_LOCAL_SOCKETS 1 # endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) # endif // !defined(ASIO_DISABLE_LOCAL_SOCKETS) #endif // !defined(ASIO_HAS_LOCAL_SOCKETS) // Can use sigaction() instead of signal(). #if !defined(ASIO_HAS_SIGACTION) # if !defined(ASIO_DISABLE_SIGACTION) # if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) # define ASIO_HAS_SIGACTION 1 # endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) # endif // !defined(ASIO_DISABLE_SIGACTION) #endif // !defined(ASIO_HAS_SIGACTION) // Can use signal(). #if !defined(ASIO_HAS_SIGNAL) # if !defined(ASIO_DISABLE_SIGNAL) # if !defined(UNDER_CE) # define ASIO_HAS_SIGNAL 1 # endif // !defined(UNDER_CE) # endif // !defined(ASIO_DISABLE_SIGNAL) #endif // !defined(ASIO_HAS_SIGNAL) // Can use getaddrinfo() and getnameinfo(). 
#if !defined(ASIO_HAS_GETADDRINFO) # if !defined(ASIO_DISABLE_GETADDRINFO) # if defined(ASIO_WINDOWS) || defined(__CYGWIN__) # if defined(_WIN32_WINNT) && (_WIN32_WINNT >= 0x0501) # define ASIO_HAS_GETADDRINFO 1 # elif defined(UNDER_CE) # define ASIO_HAS_GETADDRINFO 1 # endif // defined(UNDER_CE) # elif defined(__MACH__) && defined(__APPLE__) # if defined(__MAC_OS_X_VERSION_MIN_REQUIRED) # if (__MAC_OS_X_VERSION_MIN_REQUIRED >= 1050) # define ASIO_HAS_GETADDRINFO 1 # endif // (__MAC_OS_X_VERSION_MIN_REQUIRED >= 1050) # else // defined(__MAC_OS_X_VERSION_MIN_REQUIRED) # define ASIO_HAS_GETADDRINFO 1 # endif // defined(__MAC_OS_X_VERSION_MIN_REQUIRED) # else // defined(__MACH__) && defined(__APPLE__) # define ASIO_HAS_GETADDRINFO 1 # endif // defined(__MACH__) && defined(__APPLE__) # endif // !defined(ASIO_DISABLE_GETADDRINFO) #endif // !defined(ASIO_HAS_GETADDRINFO) // Whether standard iostreams are disabled. #if !defined(ASIO_NO_IOSTREAM) # if defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_NO_IOSTREAM) # define ASIO_NO_IOSTREAM 1 # endif // !defined(BOOST_NO_IOSTREAM) #endif // !defined(ASIO_NO_IOSTREAM) // Whether exception handling is disabled. #if !defined(ASIO_NO_EXCEPTIONS) # if defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_NO_EXCEPTIONS) # define ASIO_NO_EXCEPTIONS 1 # endif // !defined(BOOST_NO_EXCEPTIONS) #endif // !defined(ASIO_NO_EXCEPTIONS) // Whether the typeid operator is supported. #if !defined(ASIO_NO_TYPEID) # if defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_NO_TYPEID) # define ASIO_NO_TYPEID 1 # endif // !defined(BOOST_NO_TYPEID) #endif // !defined(ASIO_NO_TYPEID) // Threads. 
#if !defined(ASIO_HAS_THREADS) # if !defined(ASIO_DISABLE_THREADS) # if defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_HAS_THREADS) # define ASIO_HAS_THREADS 1 # elif defined(_MSC_VER) && defined(_MT) # define ASIO_HAS_THREADS 1 # elif defined(__BORLANDC__) && defined(__MT__) # define ASIO_HAS_THREADS 1 # elif defined(_POSIX_THREADS) # define ASIO_HAS_THREADS 1 # endif // defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_HAS_THREADS) # endif // !defined(ASIO_DISABLE_THREADS) #endif // !defined(ASIO_HAS_THREADS) // POSIX threads. #if !defined(ASIO_HAS_PTHREADS) # if defined(ASIO_HAS_THREADS) # if defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_HAS_PTHREADS) # define ASIO_HAS_PTHREADS 1 # elif defined(_POSIX_THREADS) # define ASIO_HAS_PTHREADS 1 # endif // defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_HAS_PTHREADS) # endif // defined(ASIO_HAS_THREADS) #endif // !defined(ASIO_HAS_PTHREADS) // Helper to prevent macro expansion. #define ASIO_PREVENT_MACRO_SUBSTITUTION // Helper to define in-class constants. #if !defined(ASIO_STATIC_CONSTANT) # if !defined(ASIO_DISABLE_BOOST_STATIC_CONSTANT) # define ASIO_STATIC_CONSTANT(type, assignment) \ BOOST_STATIC_CONSTANT(type, assignment) # else // !defined(ASIO_DISABLE_BOOST_STATIC_CONSTANT) # define ASIO_STATIC_CONSTANT(type, assignment) \ static const type assignment # endif // !defined(ASIO_DISABLE_BOOST_STATIC_CONSTANT) #endif // !defined(ASIO_STATIC_CONSTANT) // Boost array library. #if !defined(ASIO_HAS_BOOST_ARRAY) # if !defined(ASIO_DISABLE_BOOST_ARRAY) # define ASIO_HAS_BOOST_ARRAY 1 # endif // !defined(ASIO_DISABLE_BOOST_ARRAY) #endif // !defined(ASIO_HAS_BOOST_ARRAY) // Boost assert macro. #if !defined(ASIO_HAS_BOOST_ASSERT) # if !defined(ASIO_DISABLE_BOOST_ASSERT) # define ASIO_HAS_BOOST_ASSERT 1 # endif // !defined(ASIO_DISABLE_BOOST_ASSERT) #endif // !defined(ASIO_HAS_BOOST_ASSERT) // Boost limits header. 
#if !defined(ASIO_HAS_BOOST_LIMITS) # if !defined(ASIO_DISABLE_BOOST_LIMITS) # define ASIO_HAS_BOOST_LIMITS 1 # endif // !defined(ASIO_DISABLE_BOOST_LIMITS) #endif // !defined(ASIO_HAS_BOOST_LIMITS) // Boost throw_exception function. #if !defined(ASIO_HAS_BOOST_THROW_EXCEPTION) # if !defined(ASIO_DISABLE_BOOST_THROW_EXCEPTION) # define ASIO_HAS_BOOST_THROW_EXCEPTION 1 # endif // !defined(ASIO_DISABLE_BOOST_THROW_EXCEPTION) #endif // !defined(ASIO_HAS_BOOST_THROW_EXCEPTION) // Boost regex library. #if !defined(ASIO_HAS_BOOST_REGEX) # if !defined(ASIO_DISABLE_BOOST_REGEX) # define ASIO_HAS_BOOST_REGEX 1 # endif // !defined(ASIO_DISABLE_BOOST_REGEX) #endif // !defined(ASIO_HAS_BOOST_REGEX) // Boost bind function. #if !defined(ASIO_HAS_BOOST_BIND) # if !defined(ASIO_DISABLE_BOOST_BIND) # define ASIO_HAS_BOOST_BIND 1 # endif // !defined(ASIO_DISABLE_BOOST_BIND) #endif // !defined(ASIO_HAS_BOOST_BIND) // Boost's BOOST_WORKAROUND macro. #if !defined(ASIO_HAS_BOOST_WORKAROUND) # if !defined(ASIO_DISABLE_BOOST_WORKAROUND) # define ASIO_HAS_BOOST_WORKAROUND 1 # endif // !defined(ASIO_DISABLE_BOOST_WORKAROUND) #endif // !defined(ASIO_HAS_BOOST_WORKAROUND) // Microsoft Visual C++'s secure C runtime library. #if !defined(ASIO_HAS_SECURE_RTL) # if !defined(ASIO_DISABLE_SECURE_RTL) # if defined(ASIO_MSVC) \ && (ASIO_MSVC >= 1400) \ && !defined(UNDER_CE) # define ASIO_HAS_SECURE_RTL 1 # endif // defined(ASIO_MSVC) // && (ASIO_MSVC >= 1400) // && !defined(UNDER_CE) # endif // !defined(ASIO_DISABLE_SECURE_RTL) #endif // !defined(ASIO_HAS_SECURE_RTL) // Handler hooking. Disabled for ancient Borland C++ and gcc compilers. 
#if !defined(ASIO_HAS_HANDLER_HOOKS) # if !defined(ASIO_DISABLE_HANDLER_HOOKS) # if defined(__GNUC__) # if (__GNUC__ >= 3) # define ASIO_HAS_HANDLER_HOOKS 1 # endif // (__GNUC__ >= 3) # elif !defined(__BORLANDC__) # define ASIO_HAS_HANDLER_HOOKS 1 # endif // !defined(__BORLANDC__) # endif // !defined(ASIO_DISABLE_HANDLER_HOOKS) #endif // !defined(ASIO_HAS_HANDLER_HOOKS) // Support for the __thread keyword extension. #if !defined(ASIO_DISABLE_THREAD_KEYWORD_EXTENSION) # if defined(__linux__) # if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) # if ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3) # if !defined(__INTEL_COMPILER) && !defined(__ICL) # define ASIO_HAS_THREAD_KEYWORD_EXTENSION 1 # define ASIO_THREAD_KEYWORD __thread # elif defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1100) # define ASIO_HAS_THREAD_KEYWORD_EXTENSION 1 # endif // defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1100) # endif // ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3) # endif // defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) # endif // defined(__linux__) # if defined(ASIO_MSVC) && defined(ASIO_WINDOWS_RUNTIME) # if (_MSC_VER >= 1700) # define ASIO_HAS_THREAD_KEYWORD_EXTENSION 1 # define ASIO_THREAD_KEYWORD __declspec(thread) # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) && defined(ASIO_WINDOWS_RUNTIME) #endif // !defined(ASIO_DISABLE_THREAD_KEYWORD_EXTENSION) #if !defined(ASIO_THREAD_KEYWORD) # define ASIO_THREAD_KEYWORD __thread #endif // !defined(ASIO_THREAD_KEYWORD) // Support for POSIX ssize_t typedef. 
#if !defined(ASIO_DISABLE_SSIZE_T) # if defined(__linux__) \ || (defined(__MACH__) && defined(__APPLE__)) # define ASIO_HAS_SSIZE_T 1 # endif // defined(__linux__) // || (defined(__MACH__) && defined(__APPLE__)) #endif // !defined(ASIO_DISABLE_SSIZE_T) #endif // ASIO_DETAIL_CONFIG_HPP galera-26.4.3/asio/asio/detail/null_mutex.hpp0000664000177500017540000000217513540715002017420 0ustar dbartmy// // detail/null_mutex.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_NULL_MUTEX_HPP #define ASIO_DETAIL_NULL_MUTEX_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) #include "asio/detail/noncopyable.hpp" #include "asio/detail/scoped_lock.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class null_mutex : private noncopyable { public: typedef asio::detail::scoped_lock scoped_lock; // Constructor. null_mutex() { } // Destructor. ~null_mutex() { } // Lock the mutex. void lock() { } // Unlock the mutex. void unlock() { } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_HAS_THREADS) #endif // ASIO_DETAIL_NULL_MUTEX_HPP galera-26.4.3/asio/asio/detail/weak_ptr.hpp0000664000177500017540000000167513540715002017044 0ustar dbartmy// // detail/weak_ptr.hpp // ~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WEAK_PTR_HPP #define ASIO_DETAIL_WEAK_PTR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_SHARED_PTR) # include #else // defined(ASIO_HAS_STD_SHARED_PTR) # include #endif // defined(ASIO_HAS_STD_SHARED_PTR) namespace asio { namespace detail { #if defined(ASIO_HAS_STD_SHARED_PTR) using std::weak_ptr; #else // defined(ASIO_HAS_STD_SHARED_PTR) using boost::weak_ptr; #endif // defined(ASIO_HAS_STD_SHARED_PTR) } // namespace detail } // namespace asio #endif // ASIO_DETAIL_WEAK_PTR_HPP galera-26.4.3/asio/asio/detail/winrt_socket_connect_op.hpp0000664000177500017540000000525713540715002022152 0ustar dbartmy// // detail/winrt_socket_connect_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_SOCKET_CONNECT_OP_HPP #define ASIO_DETAIL_WINRT_SOCKET_CONNECT_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/winrt_async_op.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class winrt_socket_connect_op : public winrt_async_op { public: ASIO_DEFINE_HANDLER_PTR(winrt_socket_connect_op); winrt_socket_connect_op(Handler& handler) : winrt_async_op(&winrt_socket_connect_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code&, std::size_t) { // Take ownership of the operation object. winrt_socket_connect_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder1 handler(o->handler_, o->ec_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. 
if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_WINRT_SOCKET_CONNECT_OP_HPP galera-26.4.3/asio/asio/detail/dev_poll_reactor.hpp0000664000177500017540000001565513540715002020556 0ustar dbartmy// // detail/dev_poll_reactor.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_DEV_POLL_REACTOR_HPP #define ASIO_DETAIL_DEV_POLL_REACTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_DEV_POLL) #include #include #include #include "asio/detail/hash_map.hpp" #include "asio/detail/limits.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/reactor_op_queue.hpp" #include "asio/detail/select_interrupter.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/timer_queue_base.hpp" #include "asio/detail/timer_queue_set.hpp" #include "asio/detail/wait_op.hpp" #include "asio/io_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class dev_poll_reactor : public asio::detail::service_base { public: enum op_types { read_op = 0, write_op = 1, connect_op = 1, except_op = 2, max_ops = 3 }; // Per-descriptor data. struct per_descriptor_data { }; // Constructor. ASIO_DECL dev_poll_reactor(asio::io_service& io_service); // Destructor. 
ASIO_DECL ~dev_poll_reactor(); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Recreate internal descriptors following a fork. ASIO_DECL void fork_service( asio::io_service::fork_event fork_ev); // Initialise the task. ASIO_DECL void init_task(); // Register a socket with the reactor. Returns 0 on success, system error // code on failure. ASIO_DECL int register_descriptor(socket_type, per_descriptor_data&); // Register a descriptor with an associated single operation. Returns 0 on // success, system error code on failure. ASIO_DECL int register_internal_descriptor( int op_type, socket_type descriptor, per_descriptor_data& descriptor_data, reactor_op* op); // Move descriptor registration from one descriptor_data object to another. ASIO_DECL void move_descriptor(socket_type descriptor, per_descriptor_data& target_descriptor_data, per_descriptor_data& source_descriptor_data); // Post a reactor operation for immediate completion. void post_immediate_completion(reactor_op* op, bool is_continuation) { io_service_.post_immediate_completion(op, is_continuation); } // Start a new operation. The reactor operation will be performed when the // given descriptor is flagged as ready, or an error has occurred. ASIO_DECL void start_op(int op_type, socket_type descriptor, per_descriptor_data&, reactor_op* op, bool is_continuation, bool allow_speculative); // Cancel all operations associated with the given descriptor. The // handlers associated with the descriptor will be invoked with the // operation_aborted error. ASIO_DECL void cancel_ops(socket_type descriptor, per_descriptor_data&); // Cancel any operations that are running against the descriptor and remove // its registration from the reactor. ASIO_DECL void deregister_descriptor(socket_type descriptor, per_descriptor_data&, bool closing); // Cancel any operations that are running against the descriptor and remove // its registration from the reactor. 
ASIO_DECL void deregister_internal_descriptor( socket_type descriptor, per_descriptor_data&); // Add a new timer queue to the reactor. template void add_timer_queue(timer_queue& queue); // Remove a timer queue from the reactor. template void remove_timer_queue(timer_queue& queue); // Schedule a new operation in the given timer queue to expire at the // specified absolute time. template void schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op); // Cancel the timer operations associated with the given token. Returns the // number of operations that have been posted or dispatched. template std::size_t cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled = (std::numeric_limits::max)()); // Run /dev/poll once until interrupted or events are ready to be dispatched. ASIO_DECL void run(bool block, op_queue& ops); // Interrupt the select loop. ASIO_DECL void interrupt(); private: // Create the /dev/poll file descriptor. Throws an exception if the descriptor // cannot be created. ASIO_DECL static int do_dev_poll_create(); // Helper function to add a new timer queue. ASIO_DECL void do_add_timer_queue(timer_queue_base& queue); // Helper function to remove a timer queue. ASIO_DECL void do_remove_timer_queue(timer_queue_base& queue); // Get the timeout value for the /dev/poll DP_POLL operation. The timeout // value is returned as a number of milliseconds. A return value of -1 // indicates that the poll should block indefinitely. ASIO_DECL int get_timeout(); // Cancel all operations associated with the given descriptor. The do_cancel // function of the handler objects will be invoked. This function does not // acquire the dev_poll_reactor's mutex. ASIO_DECL void cancel_ops_unlocked(socket_type descriptor, const asio::error_code& ec); // Add a pending event entry for the given descriptor. 
ASIO_DECL ::pollfd& add_pending_event_change(int descriptor); // The io_service implementation used to post completions. io_service_impl& io_service_; // Mutex to protect access to internal data. asio::detail::mutex mutex_; // The /dev/poll file descriptor. int dev_poll_fd_; // Vector of /dev/poll events waiting to be written to the descriptor. std::vector< ::pollfd> pending_event_changes_; // Hash map to associate a descriptor with a pending event change index. hash_map pending_event_change_index_; // The interrupter is used to break a blocking DP_POLL operation. select_interrupter interrupter_; // The queues of read, write and except operations. reactor_op_queue op_queue_[max_ops]; // The timer queues. timer_queue_set timer_queues_; // Whether the service has been shut down. bool shutdown_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/detail/impl/dev_poll_reactor.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/dev_poll_reactor.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_DEV_POLL) #endif // ASIO_DETAIL_DEV_POLL_REACTOR_HPP galera-26.4.3/asio/asio/detail/win_iocp_operation.hpp0000664000177500017540000000376013540715002021114 0ustar dbartmy// // detail/win_iocp_operation.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_OPERATION_HPP #define ASIO_DETAIL_WIN_IOCP_OPERATION_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/handler_tracking.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/socket_types.hpp" #include "asio/error_code.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class win_iocp_io_service; // Base class for all operations. A function pointer is used instead of virtual // functions to avoid the associated overhead. class win_iocp_operation : public OVERLAPPED ASIO_ALSO_INHERIT_TRACKED_HANDLER { public: void complete(win_iocp_io_service& owner, const asio::error_code& ec, std::size_t bytes_transferred) { func_(&owner, this, ec, bytes_transferred); } void destroy() { func_(0, this, asio::error_code(), 0); } protected: typedef void (*func_type)( win_iocp_io_service*, win_iocp_operation*, const asio::error_code&, std::size_t); win_iocp_operation(func_type func) : next_(0), func_(func) { reset(); } // Prevents deletion through this type. ~win_iocp_operation() { } void reset() { Internal = 0; InternalHigh = 0; Offset = 0; OffsetHigh = 0; hEvent = 0; ready_ = 0; } private: friend class op_queue_access; friend class win_iocp_io_service; win_iocp_operation* next_; func_type func_; long ready_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_OPERATION_HPP galera-26.4.3/asio/asio/detail/win_tss_ptr.hpp0000664000177500017540000000312513540715002017573 0ustar dbartmy// // detail/win_tss_ptr.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_TSS_PTR_HPP #define ASIO_DETAIL_WIN_TSS_PTR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) #include "asio/detail/noncopyable.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Helper function to create thread-specific storage. ASIO_DECL DWORD win_tss_ptr_create(); template class win_tss_ptr : private noncopyable { public: // Constructor. win_tss_ptr() : tss_key_(win_tss_ptr_create()) { } // Destructor. ~win_tss_ptr() { ::TlsFree(tss_key_); } // Get the value. operator T*() const { return static_cast(::TlsGetValue(tss_key_)); } // Set the value. void operator=(T* value) { ::TlsSetValue(tss_key_, value); } private: // Thread-specific storage to allow unlocked access to determine whether a // thread is a member of the pool. DWORD tss_key_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/win_tss_ptr.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_WINDOWS) #endif // ASIO_DETAIL_WIN_TSS_PTR_HPP galera-26.4.3/asio/asio/detail/descriptor_write_op.hpp0000664000177500017540000000707713540715002021320 0ustar dbartmy// // detail/descriptor_write_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_DESCRIPTOR_WRITE_OP_HPP #define ASIO_DETAIL_DESCRIPTOR_WRITE_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/descriptor_ops.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class descriptor_write_op_base : public reactor_op { public: descriptor_write_op_base(int descriptor, const ConstBufferSequence& buffers, func_type complete_func) : reactor_op(&descriptor_write_op_base::do_perform, complete_func), descriptor_(descriptor), buffers_(buffers) { } static bool do_perform(reactor_op* base) { descriptor_write_op_base* o(static_cast(base)); buffer_sequence_adapter bufs(o->buffers_); return descriptor_ops::non_blocking_write(o->descriptor_, bufs.buffers(), bufs.count(), o->ec_, o->bytes_transferred_); } private: int descriptor_; ConstBufferSequence buffers_; }; template class descriptor_write_op : public descriptor_write_op_base { public: ASIO_DEFINE_HANDLER_PTR(descriptor_write_op); descriptor_write_op(int descriptor, const ConstBufferSequence& buffers, Handler& handler) : descriptor_write_op_base( descriptor, buffers, &descriptor_write_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. 
descriptor_write_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, o->bytes_transferred_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #endif // ASIO_DETAIL_DESCRIPTOR_WRITE_OP_HPP galera-26.4.3/asio/asio/detail/socket_ops.hpp0000664000177500017540000002447513540715002017404 0ustar dbartmy// // detail/socket_ops.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SOCKET_OPS_HPP #define ASIO_DETAIL_SOCKET_OPS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/error_code.hpp" #include "asio/detail/shared_ptr.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/weak_ptr.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { namespace socket_ops { // Socket state bits. 
enum { // The user wants a non-blocking socket. user_set_non_blocking = 1, // The socket has been set non-blocking. internal_non_blocking = 2, // Helper "state" used to determine whether the socket is non-blocking. non_blocking = user_set_non_blocking | internal_non_blocking, // User wants connection_aborted errors, which are disabled by default. enable_connection_aborted = 4, // The user set the linger option. Needs to be checked when closing. user_set_linger = 8, // The socket is stream-oriented. stream_oriented = 16, // The socket is datagram-oriented. datagram_oriented = 32, // The socket may have been dup()-ed. possible_dup = 64 }; typedef unsigned char state_type; struct noop_deleter { void operator()(void*) {} }; typedef shared_ptr shared_cancel_token_type; typedef weak_ptr weak_cancel_token_type; #if !defined(ASIO_WINDOWS_RUNTIME) ASIO_DECL socket_type accept(socket_type s, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec); ASIO_DECL socket_type sync_accept(socket_type s, state_type state, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec); #if defined(ASIO_HAS_IOCP) ASIO_DECL void complete_iocp_accept(socket_type s, void* output_buffer, DWORD address_length, socket_addr_type* addr, std::size_t* addrlen, socket_type new_socket, asio::error_code& ec); #else // defined(ASIO_HAS_IOCP) ASIO_DECL bool non_blocking_accept(socket_type s, state_type state, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec, socket_type& new_socket); #endif // defined(ASIO_HAS_IOCP) ASIO_DECL int bind(socket_type s, const socket_addr_type* addr, std::size_t addrlen, asio::error_code& ec); ASIO_DECL int close(socket_type s, state_type& state, bool destruction, asio::error_code& ec); ASIO_DECL bool set_user_non_blocking(socket_type s, state_type& state, bool value, asio::error_code& ec); ASIO_DECL bool set_internal_non_blocking(socket_type s, state_type& state, bool value, asio::error_code& ec); ASIO_DECL int shutdown(socket_type s, 
int what, asio::error_code& ec); ASIO_DECL int connect(socket_type s, const socket_addr_type* addr, std::size_t addrlen, asio::error_code& ec); ASIO_DECL void sync_connect(socket_type s, const socket_addr_type* addr, std::size_t addrlen, asio::error_code& ec); #if defined(ASIO_HAS_IOCP) ASIO_DECL void complete_iocp_connect(socket_type s, asio::error_code& ec); #endif // defined(ASIO_HAS_IOCP) ASIO_DECL bool non_blocking_connect(socket_type s, asio::error_code& ec); ASIO_DECL int socketpair(int af, int type, int protocol, socket_type sv[2], asio::error_code& ec); ASIO_DECL bool sockatmark(socket_type s, asio::error_code& ec); ASIO_DECL size_t available(socket_type s, asio::error_code& ec); ASIO_DECL int listen(socket_type s, int backlog, asio::error_code& ec); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) typedef WSABUF buf; #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) typedef iovec buf; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) ASIO_DECL void init_buf(buf& b, void* data, size_t size); ASIO_DECL void init_buf(buf& b, const void* data, size_t size); ASIO_DECL signed_size_type recv(socket_type s, buf* bufs, size_t count, int flags, asio::error_code& ec); ASIO_DECL size_t sync_recv(socket_type s, state_type state, buf* bufs, size_t count, int flags, bool all_empty, asio::error_code& ec); #if defined(ASIO_HAS_IOCP) ASIO_DECL void complete_iocp_recv(state_type state, const weak_cancel_token_type& cancel_token, bool all_empty, asio::error_code& ec, size_t bytes_transferred); #else // defined(ASIO_HAS_IOCP) ASIO_DECL bool non_blocking_recv(socket_type s, buf* bufs, size_t count, int flags, bool is_stream, asio::error_code& ec, size_t& bytes_transferred); #endif // defined(ASIO_HAS_IOCP) ASIO_DECL signed_size_type recvfrom(socket_type s, buf* bufs, size_t count, int flags, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec); ASIO_DECL size_t sync_recvfrom(socket_type s, state_type state, buf* bufs, size_t count, int flags, 
socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec); #if defined(ASIO_HAS_IOCP) ASIO_DECL void complete_iocp_recvfrom( const weak_cancel_token_type& cancel_token, asio::error_code& ec); #else // defined(ASIO_HAS_IOCP) ASIO_DECL bool non_blocking_recvfrom(socket_type s, buf* bufs, size_t count, int flags, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec, size_t& bytes_transferred); #endif // defined(ASIO_HAS_IOCP) ASIO_DECL signed_size_type recvmsg(socket_type s, buf* bufs, size_t count, int in_flags, int& out_flags, asio::error_code& ec); ASIO_DECL size_t sync_recvmsg(socket_type s, state_type state, buf* bufs, size_t count, int in_flags, int& out_flags, asio::error_code& ec); #if defined(ASIO_HAS_IOCP) ASIO_DECL void complete_iocp_recvmsg( const weak_cancel_token_type& cancel_token, asio::error_code& ec); #else // defined(ASIO_HAS_IOCP) ASIO_DECL bool non_blocking_recvmsg(socket_type s, buf* bufs, size_t count, int in_flags, int& out_flags, asio::error_code& ec, size_t& bytes_transferred); #endif // defined(ASIO_HAS_IOCP) ASIO_DECL signed_size_type send(socket_type s, const buf* bufs, size_t count, int flags, asio::error_code& ec); ASIO_DECL size_t sync_send(socket_type s, state_type state, const buf* bufs, size_t count, int flags, bool all_empty, asio::error_code& ec); #if defined(ASIO_HAS_IOCP) ASIO_DECL void complete_iocp_send( const weak_cancel_token_type& cancel_token, asio::error_code& ec); #else // defined(ASIO_HAS_IOCP) ASIO_DECL bool non_blocking_send(socket_type s, const buf* bufs, size_t count, int flags, asio::error_code& ec, size_t& bytes_transferred); #endif // defined(ASIO_HAS_IOCP) ASIO_DECL signed_size_type sendto(socket_type s, const buf* bufs, size_t count, int flags, const socket_addr_type* addr, std::size_t addrlen, asio::error_code& ec); ASIO_DECL size_t sync_sendto(socket_type s, state_type state, const buf* bufs, size_t count, int flags, const socket_addr_type* addr, std::size_t addrlen, asio::error_code& 
ec); #if !defined(ASIO_HAS_IOCP) ASIO_DECL bool non_blocking_sendto(socket_type s, const buf* bufs, size_t count, int flags, const socket_addr_type* addr, std::size_t addrlen, asio::error_code& ec, size_t& bytes_transferred); #endif // !defined(ASIO_HAS_IOCP) ASIO_DECL socket_type socket(int af, int type, int protocol, asio::error_code& ec); ASIO_DECL int setsockopt(socket_type s, state_type& state, int level, int optname, const void* optval, std::size_t optlen, asio::error_code& ec); ASIO_DECL int getsockopt(socket_type s, state_type state, int level, int optname, void* optval, size_t* optlen, asio::error_code& ec); ASIO_DECL int getpeername(socket_type s, socket_addr_type* addr, std::size_t* addrlen, bool cached, asio::error_code& ec); ASIO_DECL int getsockname(socket_type s, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec); ASIO_DECL int ioctl(socket_type s, state_type& state, int cmd, ioctl_arg_type* arg, asio::error_code& ec); ASIO_DECL int select(int nfds, fd_set* readfds, fd_set* writefds, fd_set* exceptfds, timeval* timeout, asio::error_code& ec); ASIO_DECL int poll_read(socket_type s, state_type state, asio::error_code& ec); ASIO_DECL int poll_write(socket_type s, state_type state, asio::error_code& ec); ASIO_DECL int poll_connect(socket_type s, asio::error_code& ec); #endif // !defined(ASIO_WINDOWS_RUNTIME) ASIO_DECL const char* inet_ntop(int af, const void* src, char* dest, size_t length, unsigned long scope_id, asio::error_code& ec); ASIO_DECL int inet_pton(int af, const char* src, void* dest, unsigned long* scope_id, asio::error_code& ec); ASIO_DECL int gethostname(char* name, int namelen, asio::error_code& ec); #if !defined(ASIO_WINDOWS_RUNTIME) ASIO_DECL asio::error_code getaddrinfo(const char* host, const char* service, const addrinfo_type& hints, addrinfo_type** result, asio::error_code& ec); ASIO_DECL asio::error_code background_getaddrinfo( const weak_cancel_token_type& cancel_token, const char* host, const char* service, const 
addrinfo_type& hints, addrinfo_type** result, asio::error_code& ec); ASIO_DECL void freeaddrinfo(addrinfo_type* ai); ASIO_DECL asio::error_code getnameinfo( const socket_addr_type* addr, std::size_t addrlen, char* host, std::size_t hostlen, char* serv, std::size_t servlen, int flags, asio::error_code& ec); ASIO_DECL asio::error_code sync_getnameinfo( const socket_addr_type* addr, std::size_t addrlen, char* host, std::size_t hostlen, char* serv, std::size_t servlen, int sock_type, asio::error_code& ec); ASIO_DECL asio::error_code background_getnameinfo( const weak_cancel_token_type& cancel_token, const socket_addr_type* addr, std::size_t addrlen, char* host, std::size_t hostlen, char* serv, std::size_t servlen, int sock_type, asio::error_code& ec); #endif // !defined(ASIO_WINDOWS_RUNTIME) ASIO_DECL u_long_type network_to_host_long(u_long_type value); ASIO_DECL u_long_type host_to_network_long(u_long_type value); ASIO_DECL u_short_type network_to_host_short(u_short_type value); ASIO_DECL u_short_type host_to_network_short(u_short_type value); } // namespace socket_ops } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/socket_ops.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_DETAIL_SOCKET_OPS_HPP galera-26.4.3/asio/asio/detail/service_registry.hpp0000664000177500017540000001100313540715002020602 0ustar dbartmy// // detail/service_registry.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SERVICE_REGISTRY_HPP #define ASIO_DETAIL_SERVICE_REGISTRY_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/mutex.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/io_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class typeid_wrapper {}; class service_registry : private noncopyable { public: // Constructor. Adds the initial service. template service_registry(asio::io_service& o, Service* initial_service, Arg arg); // Destructor. ASIO_DECL ~service_registry(); // Notify all services of a fork event. ASIO_DECL void notify_fork(asio::io_service::fork_event fork_ev); // Get the first service object cast to the specified type. Called during // io_service construction and so performs no locking or type checking. template Service& first_service(); // Get the service object corresponding to the specified service type. Will // create a new service object automatically if no such object already // exists. Ownership of the service object is not transferred to the caller. template Service& use_service(); // Add a service object. Throws on error, in which case ownership of the // object is retained by the caller. template void add_service(Service* new_service); // Check whether a service object of the specified type already exists. template bool has_service() const; private: // Initialise a service's key based on its id. ASIO_DECL static void init_key( asio::io_service::service::key& key, const asio::io_service::id& id); #if !defined(ASIO_NO_TYPEID) // Initialise a service's key based on its id. template static void init_key(asio::io_service::service::key& key, const asio::detail::service_id& /*id*/); #endif // !defined(ASIO_NO_TYPEID) // Check if a service matches the given id. 
ASIO_DECL static bool keys_match( const asio::io_service::service::key& key1, const asio::io_service::service::key& key2); // The type of a factory function used for creating a service instance. typedef asio::io_service::service* (*factory_type)(asio::io_service&); // Factory function for creating a service instance. template static asio::io_service::service* create( asio::io_service& owner); // Destroy a service instance. ASIO_DECL static void destroy( asio::io_service::service* service); // Helper class to manage service pointers. struct auto_service_ptr; friend struct auto_service_ptr; struct auto_service_ptr { asio::io_service::service* ptr_; ~auto_service_ptr() { destroy(ptr_); } }; // Get the service object corresponding to the specified service key. Will // create a new service object automatically if no such object already // exists. Ownership of the service object is not transferred to the caller. ASIO_DECL asio::io_service::service* do_use_service( const asio::io_service::service::key& key, factory_type factory); // Add a service object. Throws on error, in which case ownership of the // object is retained by the caller. ASIO_DECL void do_add_service( const asio::io_service::service::key& key, asio::io_service::service* new_service); // Check whether a service object with the specified key already exists. ASIO_DECL bool do_has_service( const asio::io_service::service::key& key) const; // Mutex to protect access to internal data. mutable asio::detail::mutex mutex_; // The owner of this service registry and the services it contains. asio::io_service& owner_; // The first service in the list of contained services. 
asio::io_service::service* first_service_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/detail/impl/service_registry.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/service_registry.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_DETAIL_SERVICE_REGISTRY_HPP galera-26.4.3/asio/asio/detail/addressof.hpp0000664000177500017540000000170713540715002017176 0ustar dbartmy// // detail/addressof.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_ADDRESSOF_HPP #define ASIO_DETAIL_ADDRESSOF_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_ADDRESSOF) # include #else // defined(ASIO_HAS_STD_ADDRESSOF) # include #endif // defined(ASIO_HAS_STD_ADDRESSOF) namespace asio { namespace detail { #if defined(ASIO_HAS_STD_ADDRESSOF) using std::addressof; #else // defined(ASIO_HAS_STD_ADDRESSOF) using boost::addressof; #endif // defined(ASIO_HAS_STD_ADDRESSOF) } // namespace detail } // namespace asio #endif // ASIO_DETAIL_ADDRESSOF_HPP galera-26.4.3/asio/asio/detail/noncopyable.hpp0000664000177500017540000000162613540715002017535 0ustar dbartmy// // detail/noncopyable.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_NONCOPYABLE_HPP #define ASIO_DETAIL_NONCOPYABLE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class noncopyable { protected: noncopyable() {} ~noncopyable() {} private: noncopyable(const noncopyable&); const noncopyable& operator=(const noncopyable&); }; } // namespace detail using asio::detail::noncopyable; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_NONCOPYABLE_HPP galera-26.4.3/asio/asio/detail/deadline_timer_service.hpp0000664000177500017540000001465313540715002021715 0ustar dbartmy// // detail/deadline_timer_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_DEADLINE_TIMER_SERVICE_HPP #define ASIO_DETAIL_DEADLINE_TIMER_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/timer_queue.hpp" #include "asio/detail/timer_scheduler.hpp" #include "asio/detail/wait_handler.hpp" #include "asio/detail/wait_op.hpp" #if defined(ASIO_WINDOWS_RUNTIME) # include # include #endif // defined(ASIO_WINDOWS_RUNTIME) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class deadline_timer_service { public: // The time type. typedef typename Time_Traits::time_type time_type; // The duration type. typedef typename Time_Traits::duration_type duration_type; // The implementation type of the timer. This type is dependent on the // underlying implementation of the timer service. struct implementation_type : private asio::detail::noncopyable { time_type expiry; bool might_have_pending_waits; typename timer_queue::per_timer_data timer_data; }; // Constructor. deadline_timer_service(asio::io_service& io_service) : scheduler_(asio::use_service(io_service)) { scheduler_.init_task(); scheduler_.add_timer_queue(timer_queue_); } // Destructor. ~deadline_timer_service() { scheduler_.remove_timer_queue(timer_queue_); } // Destroy all user-defined handler objects owned by the service. void shutdown_service() { } // Construct a new timer implementation. void construct(implementation_type& impl) { impl.expiry = time_type(); impl.might_have_pending_waits = false; } // Destroy a timer implementation. 
void destroy(implementation_type& impl) { asio::error_code ec; cancel(impl, ec); } // Cancel any asynchronous wait operations associated with the timer. std::size_t cancel(implementation_type& impl, asio::error_code& ec) { if (!impl.might_have_pending_waits) { ec = asio::error_code(); return 0; } ASIO_HANDLER_OPERATION(("deadline_timer", &impl, "cancel")); std::size_t count = scheduler_.cancel_timer(timer_queue_, impl.timer_data); impl.might_have_pending_waits = false; ec = asio::error_code(); return count; } // Cancels one asynchronous wait operation associated with the timer. std::size_t cancel_one(implementation_type& impl, asio::error_code& ec) { if (!impl.might_have_pending_waits) { ec = asio::error_code(); return 0; } ASIO_HANDLER_OPERATION(("deadline_timer", &impl, "cancel_one")); std::size_t count = scheduler_.cancel_timer( timer_queue_, impl.timer_data, 1); if (count == 0) impl.might_have_pending_waits = false; ec = asio::error_code(); return count; } // Get the expiry time for the timer as an absolute time. time_type expires_at(const implementation_type& impl) const { return impl.expiry; } // Set the expiry time for the timer as an absolute time. std::size_t expires_at(implementation_type& impl, const time_type& expiry_time, asio::error_code& ec) { std::size_t count = cancel(impl, ec); impl.expiry = expiry_time; ec = asio::error_code(); return count; } // Get the expiry time for the timer relative to now. duration_type expires_from_now(const implementation_type& impl) const { return Time_Traits::subtract(expires_at(impl), Time_Traits::now()); } // Set the expiry time for the timer relative to now. std::size_t expires_from_now(implementation_type& impl, const duration_type& expiry_time, asio::error_code& ec) { return expires_at(impl, Time_Traits::add(Time_Traits::now(), expiry_time), ec); } // Perform a blocking wait on the timer. 
void wait(implementation_type& impl, asio::error_code& ec) { time_type now = Time_Traits::now(); ec = asio::error_code(); while (Time_Traits::less_than(now, impl.expiry) && !ec) { this->do_wait(Time_Traits::to_posix_duration( Time_Traits::subtract(impl.expiry, now)), ec); now = Time_Traits::now(); } } // Start an asynchronous wait on the timer. template void async_wait(implementation_type& impl, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef wait_handler op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); impl.might_have_pending_waits = true; ASIO_HANDLER_CREATION((p.p, "deadline_timer", &impl, "async_wait")); scheduler_.schedule_timer(timer_queue_, impl.expiry, impl.timer_data, p.p); p.v = p.p = 0; } private: // Helper function to wait given a duration type. The duration type should // either be of type boost::posix_time::time_duration, or implement the // required subset of its interface. template void do_wait(const Duration& timeout, asio::error_code& ec) { #if defined(ASIO_WINDOWS_RUNTIME) std::this_thread::sleep_for( std::chrono::seconds(timeout.total_seconds()) + std::chrono::microseconds(timeout.total_microseconds())); ec = asio::error_code(); #else // defined(ASIO_WINDOWS_RUNTIME) ::timeval tv; tv.tv_sec = timeout.total_seconds(); tv.tv_usec = timeout.total_microseconds() % 1000000; socket_ops::select(0, 0, 0, 0, &tv, ec); #endif // defined(ASIO_WINDOWS_RUNTIME) } // The queue of timers. timer_queue timer_queue_; // The object that schedules and executes timers. Usually a reactor. 
timer_scheduler& scheduler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_DEADLINE_TIMER_SERVICE_HPP galera-26.4.3/asio/asio/detail/reactor_op_queue.hpp0000664000177500017540000001135213540715002020562 0ustar dbartmy// // detail/reactor_op_queue.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTOR_OP_QUEUE_HPP #define ASIO_DETAIL_REACTOR_OP_QUEUE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/hash_map.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class reactor_op_queue : private noncopyable { public: typedef Descriptor key_type; struct mapped_type : op_queue { mapped_type() {} mapped_type(const mapped_type&) {} void operator=(const mapped_type&) {} }; typedef typename hash_map::value_type value_type; typedef typename hash_map::iterator iterator; // Constructor. reactor_op_queue() : operations_() { } // Obtain iterators to all registered descriptors. iterator begin() { return operations_.begin(); } iterator end() { return operations_.end(); } // Add a new operation to the queue. Returns true if this is the only // operation for the given descriptor, in which case the reactor's event // demultiplexing function call may need to be interrupted and restarted. 
bool enqueue_operation(Descriptor descriptor, reactor_op* op) { std::pair entry = operations_.insert(value_type(descriptor, mapped_type())); entry.first->second.push(op); return entry.second; } // Cancel all operations associated with the descriptor identified by the // supplied iterator. Any operations pending for the descriptor will be // cancelled. Returns true if any operations were cancelled, in which case // the reactor's event demultiplexing function may need to be interrupted and // restarted. bool cancel_operations(iterator i, op_queue& ops, const asio::error_code& ec = asio::error::operation_aborted) { if (i != operations_.end()) { while (reactor_op* op = i->second.front()) { op->ec_ = ec; i->second.pop(); ops.push(op); } operations_.erase(i); return true; } return false; } // Cancel all operations associated with the descriptor. Any operations // pending for the descriptor will be cancelled. Returns true if any // operations were cancelled, in which case the reactor's event // demultiplexing function may need to be interrupted and restarted. bool cancel_operations(Descriptor descriptor, op_queue& ops, const asio::error_code& ec = asio::error::operation_aborted) { return this->cancel_operations(operations_.find(descriptor), ops, ec); } // Whether there are no operations in the queue. bool empty() const { return operations_.empty(); } // Determine whether there are any operations associated with the descriptor. bool has_operation(Descriptor descriptor) const { return operations_.find(descriptor) != operations_.end(); } // Perform the operations corresponding to the descriptor identified by the // supplied iterator. Returns true if there are still unfinished operations // queued for the descriptor. 
bool perform_operations(iterator i, op_queue& ops) { if (i != operations_.end()) { while (reactor_op* op = i->second.front()) { if (op->perform()) { i->second.pop(); ops.push(op); } else { return true; } } operations_.erase(i); } return false; } // Perform the operations corresponding to the descriptor. Returns true if // there are still unfinished operations queued for the descriptor. bool perform_operations(Descriptor descriptor, op_queue& ops) { return this->perform_operations(operations_.find(descriptor), ops); } // Get all operations owned by the queue. void get_all_operations(op_queue& ops) { iterator i = operations_.begin(); while (i != operations_.end()) { iterator op_iter = i++; ops.push(op_iter->second); operations_.erase(op_iter); } } private: // The operations that are currently executing asynchronously. hash_map operations_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_REACTOR_OP_QUEUE_HPP galera-26.4.3/asio/asio/detail/reactive_socket_recvfrom_op.hpp0000664000177500017540000001042013540715002022767 0ustar dbartmy// // detail/reactive_socket_recvfrom_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_SOCKET_RECVFROM_OP_HPP #define ASIO_DETAIL_REACTIVE_SOCKET_RECVFROM_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class reactive_socket_recvfrom_op_base : public reactor_op { public: reactive_socket_recvfrom_op_base(socket_type socket, int protocol_type, const MutableBufferSequence& buffers, Endpoint& endpoint, socket_base::message_flags flags, func_type complete_func) : reactor_op(&reactive_socket_recvfrom_op_base::do_perform, complete_func), socket_(socket), protocol_type_(protocol_type), buffers_(buffers), sender_endpoint_(endpoint), flags_(flags) { } static bool do_perform(reactor_op* base) { reactive_socket_recvfrom_op_base* o( static_cast(base)); buffer_sequence_adapter bufs(o->buffers_); std::size_t addr_len = o->sender_endpoint_.capacity(); bool result = socket_ops::non_blocking_recvfrom(o->socket_, bufs.buffers(), bufs.count(), o->flags_, o->sender_endpoint_.data(), &addr_len, o->ec_, o->bytes_transferred_); if (result && !o->ec_) o->sender_endpoint_.resize(addr_len); return result; } private: socket_type socket_; int protocol_type_; MutableBufferSequence buffers_; Endpoint& sender_endpoint_; socket_base::message_flags flags_; }; template class reactive_socket_recvfrom_op : public reactive_socket_recvfrom_op_base { public: ASIO_DEFINE_HANDLER_PTR(reactive_socket_recvfrom_op); reactive_socket_recvfrom_op(socket_type socket, int protocol_type, const MutableBufferSequence& buffers, Endpoint& endpoint, 
socket_base::message_flags flags, Handler& handler) : reactive_socket_recvfrom_op_base( socket, protocol_type, buffers, endpoint, flags, &reactive_socket_recvfrom_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. reactive_socket_recvfrom_op* o( static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, o->bytes_transferred_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_REACTIVE_SOCKET_RECVFROM_OP_HPP galera-26.4.3/asio/asio/detail/socket_select_interrupter.hpp0000664000177500017540000000462513540715002022520 0ustar dbartmy// // detail/socket_select_interrupter.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SOCKET_SELECT_INTERRUPTER_HPP #define ASIO_DETAIL_SOCKET_SELECT_INTERRUPTER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS_RUNTIME) #if defined(ASIO_WINDOWS) \ || defined(__CYGWIN__) \ || defined(__SYMBIAN32__) #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class socket_select_interrupter { public: // Constructor. ASIO_DECL socket_select_interrupter(); // Destructor. ASIO_DECL ~socket_select_interrupter(); // Recreate the interrupter's descriptors. Used after a fork. ASIO_DECL void recreate(); // Interrupt the select call. ASIO_DECL void interrupt(); // Reset the select interrupt. Returns true if the call was interrupted. ASIO_DECL bool reset(); // Get the read descriptor to be passed to select. socket_type read_descriptor() const { return read_descriptor_; } private: // Open the descriptors. Throws on error. ASIO_DECL void open_descriptors(); // Close the descriptors. ASIO_DECL void close_descriptors(); // The read end of a connection used to interrupt the select call. This file // descriptor is passed to select such that when it is time to stop, a single // byte will be written on the other end of the connection and this // descriptor will become readable. socket_type read_descriptor_; // The write end of a connection used to interrupt the select call. A single // byte may be written to this to wake up the select which is waiting for the // other end to become readable. 
socket_type write_descriptor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/socket_select_interrupter.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_WINDOWS) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) #endif // !defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_SOCKET_SELECT_INTERRUPTER_HPP galera-26.4.3/asio/asio/detail/std_event.hpp0000664000177500017540000000701113540715002017211 0ustar dbartmy// // detail/std_event.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_STD_EVENT_HPP #define ASIO_DETAIL_STD_EVENT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) #include #include #include "asio/detail/assert.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class std_event : private noncopyable { public: // Constructor. std_event() : state_(0) { } // Destructor. ~std_event() { } // Signal the event. (Retained for backward compatibility.) template void signal(Lock& lock) { this->signal_all(lock); } // Signal all waiters. template void signal_all(Lock& lock) { ASIO_ASSERT(lock.locked()); (void)lock; state_ |= 1; cond_.notify_all(); } // Unlock the mutex and signal one waiter. template void unlock_and_signal_one(Lock& lock) { ASIO_ASSERT(lock.locked()); state_ |= 1; bool have_waiters = (state_ > 1); lock.unlock(); if (have_waiters) cond_.notify_one(); } // If there's a waiter, unlock the mutex and signal it. 
template bool maybe_unlock_and_signal_one(Lock& lock) { ASIO_ASSERT(lock.locked()); state_ |= 1; if (state_ > 1) { lock.unlock(); cond_.notify_one(); return true; } return false; } // Reset the event. template void clear(Lock& lock) { ASIO_ASSERT(lock.locked()); (void)lock; state_ &= ~std::size_t(1); } // Wait for the event to become signalled. template void wait(Lock& lock) { ASIO_ASSERT(lock.locked()); unique_lock_adapter u_lock(lock); while ((state_ & 1) == 0) { waiter w(state_); cond_.wait(u_lock.unique_lock_); } } // Timed wait for the event to become signalled. template bool wait_for_usec(Lock& lock, long usec) { ASIO_ASSERT(lock.locked()); unique_lock_adapter u_lock(lock); if ((state_ & 1) == 0) { waiter w(state_); cond_.wait_for(u_lock.unique_lock_, std::chrono::microseconds(usec)); } return (state_ & 1) != 0; } private: // Helper class to temporarily adapt a scoped_lock into a unique_lock so that // it can be passed to std::condition_variable::wait(). struct unique_lock_adapter { template explicit unique_lock_adapter(Lock& lock) : unique_lock_(lock.mutex().mutex_, std::adopt_lock) { } ~unique_lock_adapter() { unique_lock_.release(); } std::unique_lock unique_lock_; }; // Helper to increment and decrement the state to track outstanding waiters. class waiter { public: explicit waiter(std::size_t& state) : state_(state) { state_ += 2; } ~waiter() { state_ -= 2; } private: std::size_t& state_; }; std::condition_variable cond_; std::size_t state_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) #endif // ASIO_DETAIL_STD_EVENT_HPP galera-26.4.3/asio/asio/detail/win_iocp_thread_info.hpp0000664000177500017540000000147413540715002021376 0ustar dbartmy// // detail/win_iocp_thread_info.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_THREAD_INFO_HPP #define ASIO_DETAIL_WIN_IOCP_THREAD_INFO_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/thread_info_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct win_iocp_thread_info : public thread_info_base { }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_WIN_IOCP_THREAD_INFO_HPP galera-26.4.3/asio/asio/detail/reactive_socket_accept_op.hpp0000664000177500017540000001055513540715002022414 0ustar dbartmy// // detail/reactive_socket_accept_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_SOCKET_ACCEPT_OP_HPP #define ASIO_DETAIL_REACTIVE_SOCKET_ACCEPT_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_holder.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class reactive_socket_accept_op_base : public reactor_op { public: reactive_socket_accept_op_base(socket_type socket, socket_ops::state_type state, Socket& peer, const Protocol& protocol, typename Protocol::endpoint* peer_endpoint, func_type complete_func) : reactor_op(&reactive_socket_accept_op_base::do_perform, complete_func), 
socket_(socket), state_(state), peer_(peer), protocol_(protocol), peer_endpoint_(peer_endpoint) { } static bool do_perform(reactor_op* base) { reactive_socket_accept_op_base* o( static_cast(base)); std::size_t addrlen = o->peer_endpoint_ ? o->peer_endpoint_->capacity() : 0; socket_type new_socket = invalid_socket; bool result = socket_ops::non_blocking_accept(o->socket_, o->state_, o->peer_endpoint_ ? o->peer_endpoint_->data() : 0, o->peer_endpoint_ ? &addrlen : 0, o->ec_, new_socket); // On success, assign new connection to peer socket object. if (new_socket != invalid_socket) { socket_holder new_socket_holder(new_socket); if (o->peer_endpoint_) o->peer_endpoint_->resize(addrlen); if (!o->peer_.assign(o->protocol_, new_socket, o->ec_)) new_socket_holder.release(); } return result; } private: socket_type socket_; socket_ops::state_type state_; Socket& peer_; Protocol protocol_; typename Protocol::endpoint* peer_endpoint_; }; template class reactive_socket_accept_op : public reactive_socket_accept_op_base { public: ASIO_DEFINE_HANDLER_PTR(reactive_socket_accept_op); reactive_socket_accept_op(socket_type socket, socket_ops::state_type state, Socket& peer, const Protocol& protocol, typename Protocol::endpoint* peer_endpoint, Handler& handler) : reactive_socket_accept_op_base(socket, state, peer, protocol, peer_endpoint, &reactive_socket_accept_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. reactive_socket_accept_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. 
Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder1 handler(o->handler_, o->ec_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_REACTIVE_SOCKET_ACCEPT_OP_HPP galera-26.4.3/asio/asio/detail/winrt_ssocket_service_base.hpp0000664000177500017540000002670713540715002022643 0ustar dbartmy// // detail/winrt_ssocket_service_base.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_SSOCKET_SERVICE_BASE_HPP #define ASIO_DETAIL_WINRT_SSOCKET_SERVICE_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include "asio/buffer.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/socket_base.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/winrt_async_manager.hpp" #include "asio/detail/winrt_socket_recv_op.hpp" #include "asio/detail/winrt_socket_send_op.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class winrt_ssocket_service_base { public: // The native type of a socket. 
typedef Windows::Networking::Sockets::StreamSocket^ native_handle_type; // The implementation type of the socket. struct base_implementation_type { // Default constructor. base_implementation_type() : socket_(nullptr), next_(0), prev_(0) { } // The underlying native socket. native_handle_type socket_; // Pointers to adjacent socket implementations in linked list. base_implementation_type* next_; base_implementation_type* prev_; }; // Constructor. ASIO_DECL winrt_ssocket_service_base( asio::io_service& io_service); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Construct a new socket implementation. ASIO_DECL void construct(base_implementation_type&); // Move-construct a new socket implementation. ASIO_DECL void base_move_construct(base_implementation_type& impl, base_implementation_type& other_impl); // Move-assign from another socket implementation. ASIO_DECL void base_move_assign(base_implementation_type& impl, winrt_ssocket_service_base& other_service, base_implementation_type& other_impl); // Destroy a socket implementation. ASIO_DECL void destroy(base_implementation_type& impl); // Determine whether the socket is open. bool is_open(const base_implementation_type& impl) const { return impl.socket_ != nullptr; } // Destroy a socket implementation. ASIO_DECL asio::error_code close( base_implementation_type& impl, asio::error_code& ec); // Get the native socket representation. native_handle_type native_handle(base_implementation_type& impl) { return impl.socket_; } // Cancel all operations associated with the socket. asio::error_code cancel(base_implementation_type&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Determine whether the socket is at the out-of-band data mark. bool at_mark(const base_implementation_type&, asio::error_code& ec) const { ec = asio::error::operation_not_supported; return false; } // Determine the number of bytes available for reading. 
std::size_t available(const base_implementation_type&, asio::error_code& ec) const { ec = asio::error::operation_not_supported; return 0; } // Perform an IO control command on the socket. template asio::error_code io_control(base_implementation_type&, IO_Control_Command&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Gets the non-blocking mode of the socket. bool non_blocking(const base_implementation_type&) const { return false; } // Sets the non-blocking mode of the socket. asio::error_code non_blocking(base_implementation_type&, bool, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Gets the non-blocking mode of the native socket implementation. bool native_non_blocking(const base_implementation_type&) const { return false; } // Sets the non-blocking mode of the native socket implementation. asio::error_code native_non_blocking(base_implementation_type&, bool, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Disable sends or receives on the socket. asio::error_code shutdown(base_implementation_type&, socket_base::shutdown_type, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Send the given data to the peer. template std::size_t send(base_implementation_type& impl, const ConstBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { return do_send(impl, buffer_sequence_adapter::first(buffers), flags, ec); } // Wait until data can be sent without blocking. std::size_t send(base_implementation_type&, const null_buffers&, socket_base::message_flags, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Start an asynchronous send. The data being sent must be valid for the // lifetime of the asynchronous operation. 
template void async_send(base_implementation_type& impl, const ConstBufferSequence& buffers, socket_base::message_flags flags, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef winrt_socket_send_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(buffers, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_send")); start_send_op(impl, buffer_sequence_adapter::first(buffers), flags, p.p, is_continuation); p.v = p.p = 0; } // Start an asynchronous wait until data can be sent without blocking. template void async_send(base_implementation_type&, const null_buffers&, socket_base::message_flags, Handler& handler) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; io_service_.get_io_service().post( detail::bind_handler(handler, ec, bytes_transferred)); } // Receive some data from the peer. Returns the number of bytes received. template std::size_t receive(base_implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { return do_receive(impl, buffer_sequence_adapter::first(buffers), flags, ec); } // Wait until data can be received without blocking. std::size_t receive(base_implementation_type&, const null_buffers&, socket_base::message_flags, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Start an asynchronous receive. The buffer for the data being received // must be valid for the lifetime of the asynchronous operation. template void async_receive(base_implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags flags, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. 
typedef winrt_socket_recv_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(buffers, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_receive")); start_receive_op(impl, buffer_sequence_adapter::first(buffers), flags, p.p, is_continuation); p.v = p.p = 0; } // Wait until data can be received without blocking. template void async_receive(base_implementation_type&, const null_buffers&, socket_base::message_flags, Handler& handler) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; io_service_.get_io_service().post( detail::bind_handler(handler, ec, bytes_transferred)); } protected: // Helper function to obtain endpoints associated with the connection. ASIO_DECL std::size_t do_get_endpoint( const base_implementation_type& impl, bool local, void* addr, std::size_t addr_len, asio::error_code& ec) const; // Helper function to set a socket option. ASIO_DECL asio::error_code do_set_option( base_implementation_type& impl, int level, int optname, const void* optval, std::size_t optlen, asio::error_code& ec); // Helper function to get a socket option. ASIO_DECL void do_get_option( const base_implementation_type& impl, int level, int optname, void* optval, std::size_t* optlen, asio::error_code& ec) const; // Helper function to perform a synchronous connect. ASIO_DECL asio::error_code do_connect( base_implementation_type& impl, const void* addr, asio::error_code& ec); // Helper function to start an asynchronous connect. ASIO_DECL void start_connect_op( base_implementation_type& impl, const void* addr, winrt_async_op* op, bool is_continuation); // Helper function to perform a synchronous send. ASIO_DECL std::size_t do_send( base_implementation_type& impl, const asio::const_buffer& data, socket_base::message_flags flags, asio::error_code& ec); // Helper function to start an asynchronous send. 
ASIO_DECL void start_send_op(base_implementation_type& impl, const asio::const_buffer& data, socket_base::message_flags flags, winrt_async_op* op, bool is_continuation); // Helper function to perform a synchronous receive. ASIO_DECL std::size_t do_receive( base_implementation_type& impl, const asio::mutable_buffer& data, socket_base::message_flags flags, asio::error_code& ec); // Helper function to start an asynchronous receive. ASIO_DECL void start_receive_op(base_implementation_type& impl, const asio::mutable_buffer& data, socket_base::message_flags flags, winrt_async_op* op, bool is_continuation); // The io_service implementation used for delivering completions. io_service_impl& io_service_; // The manager that keeps track of outstanding operations. winrt_async_manager& async_manager_; // Mutex to protect access to the linked list of implementations. asio::detail::mutex mutex_; // The head of a linked list of all implementations. base_implementation_type* impl_list_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/winrt_ssocket_service_base.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_WINRT_SSOCKET_SERVICE_BASE_HPP galera-26.4.3/asio/asio/detail/kqueue_reactor.hpp0000664000177500017540000001567213540715002020250 0ustar dbartmy// // detail/kqueue_reactor.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2005 Stefan Arentz (stefan at soze dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_KQUEUE_REACTOR_HPP #define ASIO_DETAIL_KQUEUE_REACTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_KQUEUE) #include #include #include #include #include "asio/detail/limits.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/object_pool.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/select_interrupter.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/timer_queue_base.hpp" #include "asio/detail/timer_queue_set.hpp" #include "asio/detail/wait_op.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" // Older versions of Mac OS X may not define EV_OOBAND. #if !defined(EV_OOBAND) # define EV_OOBAND EV_FLAG1 #endif // !defined(EV_OOBAND) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class kqueue_reactor : public asio::detail::service_base { public: enum op_types { read_op = 0, write_op = 1, connect_op = 1, except_op = 2, max_ops = 3 }; // Per-descriptor queues. struct descriptor_state { friend class kqueue_reactor; friend class object_pool_access; descriptor_state* next_; descriptor_state* prev_; mutex mutex_; int descriptor_; int num_kevents_; // 1 == read only, 2 == read and write op_queue op_queue_[max_ops]; bool shutdown_; }; // Per-descriptor data. typedef descriptor_state* per_descriptor_data; // Constructor. ASIO_DECL kqueue_reactor(asio::io_service& io_service); // Destructor. ASIO_DECL ~kqueue_reactor(); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Recreate internal descriptors following a fork. ASIO_DECL void fork_service( asio::io_service::fork_event fork_ev); // Initialise the task. ASIO_DECL void init_task(); // Register a socket with the reactor. 
Returns 0 on success, system error // code on failure. ASIO_DECL int register_descriptor(socket_type descriptor, per_descriptor_data& descriptor_data); // Register a descriptor with an associated single operation. Returns 0 on // success, system error code on failure. ASIO_DECL int register_internal_descriptor( int op_type, socket_type descriptor, per_descriptor_data& descriptor_data, reactor_op* op); // Move descriptor registration from one descriptor_data object to another. ASIO_DECL void move_descriptor(socket_type descriptor, per_descriptor_data& target_descriptor_data, per_descriptor_data& source_descriptor_data); // Post a reactor operation for immediate completion. void post_immediate_completion(reactor_op* op, bool is_continuation) { io_service_.post_immediate_completion(op, is_continuation); } // Start a new operation. The reactor operation will be performed when the // given descriptor is flagged as ready, or an error has occurred. ASIO_DECL void start_op(int op_type, socket_type descriptor, per_descriptor_data& descriptor_data, reactor_op* op, bool is_continuation, bool allow_speculative); // Cancel all operations associated with the given descriptor. The // handlers associated with the descriptor will be invoked with the // operation_aborted error. ASIO_DECL void cancel_ops(socket_type descriptor, per_descriptor_data& descriptor_data); // Cancel any operations that are running against the descriptor and remove // its registration from the reactor. ASIO_DECL void deregister_descriptor(socket_type descriptor, per_descriptor_data& descriptor_data, bool closing); // Remote the descriptor's registration from the reactor. ASIO_DECL void deregister_internal_descriptor( socket_type descriptor, per_descriptor_data& descriptor_data); // Add a new timer queue to the reactor. template void add_timer_queue(timer_queue& queue); // Remove a timer queue from the reactor. 
template void remove_timer_queue(timer_queue& queue); // Schedule a new operation in the given timer queue to expire at the // specified absolute time. template void schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op); // Cancel the timer operations associated with the given token. Returns the // number of operations that have been posted or dispatched. template std::size_t cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled = (std::numeric_limits::max)()); // Run the kqueue loop. ASIO_DECL void run(bool block, op_queue& ops); // Interrupt the kqueue loop. ASIO_DECL void interrupt(); private: // Create the kqueue file descriptor. Throws an exception if the descriptor // cannot be created. ASIO_DECL static int do_kqueue_create(); // Allocate a new descriptor state object. ASIO_DECL descriptor_state* allocate_descriptor_state(); // Free an existing descriptor state object. ASIO_DECL void free_descriptor_state(descriptor_state* s); // Helper function to add a new timer queue. ASIO_DECL void do_add_timer_queue(timer_queue_base& queue); // Helper function to remove a timer queue. ASIO_DECL void do_remove_timer_queue(timer_queue_base& queue); // Get the timeout value for the kevent call. ASIO_DECL timespec* get_timeout(timespec& ts); // The io_service implementation used to post completions. io_service_impl& io_service_; // Mutex to protect access to internal data. mutex mutex_; // The kqueue file descriptor. int kqueue_fd_; // The interrupter is used to break a blocking kevent call. select_interrupter interrupter_; // The timer queues. timer_queue_set timer_queues_; // Whether the service has been shut down. bool shutdown_; // Mutex to protect access to the registered descriptors. mutex registered_descriptors_mutex_; // Keep track of all registered descriptors. 
object_pool registered_descriptors_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/detail/impl/kqueue_reactor.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/kqueue_reactor.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_KQUEUE) #endif // ASIO_DETAIL_KQUEUE_REACTOR_HPP galera-26.4.3/asio/asio/detail/reactive_descriptor_service.hpp0000664000177500017540000002462413540715002023007 0ustar dbartmy// // detail/reactive_descriptor_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_DESCRIPTOR_SERVICE_HPP #define ASIO_DETAIL_REACTIVE_DESCRIPTOR_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) #include "asio/buffer.hpp" #include "asio/io_service.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/descriptor_ops.hpp" #include "asio/detail/descriptor_read_op.hpp" #include "asio/detail/descriptor_write_op.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/reactive_null_buffers_op.hpp" #include "asio/detail/reactor.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class reactive_descriptor_service { public: // The native type of a descriptor. typedef int native_handle_type; // The implementation type of the descriptor. class implementation_type : private asio::detail::noncopyable { public: // Default constructor. 
implementation_type() : descriptor_(-1), state_(0) { } private: // Only this service will have access to the internal values. friend class reactive_descriptor_service; // The native descriptor representation. int descriptor_; // The current state of the descriptor. descriptor_ops::state_type state_; // Per-descriptor data used by the reactor. reactor::per_descriptor_data reactor_data_; }; // Constructor. ASIO_DECL reactive_descriptor_service( asio::io_service& io_service); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Construct a new descriptor implementation. ASIO_DECL void construct(implementation_type& impl); // Move-construct a new descriptor implementation. ASIO_DECL void move_construct(implementation_type& impl, implementation_type& other_impl); // Move-assign from another descriptor implementation. ASIO_DECL void move_assign(implementation_type& impl, reactive_descriptor_service& other_service, implementation_type& other_impl); // Destroy a descriptor implementation. ASIO_DECL void destroy(implementation_type& impl); // Assign a native descriptor to a descriptor implementation. ASIO_DECL asio::error_code assign(implementation_type& impl, const native_handle_type& native_descriptor, asio::error_code& ec); // Determine whether the descriptor is open. bool is_open(const implementation_type& impl) const { return impl.descriptor_ != -1; } // Destroy a descriptor implementation. ASIO_DECL asio::error_code close(implementation_type& impl, asio::error_code& ec); // Get the native descriptor representation. native_handle_type native_handle(const implementation_type& impl) const { return impl.descriptor_; } // Release ownership of the native descriptor representation. ASIO_DECL native_handle_type release(implementation_type& impl); // Cancel all operations associated with the descriptor. 
ASIO_DECL asio::error_code cancel(implementation_type& impl, asio::error_code& ec); // Perform an IO control command on the descriptor. template asio::error_code io_control(implementation_type& impl, IO_Control_Command& command, asio::error_code& ec) { descriptor_ops::ioctl(impl.descriptor_, impl.state_, command.name(), static_cast(command.data()), ec); return ec; } // Gets the non-blocking mode of the descriptor. bool non_blocking(const implementation_type& impl) const { return (impl.state_ & descriptor_ops::user_set_non_blocking) != 0; } // Sets the non-blocking mode of the descriptor. asio::error_code non_blocking(implementation_type& impl, bool mode, asio::error_code& ec) { descriptor_ops::set_user_non_blocking( impl.descriptor_, impl.state_, mode, ec); return ec; } // Gets the non-blocking mode of the native descriptor implementation. bool native_non_blocking(const implementation_type& impl) const { return (impl.state_ & descriptor_ops::internal_non_blocking) != 0; } // Sets the non-blocking mode of the native descriptor implementation. asio::error_code native_non_blocking(implementation_type& impl, bool mode, asio::error_code& ec) { descriptor_ops::set_internal_non_blocking( impl.descriptor_, impl.state_, mode, ec); return ec; } // Write some data to the descriptor. template size_t write_some(implementation_type& impl, const ConstBufferSequence& buffers, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); return descriptor_ops::sync_write(impl.descriptor_, impl.state_, bufs.buffers(), bufs.count(), bufs.all_empty(), ec); } // Wait until data can be written without blocking. size_t write_some(implementation_type& impl, const null_buffers&, asio::error_code& ec) { // Wait for descriptor to become ready. descriptor_ops::poll_write(impl.descriptor_, impl.state_, ec); return 0; } // Start an asynchronous write. The data being sent must be valid for the // lifetime of the asynchronous operation. 
template void async_write_some(implementation_type& impl, const ConstBufferSequence& buffers, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef descriptor_write_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.descriptor_, buffers, handler); ASIO_HANDLER_CREATION((p.p, "descriptor", &impl, "async_write_some")); start_op(impl, reactor::write_op, p.p, is_continuation, true, buffer_sequence_adapter::all_empty(buffers)); p.v = p.p = 0; } // Start an asynchronous wait until data can be written without blocking. template void async_write_some(implementation_type& impl, const null_buffers&, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "descriptor", &impl, "async_write_some(null_buffers)")); start_op(impl, reactor::write_op, p.p, is_continuation, false, false); p.v = p.p = 0; } // Read some data from the stream. Returns the number of bytes read. template size_t read_some(implementation_type& impl, const MutableBufferSequence& buffers, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); return descriptor_ops::sync_read(impl.descriptor_, impl.state_, bufs.buffers(), bufs.count(), bufs.all_empty(), ec); } // Wait until data can be read without blocking. size_t read_some(implementation_type& impl, const null_buffers&, asio::error_code& ec) { // Wait for descriptor to become ready. descriptor_ops::poll_read(impl.descriptor_, impl.state_, ec); return 0; } // Start an asynchronous read. 
The buffer for the data being read must be // valid for the lifetime of the asynchronous operation. template void async_read_some(implementation_type& impl, const MutableBufferSequence& buffers, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef descriptor_read_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.descriptor_, buffers, handler); ASIO_HANDLER_CREATION((p.p, "descriptor", &impl, "async_read_some")); start_op(impl, reactor::read_op, p.p, is_continuation, true, buffer_sequence_adapter::all_empty(buffers)); p.v = p.p = 0; } // Wait until data can be read without blocking. template void async_read_some(implementation_type& impl, const null_buffers&, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "descriptor", &impl, "async_read_some(null_buffers)")); start_op(impl, reactor::read_op, p.p, is_continuation, false, false); p.v = p.p = 0; } private: // Start the asynchronous operation. ASIO_DECL void start_op(implementation_type& impl, int op_type, reactor_op* op, bool is_continuation, bool is_non_blocking, bool noop); // The selector that performs event demultiplexing for the service. 
reactor& reactor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/reactive_descriptor_service.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) #endif // ASIO_DETAIL_REACTIVE_DESCRIPTOR_SERVICE_HPP galera-26.4.3/asio/asio/detail/tss_ptr.hpp0000664000177500017540000000326013540715002016716 0ustar dbartmy// // detail/tss_ptr.hpp // ~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_TSS_PTR_HPP #define ASIO_DETAIL_TSS_PTR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) # include "asio/detail/null_tss_ptr.hpp" #elif defined(ASIO_HAS_THREAD_KEYWORD_EXTENSION) # include "asio/detail/keyword_tss_ptr.hpp" #elif defined(ASIO_WINDOWS) # include "asio/detail/win_tss_ptr.hpp" #elif defined(ASIO_HAS_PTHREADS) # include "asio/detail/posix_tss_ptr.hpp" #else # error Only Windows and POSIX are supported! 
#endif #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class tss_ptr #if !defined(ASIO_HAS_THREADS) : public null_tss_ptr #elif defined(ASIO_HAS_THREAD_KEYWORD_EXTENSION) : public keyword_tss_ptr #elif defined(ASIO_WINDOWS) : public win_tss_ptr #elif defined(ASIO_HAS_PTHREADS) : public posix_tss_ptr #endif { public: void operator=(T* value) { #if !defined(ASIO_HAS_THREADS) null_tss_ptr::operator=(value); #elif defined(ASIO_HAS_THREAD_KEYWORD_EXTENSION) keyword_tss_ptr::operator=(value); #elif defined(ASIO_WINDOWS) win_tss_ptr::operator=(value); #elif defined(ASIO_HAS_PTHREADS) posix_tss_ptr::operator=(value); #endif } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_TSS_PTR_HPP galera-26.4.3/asio/asio/detail/old_win_sdk_compat.hpp0000664000177500017540000001042513540715002021060 0ustar dbartmy// // detail/old_win_sdk_compat.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_OLD_WIN_SDK_COMPAT_HPP #define ASIO_DETAIL_OLD_WIN_SDK_COMPAT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Guess whether we are building against on old Platform SDK. #if !defined(IN6ADDR_ANY_INIT) #define ASIO_HAS_OLD_WIN_SDK 1 #endif // !defined(IN6ADDR_ANY_INIT) #if defined(ASIO_HAS_OLD_WIN_SDK) // Emulation of types that are missing from old Platform SDKs. // // N.B. this emulation is also used if building for a Windows 2000 target with // a recent (i.e. Vista or later) SDK, as the SDK does not provide IPv6 support // in that case. 
#include "asio/detail/push_options.hpp" namespace asio { namespace detail { enum { sockaddr_storage_maxsize = 128, // Maximum size. sockaddr_storage_alignsize = (sizeof(__int64)), // Desired alignment. sockaddr_storage_pad1size = (sockaddr_storage_alignsize - sizeof(short)), sockaddr_storage_pad2size = (sockaddr_storage_maxsize - (sizeof(short) + sockaddr_storage_pad1size + sockaddr_storage_alignsize)) }; struct sockaddr_storage_emulation { short ss_family; char __ss_pad1[sockaddr_storage_pad1size]; __int64 __ss_align; char __ss_pad2[sockaddr_storage_pad2size]; }; struct in6_addr_emulation { union { u_char Byte[16]; u_short Word[8]; } u; }; #if !defined(s6_addr) # define _S6_un u # define _S6_u8 Byte # define s6_addr _S6_un._S6_u8 #endif // !defined(s6_addr) struct sockaddr_in6_emulation { short sin6_family; u_short sin6_port; u_long sin6_flowinfo; in6_addr_emulation sin6_addr; u_long sin6_scope_id; }; struct ipv6_mreq_emulation { in6_addr_emulation ipv6mr_multiaddr; unsigned int ipv6mr_interface; }; struct addrinfo_emulation { int ai_flags; int ai_family; int ai_socktype; int ai_protocol; size_t ai_addrlen; char* ai_canonname; sockaddr* ai_addr; addrinfo_emulation* ai_next; }; #if !defined(AI_PASSIVE) # define AI_PASSIVE 0x1 #endif #if !defined(AI_CANONNAME) # define AI_CANONNAME 0x2 #endif #if !defined(AI_NUMERICHOST) # define AI_NUMERICHOST 0x4 #endif #if !defined(EAI_AGAIN) # define EAI_AGAIN WSATRY_AGAIN #endif #if !defined(EAI_BADFLAGS) # define EAI_BADFLAGS WSAEINVAL #endif #if !defined(EAI_FAIL) # define EAI_FAIL WSANO_RECOVERY #endif #if !defined(EAI_FAMILY) # define EAI_FAMILY WSAEAFNOSUPPORT #endif #if !defined(EAI_MEMORY) # define EAI_MEMORY WSA_NOT_ENOUGH_MEMORY #endif #if !defined(EAI_NODATA) # define EAI_NODATA WSANO_DATA #endif #if !defined(EAI_NONAME) # define EAI_NONAME WSAHOST_NOT_FOUND #endif #if !defined(EAI_SERVICE) # define EAI_SERVICE WSATYPE_NOT_FOUND #endif #if !defined(EAI_SOCKTYPE) # define EAI_SOCKTYPE WSAESOCKTNOSUPPORT #endif #if 
!defined(NI_NOFQDN) # define NI_NOFQDN 0x01 #endif #if !defined(NI_NUMERICHOST) # define NI_NUMERICHOST 0x02 #endif #if !defined(NI_NAMEREQD) # define NI_NAMEREQD 0x04 #endif #if !defined(NI_NUMERICSERV) # define NI_NUMERICSERV 0x08 #endif #if !defined(NI_DGRAM) # define NI_DGRAM 0x10 #endif #if !defined(IPPROTO_IPV6) # define IPPROTO_IPV6 41 #endif #if !defined(IPV6_UNICAST_HOPS) # define IPV6_UNICAST_HOPS 4 #endif #if !defined(IPV6_MULTICAST_IF) # define IPV6_MULTICAST_IF 9 #endif #if !defined(IPV6_MULTICAST_HOPS) # define IPV6_MULTICAST_HOPS 10 #endif #if !defined(IPV6_MULTICAST_LOOP) # define IPV6_MULTICAST_LOOP 11 #endif #if !defined(IPV6_JOIN_GROUP) # define IPV6_JOIN_GROUP 12 #endif #if !defined(IPV6_LEAVE_GROUP) # define IPV6_LEAVE_GROUP 13 #endif } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_OLD_WIN_SDK) // Even newer Platform SDKs that support IPv6 may not define IPV6_V6ONLY. #if !defined(IPV6_V6ONLY) # define IPV6_V6ONLY 27 #endif // Some SDKs (e.g. Windows CE) don't define IPPROTO_ICMPV6. #if !defined(IPPROTO_ICMPV6) # define IPPROTO_ICMPV6 58 #endif #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) #endif // ASIO_DETAIL_OLD_WIN_SDK_COMPAT_HPP galera-26.4.3/asio/asio/detail/reactor_op.hpp0000664000177500017540000000256113540715002017360 0ustar dbartmy// // detail/reactor_op.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTOR_OP_HPP #define ASIO_DETAIL_REACTOR_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class reactor_op : public operation { public: // The error code to be passed to the completion handler. asio::error_code ec_; // The number of bytes transferred, to be passed to the completion handler. std::size_t bytes_transferred_; // Perform the operation. Returns true if it is finished. bool perform() { return perform_func_(this); } protected: typedef bool (*perform_func_type)(reactor_op*); reactor_op(perform_func_type perform_func, func_type complete_func) : operation(complete_func), bytes_transferred_(0), perform_func_(perform_func) { } private: perform_func_type perform_func_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_REACTOR_OP_HPP galera-26.4.3/asio/asio/detail/cstdint.hpp0000664000177500017540000000207313540715002016671 0ustar dbartmy// // detail/cstdint.hpp // ~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_CSTDINT_HPP #define ASIO_DETAIL_CSTDINT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_CSTDINT) # include #else // defined(ASIO_HAS_CSTDINT) # include #endif // defined(ASIO_HAS_CSTDINT) namespace asio { #if defined(ASIO_HAS_CSTDINT) using std::int16_t; using std::uint16_t; using std::int32_t; using std::uint32_t; using std::int64_t; using std::uint64_t; #else // defined(ASIO_HAS_CSTDINT) using boost::int16_t; using boost::uint16_t; using boost::int32_t; using boost::uint32_t; using boost::int64_t; using boost::uint64_t; #endif // defined(ASIO_HAS_CSTDINT) } // namespace asio #endif // ASIO_DETAIL_CSTDINT_HPP galera-26.4.3/asio/asio/detail/signal_handler.hpp0000664000177500017540000000463213540715002020176 0ustar dbartmy// // detail/signal_handler.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SIGNAL_HANDLER_HPP #define ASIO_DETAIL_SIGNAL_HANDLER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/signal_op.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class signal_handler : public signal_op { public: ASIO_DEFINE_HANDLER_PTR(signal_handler); signal_handler(Handler& h) : signal_op(&signal_handler::do_complete), handler_(ASIO_MOVE_CAST(Handler)(h)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. signal_handler* h(static_cast(base)); ptr p = { asio::detail::addressof(h->handler_), h, h }; ASIO_HANDLER_COMPLETION((h)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(h->handler_, h->ec_, h->signal_number_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. 
if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_SIGNAL_HANDLER_HPP galera-26.4.3/asio/asio/detail/push_options.hpp0000664000177500017540000000746313540715002017763 0ustar dbartmy// // detail/push_options.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // // No header guard #if defined(__COMO__) // Comeau C++ #elif defined(__DMC__) // Digital Mars C++ #elif defined(__INTEL_COMPILER) || defined(__ICL) \ || defined(__ICC) || defined(__ECC) // Intel C++ # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4) # pragma GCC visibility push (default) # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4) #elif defined(__clang__) // Clang # if defined(__OBJC__) # if !defined(__APPLE_CC__) || (__APPLE_CC__ <= 1) # if !defined(ASIO_DISABLE_OBJC_WORKAROUND) # if !defined(Protocol) && !defined(id) # define Protocol cpp_Protocol # define id cpp_id # define ASIO_OBJC_WORKAROUND # endif # endif # endif # endif # if !defined(_WIN32) && !defined(__WIN32__) && !defined(WIN32) # pragma GCC visibility push (default) # endif // !defined(_WIN32) && !defined(__WIN32__) && !defined(WIN32) #elif defined(__GNUC__) // GNU C++ # if defined(__MINGW32__) || defined(__CYGWIN__) # pragma pack (push, 8) # endif # if defined(__OBJC__) # if !defined(__APPLE_CC__) || (__APPLE_CC__ <= 1) # if !defined(ASIO_DISABLE_OBJC_WORKAROUND) # if !defined(Protocol) && !defined(id) # define Protocol cpp_Protocol # define id cpp_id # define ASIO_OBJC_WORKAROUND # endif # endif # endif # 
endif # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4) # pragma GCC visibility push (default) # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4) #elif defined(__KCC) // Kai C++ #elif defined(__sgi) // SGI MIPSpro C++ #elif defined(__DECCXX) // Compaq Tru64 Unix cxx #elif defined(__ghs) // Greenhills C++ #elif defined(__BORLANDC__) // Borland C++ # pragma option push -a8 -b -Ve- -Vx- -w-inl -vi- # pragma nopushoptwarn # pragma nopackwarning # if !defined(__MT__) # error Multithreaded RTL must be selected. # endif // !defined(__MT__) #elif defined(__MWERKS__) // Metrowerks CodeWarrior #elif defined(__SUNPRO_CC) // Sun Workshop Compiler C++ #elif defined(__HP_aCC) // HP aCC #elif defined(__MRC__) || defined(__SC__) // MPW MrCpp or SCpp #elif defined(__IBMCPP__) // IBM Visual Age #elif defined(_MSC_VER) // Microsoft Visual C++ // // Must remain the last #elif since some other vendors (Metrowerks, for example) // also #define _MSC_VER # pragma warning (disable:4103) # pragma warning (push) # pragma warning (disable:4127) # pragma warning (disable:4180) # pragma warning (disable:4244) # pragma warning (disable:4355) # pragma warning (disable:4510) # pragma warning (disable:4512) # pragma warning (disable:4610) # pragma warning (disable:4675) # if defined(_M_IX86) && defined(_Wp64) // The /Wp64 option is broken. If you want to check 64 bit portability, use a // 64 bit compiler! # pragma warning (disable:4311) # pragma warning (disable:4312) # endif // defined(_M_IX86) && defined(_Wp64) # pragma pack (push, 8) // Note that if the /Og optimisation flag is enabled with MSVC6, the compiler // has a tendency to incorrectly optimise away some calls to member template // functions, even though those functions contain code that should not be // optimised away! Therefore we will always disable this optimisation option // for the MSVC6 compiler. 
# if (_MSC_VER < 1300) # pragma optimize ("g", off) # endif # if !defined(_MT) # error Multithreaded RTL must be selected. # endif // !defined(_MT) # if defined(__cplusplus_cli) || defined(__cplusplus_winrt) # if !defined(ASIO_DISABLE_CLR_WORKAROUND) # if !defined(generic) # define generic cpp_generic # define ASIO_CLR_WORKAROUND # endif # endif # endif #endif galera-26.4.3/asio/asio/detail/handler_alloc_helpers.hpp0000664000177500017540000000375513540715002021542 0ustar dbartmy// // detail/handler_alloc_helpers.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_HANDLER_ALLOC_HELPERS_HPP #define ASIO_DETAIL_HANDLER_ALLOC_HELPERS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/handler_alloc_hook.hpp" #include "asio/detail/push_options.hpp" // Calls to asio_handler_allocate and asio_handler_deallocate must be made from // a namespace that does not contain any overloads of these functions. The // asio_handler_alloc_helpers namespace is defined here for that purpose. 
namespace asio_handler_alloc_helpers { template inline void* allocate(std::size_t s, Handler& h) { #if !defined(ASIO_HAS_HANDLER_HOOKS) return ::operator new(s); #else using asio::asio_handler_allocate; return asio_handler_allocate(s, asio::detail::addressof(h)); #endif } template inline void deallocate(void* p, std::size_t s, Handler& h) { #if !defined(ASIO_HAS_HANDLER_HOOKS) ::operator delete(p); #else using asio::asio_handler_deallocate; asio_handler_deallocate(p, s, asio::detail::addressof(h)); #endif } } // namespace asio_handler_alloc_helpers #define ASIO_DEFINE_HANDLER_PTR(op) \ struct ptr \ { \ Handler* h; \ void* v; \ op* p; \ ~ptr() \ { \ reset(); \ } \ void reset() \ { \ if (p) \ { \ p->~op(); \ p = 0; \ } \ if (v) \ { \ asio_handler_alloc_helpers::deallocate(v, sizeof(op), *h); \ v = 0; \ } \ } \ } \ /**/ #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_HANDLER_ALLOC_HELPERS_HPP galera-26.4.3/asio/asio/detail/resolver_service.hpp0000664000177500017540000000771613540715002020613 0ustar dbartmy// // detail/resolver_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_RESOLVER_SERVICE_HPP #define ASIO_DETAIL_RESOLVER_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS_RUNTIME) #include "asio/ip/basic_resolver_iterator.hpp" #include "asio/ip/basic_resolver_query.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/resolve_endpoint_op.hpp" #include "asio/detail/resolve_op.hpp" #include "asio/detail/resolver_service_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class resolver_service : public resolver_service_base { public: // The implementation type of the resolver. A cancellation token is used to // indicate to the background thread that the operation has been cancelled. typedef socket_ops::shared_cancel_token_type implementation_type; // The endpoint type. typedef typename Protocol::endpoint endpoint_type; // The query type. typedef asio::ip::basic_resolver_query query_type; // The iterator type. typedef asio::ip::basic_resolver_iterator iterator_type; // Constructor. resolver_service(asio::io_service& io_service) : resolver_service_base(io_service) { } // Resolve a query to a list of entries. iterator_type resolve(implementation_type&, const query_type& query, asio::error_code& ec) { asio::detail::addrinfo_type* address_info = 0; socket_ops::getaddrinfo(query.host_name().c_str(), query.service_name().c_str(), query.hints(), &address_info, ec); auto_addrinfo auto_address_info(address_info); return ec ? iterator_type() : iterator_type::create( address_info, query.host_name(), query.service_name()); } // Asynchronously resolve a query to a list of entries. template void async_resolve(implementation_type& impl, const query_type& query, Handler& handler) { // Allocate and construct an operation to wrap the handler. 
typedef resolve_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl, query, io_service_impl_, handler); ASIO_HANDLER_CREATION((p.p, "resolver", &impl, "async_resolve")); start_resolve_op(p.p); p.v = p.p = 0; } // Resolve an endpoint to a list of entries. iterator_type resolve(implementation_type&, const endpoint_type& endpoint, asio::error_code& ec) { char host_name[NI_MAXHOST]; char service_name[NI_MAXSERV]; socket_ops::sync_getnameinfo(endpoint.data(), endpoint.size(), host_name, NI_MAXHOST, service_name, NI_MAXSERV, endpoint.protocol().type(), ec); return ec ? iterator_type() : iterator_type::create( endpoint, host_name, service_name); } // Asynchronously resolve an endpoint to a list of entries. template void async_resolve(implementation_type& impl, const endpoint_type& endpoint, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef resolve_endpoint_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl, endpoint, io_service_impl_, handler); ASIO_HANDLER_CREATION((p.p, "resolver", &impl, "async_resolve")); start_resolve_op(p.p); p.v = p.p = 0; } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_RESOLVER_SERVICE_HPP galera-26.4.3/asio/asio/detail/shared_ptr.hpp0000664000177500017540000000171513540715002017356 0ustar dbartmy// // detail/shared_ptr.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SHARED_PTR_HPP #define ASIO_DETAIL_SHARED_PTR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_SHARED_PTR) # include #else // defined(ASIO_HAS_STD_SHARED_PTR) # include #endif // defined(ASIO_HAS_STD_SHARED_PTR) namespace asio { namespace detail { #if defined(ASIO_HAS_STD_SHARED_PTR) using std::shared_ptr; #else // defined(ASIO_HAS_STD_SHARED_PTR) using boost::shared_ptr; #endif // defined(ASIO_HAS_STD_SHARED_PTR) } // namespace detail } // namespace asio #endif // ASIO_DETAIL_SHARED_PTR_HPP galera-26.4.3/asio/asio/detail/pipe_select_interrupter.hpp0000664000177500017540000000457613540715002022172 0ustar dbartmy// // detail/pipe_select_interrupter.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_PIPE_SELECT_INTERRUPTER_HPP #define ASIO_DETAIL_PIPE_SELECT_INTERRUPTER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS) #if !defined(ASIO_WINDOWS_RUNTIME) #if !defined(__CYGWIN__) #if !defined(__SYMBIAN32__) #if !defined(ASIO_HAS_EVENTFD) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class pipe_select_interrupter { public: // Constructor. ASIO_DECL pipe_select_interrupter(); // Destructor. ASIO_DECL ~pipe_select_interrupter(); // Recreate the interrupter's descriptors. Used after a fork. ASIO_DECL void recreate(); // Interrupt the select call. ASIO_DECL void interrupt(); // Reset the select interrupt. 
Returns true if the call was interrupted. ASIO_DECL bool reset(); // Get the read descriptor to be passed to select. int read_descriptor() const { return read_descriptor_; } private: // Open the descriptors. Throws on error. ASIO_DECL void open_descriptors(); // Close the descriptors. ASIO_DECL void close_descriptors(); // The read end of a connection used to interrupt the select call. This file // descriptor is passed to select such that when it is time to stop, a single // byte will be written on the other end of the connection and this // descriptor will become readable. int read_descriptor_; // The write end of a connection used to interrupt the select call. A single // byte may be written to this to wake up the select which is waiting for the // other end to become readable. int write_descriptor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/pipe_select_interrupter.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // !defined(ASIO_HAS_EVENTFD) #endif // !defined(__SYMBIAN32__) #endif // !defined(__CYGWIN__) #endif // !defined(ASIO_WINDOWS_RUNTIME) #endif // !defined(ASIO_WINDOWS) #endif // ASIO_DETAIL_PIPE_SELECT_INTERRUPTER_HPP galera-26.4.3/asio/asio/detail/posix_tss_ptr.hpp0000664000177500017540000000322313540715002020137 0ustar dbartmy// // detail/posix_tss_ptr.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_POSIX_TSS_PTR_HPP #define ASIO_DETAIL_POSIX_TSS_PTR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_PTHREADS) #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Helper function to create thread-specific storage. ASIO_DECL void posix_tss_ptr_create(pthread_key_t& key); template class posix_tss_ptr : private noncopyable { public: // Constructor. posix_tss_ptr() { posix_tss_ptr_create(tss_key_); } // Destructor. ~posix_tss_ptr() { ::pthread_key_delete(tss_key_); } // Get the value. operator T*() const { return static_cast(::pthread_getspecific(tss_key_)); } // Set the value. void operator=(T* value) { ::pthread_setspecific(tss_key_, value); } private: // Thread-specific storage to allow unlocked access to determine whether a // thread is a member of the pool. pthread_key_t tss_key_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/posix_tss_ptr.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_PTHREADS) #endif // ASIO_DETAIL_POSIX_TSS_PTR_HPP galera-26.4.3/asio/asio/detail/gcc_sync_fenced_block.hpp0000664000177500017540000000302613540715002021466 0ustar dbartmy// // detail/gcc_sync_fenced_block.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_GCC_SYNC_FENCED_BLOCK_HPP #define ASIO_DETAIL_GCC_SYNC_FENCED_BLOCK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(__GNUC__) \ && ((__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)) \ && !defined(__INTEL_COMPILER) && !defined(__ICL) \ && !defined(__ICC) && !defined(__ECC) && !defined(__PATHSCALE__) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class gcc_sync_fenced_block : private noncopyable { public: enum half_or_full_t { half, full }; // Constructor. explicit gcc_sync_fenced_block(half_or_full_t) : value_(0) { __sync_lock_test_and_set(&value_, 1); } // Destructor. ~gcc_sync_fenced_block() { __sync_lock_release(&value_); } private: int value_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(__GNUC__) // && ((__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)) // && !defined(__INTEL_COMPILER) && !defined(__ICL) // && !defined(__ICC) && !defined(__ECC) && !defined(__PATHSCALE__) #endif // ASIO_DETAIL_GCC_SYNC_FENCED_BLOCK_HPP galera-26.4.3/asio/asio/detail/wince_thread.hpp0000664000177500017540000000444613540715002017663 0ustar dbartmy// // detail/wince_thread.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINCE_THREAD_HPP #define ASIO_DETAIL_WINCE_THREAD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) && defined(UNDER_CE) #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { DWORD WINAPI wince_thread_function(LPVOID arg); class wince_thread : private noncopyable { public: // Constructor. template wince_thread(Function f, unsigned int = 0) { std::auto_ptr arg(new func(f)); DWORD thread_id = 0; thread_ = ::CreateThread(0, 0, wince_thread_function, arg.get(), 0, &thread_id); if (!thread_) { DWORD last_error = ::GetLastError(); asio::error_code ec(last_error, asio::error::get_system_category()); asio::detail::throw_error(ec, "thread"); } arg.release(); } // Destructor. ~wince_thread() { ::CloseHandle(thread_); } // Wait for the thread to exit. 
void join() { ::WaitForSingleObject(thread_, INFINITE); } private: friend DWORD WINAPI wince_thread_function(LPVOID arg); class func_base { public: virtual ~func_base() {} virtual void run() = 0; }; template class func : public func_base { public: func(Function f) : f_(f) { } virtual void run() { f_(); } private: Function f_; }; ::HANDLE thread_; }; inline DWORD WINAPI wince_thread_function(LPVOID arg) { std::auto_ptr func( static_cast(arg)); func->run(); return 0; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS) && defined(UNDER_CE) #endif // ASIO_DETAIL_WINCE_THREAD_HPP galera-26.4.3/asio/asio/detail/reactive_socket_recv_op.hpp0000664000177500017540000000757213540715002022121 0ustar dbartmy// // detail/reactive_socket_recv_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_SOCKET_RECV_OP_HPP #define ASIO_DETAIL_REACTIVE_SOCKET_RECV_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class reactive_socket_recv_op_base : public reactor_op { public: reactive_socket_recv_op_base(socket_type socket, socket_ops::state_type state, const MutableBufferSequence& buffers, socket_base::message_flags flags, func_type complete_func) : reactor_op(&reactive_socket_recv_op_base::do_perform, complete_func), socket_(socket), state_(state), 
buffers_(buffers), flags_(flags) { } static bool do_perform(reactor_op* base) { reactive_socket_recv_op_base* o( static_cast(base)); buffer_sequence_adapter bufs(o->buffers_); return socket_ops::non_blocking_recv(o->socket_, bufs.buffers(), bufs.count(), o->flags_, (o->state_ & socket_ops::stream_oriented) != 0, o->ec_, o->bytes_transferred_); } private: socket_type socket_; socket_ops::state_type state_; MutableBufferSequence buffers_; socket_base::message_flags flags_; }; template class reactive_socket_recv_op : public reactive_socket_recv_op_base { public: ASIO_DEFINE_HANDLER_PTR(reactive_socket_recv_op); reactive_socket_recv_op(socket_type socket, socket_ops::state_type state, const MutableBufferSequence& buffers, socket_base::message_flags flags, Handler& handler) : reactive_socket_recv_op_base(socket, state, buffers, flags, &reactive_socket_recv_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. reactive_socket_recv_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, o->bytes_transferred_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. 
if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_REACTIVE_SOCKET_RECV_OP_HPP galera-26.4.3/asio/asio/detail/array.hpp0000664000177500017540000000160613540715002016340 0ustar dbartmy// // detail/array.hpp // ~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_ARRAY_HPP #define ASIO_DETAIL_ARRAY_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_ARRAY) # include #else // defined(ASIO_HAS_STD_ARRAY) # include #endif // defined(ASIO_HAS_STD_ARRAY) namespace asio { namespace detail { #if defined(ASIO_HAS_STD_ARRAY) using std::array; #else // defined(ASIO_HAS_STD_ARRAY) using boost::array; #endif // defined(ASIO_HAS_STD_ARRAY) } // namespace detail } // namespace asio #endif // ASIO_DETAIL_ARRAY_HPP galera-26.4.3/asio/asio/detail/throw_error.hpp0000664000177500017540000000231413540715002017573 0ustar dbartmy// // detail/throw_error.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_THROW_ERROR_HPP #define ASIO_DETAIL_THROW_ERROR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/error_code.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { ASIO_DECL void do_throw_error(const asio::error_code& err); ASIO_DECL void do_throw_error(const asio::error_code& err, const char* location); inline void throw_error(const asio::error_code& err) { if (err) do_throw_error(err); } inline void throw_error(const asio::error_code& err, const char* location) { if (err) do_throw_error(err, location); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/throw_error.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_DETAIL_THROW_ERROR_HPP galera-26.4.3/asio/asio/detail/timer_queue_base.hpp0000664000177500017540000000317513540715002020543 0ustar dbartmy// // detail/timer_queue_base.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_TIMER_QUEUE_BASE_HPP #define ASIO_DETAIL_TIMER_QUEUE_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class timer_queue_base : private noncopyable { public: // Constructor. timer_queue_base() : next_(0) {} // Destructor. 
virtual ~timer_queue_base() {} // Whether there are no timers in the queue. virtual bool empty() const = 0; // Get the time to wait until the next timer. virtual long wait_duration_msec(long max_duration) const = 0; // Get the time to wait until the next timer. virtual long wait_duration_usec(long max_duration) const = 0; // Dequeue all ready timers. virtual void get_ready_timers(op_queue& ops) = 0; // Dequeue all timers. virtual void get_all_timers(op_queue& ops) = 0; private: friend class timer_queue_set; // Next timer queue in the set. timer_queue_base* next_; }; template class timer_queue; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_TIMER_QUEUE_BASE_HPP galera-26.4.3/asio/asio/detail/reactive_socket_send_op.hpp0000664000177500017540000000727313540715002022111 0ustar dbartmy// // detail/reactive_socket_send_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_SOCKET_SEND_OP_HPP #define ASIO_DETAIL_REACTIVE_SOCKET_SEND_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class reactive_socket_send_op_base : public reactor_op { public: reactive_socket_send_op_base(socket_type socket, const ConstBufferSequence& buffers, socket_base::message_flags flags, func_type complete_func) : reactor_op(&reactive_socket_send_op_base::do_perform, complete_func), socket_(socket), buffers_(buffers), flags_(flags) { } static bool do_perform(reactor_op* base) { reactive_socket_send_op_base* o( static_cast(base)); buffer_sequence_adapter bufs(o->buffers_); return socket_ops::non_blocking_send(o->socket_, bufs.buffers(), bufs.count(), o->flags_, o->ec_, o->bytes_transferred_); } private: socket_type socket_; ConstBufferSequence buffers_; socket_base::message_flags flags_; }; template class reactive_socket_send_op : public reactive_socket_send_op_base { public: ASIO_DEFINE_HANDLER_PTR(reactive_socket_send_op); reactive_socket_send_op(socket_type socket, const ConstBufferSequence& buffers, socket_base::message_flags flags, Handler& handler) : reactive_socket_send_op_base(socket, buffers, flags, &reactive_socket_send_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. 
reactive_socket_send_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, o->bytes_transferred_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_REACTIVE_SOCKET_SEND_OP_HPP galera-26.4.3/asio/asio/detail/reactive_null_buffers_op.hpp0000664000177500017540000000522213540715002022266 0ustar dbartmy// // detail/reactive_null_buffers_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_NULL_BUFFERS_OP_HPP #define ASIO_DETAIL_REACTIVE_NULL_BUFFERS_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class reactive_null_buffers_op : public reactor_op { public: ASIO_DEFINE_HANDLER_PTR(reactive_null_buffers_op); reactive_null_buffers_op(Handler& handler) : reactor_op(&reactive_null_buffers_op::do_perform, &reactive_null_buffers_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static bool do_perform(reactor_op*) { return true; } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. reactive_null_buffers_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, o->bytes_transferred_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. 
if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_REACTIVE_NULL_BUFFERS_OP_HPP galera-26.4.3/asio/asio/detail/win_iocp_socket_service_base.hpp0000664000177500017540000004646013540715002023122 0ustar dbartmy// // detail/win_iocp_socket_service_base.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_SOCKET_SERVICE_BASE_HPP #define ASIO_DETAIL_WIN_IOCP_SOCKET_SERVICE_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/socket_base.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/reactor.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_holder.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/win_iocp_io_service.hpp" #include "asio/detail/win_iocp_null_buffers_op.hpp" #include "asio/detail/win_iocp_socket_connect_op.hpp" #include "asio/detail/win_iocp_socket_send_op.hpp" #include "asio/detail/win_iocp_socket_recv_op.hpp" #include 
"asio/detail/win_iocp_socket_recvmsg_op.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class win_iocp_socket_service_base { public: // The implementation type of the socket. struct base_implementation_type { // The native socket representation. socket_type socket_; // The current state of the socket. socket_ops::state_type state_; // We use a shared pointer as a cancellation token here to work around the // broken Windows support for cancellation. MSDN says that when you call // closesocket any outstanding WSARecv or WSASend operations will complete // with the error ERROR_OPERATION_ABORTED. In practice they complete with // ERROR_NETNAME_DELETED, which means you can't tell the difference between // a local cancellation and the socket being hard-closed by the peer. socket_ops::shared_cancel_token_type cancel_token_; // Per-descriptor data used by the reactor. reactor::per_descriptor_data reactor_data_; #if defined(ASIO_ENABLE_CANCELIO) // The ID of the thread from which it is safe to cancel asynchronous // operations. 0 means no asynchronous operations have been started yet. // ~0 means asynchronous operations have been started from more than one // thread, and cancellation is not supported for the socket. DWORD safe_cancellation_thread_id_; #endif // defined(ASIO_ENABLE_CANCELIO) // Pointers to adjacent socket implementations in linked list. base_implementation_type* next_; base_implementation_type* prev_; }; // Constructor. ASIO_DECL win_iocp_socket_service_base( asio::io_service& io_service); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Construct a new socket implementation. ASIO_DECL void construct(base_implementation_type& impl); // Move-construct a new socket implementation. ASIO_DECL void base_move_construct(base_implementation_type& impl, base_implementation_type& other_impl); // Move-assign from another socket implementation. 
ASIO_DECL void base_move_assign(base_implementation_type& impl, win_iocp_socket_service_base& other_service, base_implementation_type& other_impl); // Destroy a socket implementation. ASIO_DECL void destroy(base_implementation_type& impl); // Determine whether the socket is open. bool is_open(const base_implementation_type& impl) const { return impl.socket_ != invalid_socket; } // Destroy a socket implementation. ASIO_DECL asio::error_code close( base_implementation_type& impl, asio::error_code& ec); // Cancel all operations associated with the socket. ASIO_DECL asio::error_code cancel( base_implementation_type& impl, asio::error_code& ec); // Determine whether the socket is at the out-of-band data mark. bool at_mark(const base_implementation_type& impl, asio::error_code& ec) const { return socket_ops::sockatmark(impl.socket_, ec); } // Determine the number of bytes available for reading. std::size_t available(const base_implementation_type& impl, asio::error_code& ec) const { return socket_ops::available(impl.socket_, ec); } // Place the socket into the state where it will listen for new connections. asio::error_code listen(base_implementation_type& impl, int backlog, asio::error_code& ec) { socket_ops::listen(impl.socket_, backlog, ec); return ec; } // Perform an IO control command on the socket. template asio::error_code io_control(base_implementation_type& impl, IO_Control_Command& command, asio::error_code& ec) { socket_ops::ioctl(impl.socket_, impl.state_, command.name(), static_cast(command.data()), ec); return ec; } // Gets the non-blocking mode of the socket. bool non_blocking(const base_implementation_type& impl) const { return (impl.state_ & socket_ops::user_set_non_blocking) != 0; } // Sets the non-blocking mode of the socket. 
asio::error_code non_blocking(base_implementation_type& impl, bool mode, asio::error_code& ec) { socket_ops::set_user_non_blocking(impl.socket_, impl.state_, mode, ec); return ec; } // Gets the non-blocking mode of the native socket implementation. bool native_non_blocking(const base_implementation_type& impl) const { return (impl.state_ & socket_ops::internal_non_blocking) != 0; } // Sets the non-blocking mode of the native socket implementation. asio::error_code native_non_blocking(base_implementation_type& impl, bool mode, asio::error_code& ec) { socket_ops::set_internal_non_blocking(impl.socket_, impl.state_, mode, ec); return ec; } // Disable sends or receives on the socket. asio::error_code shutdown(base_implementation_type& impl, socket_base::shutdown_type what, asio::error_code& ec) { socket_ops::shutdown(impl.socket_, what, ec); return ec; } // Send the given data to the peer. Returns the number of bytes sent. template size_t send(base_implementation_type& impl, const ConstBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); return socket_ops::sync_send(impl.socket_, impl.state_, bufs.buffers(), bufs.count(), flags, bufs.all_empty(), ec); } // Wait until data can be sent without blocking. size_t send(base_implementation_type& impl, const null_buffers&, socket_base::message_flags, asio::error_code& ec) { // Wait for socket to become ready. socket_ops::poll_write(impl.socket_, impl.state_, ec); return 0; } // Start an asynchronous send. The data being sent must be valid for the // lifetime of the asynchronous operation. template void async_send(base_implementation_type& impl, const ConstBufferSequence& buffers, socket_base::message_flags flags, Handler& handler) { // Allocate and construct an operation to wrap the handler. 
typedef win_iocp_socket_send_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.cancel_token_, buffers, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_send")); buffer_sequence_adapter bufs(buffers); start_send_op(impl, bufs.buffers(), bufs.count(), flags, (impl.state_ & socket_ops::stream_oriented) != 0 && bufs.all_empty(), p.p); p.v = p.p = 0; } // Start an asynchronous wait until data can be sent without blocking. template void async_send(base_implementation_type& impl, const null_buffers&, socket_base::message_flags, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.cancel_token_, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_send(null_buffers)")); start_reactor_op(impl, reactor::write_op, p.p); p.v = p.p = 0; } // Receive some data from the peer. Returns the number of bytes received. template size_t receive(base_implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); return socket_ops::sync_recv(impl.socket_, impl.state_, bufs.buffers(), bufs.count(), flags, bufs.all_empty(), ec); } // Wait until data can be received without blocking. size_t receive(base_implementation_type& impl, const null_buffers&, socket_base::message_flags, asio::error_code& ec) { // Wait for socket to become ready. socket_ops::poll_read(impl.socket_, impl.state_, ec); return 0; } // Start an asynchronous receive. The buffer for the data being received // must be valid for the lifetime of the asynchronous operation. 
template void async_receive(base_implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags flags, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_socket_recv_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.state_, impl.cancel_token_, buffers, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_receive")); buffer_sequence_adapter bufs(buffers); start_receive_op(impl, bufs.buffers(), bufs.count(), flags, (impl.state_ & socket_ops::stream_oriented) != 0 && bufs.all_empty(), p.p); p.v = p.p = 0; } // Wait until data can be received without blocking. template void async_receive(base_implementation_type& impl, const null_buffers&, socket_base::message_flags flags, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.cancel_token_, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_receive(null_buffers)")); start_null_buffers_receive_op(impl, flags, p.p); p.v = p.p = 0; } // Receive some data with associated flags. Returns the number of bytes // received. template size_t receive_with_flags(base_implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags in_flags, socket_base::message_flags& out_flags, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); return socket_ops::sync_recvmsg(impl.socket_, impl.state_, bufs.buffers(), bufs.count(), in_flags, out_flags, ec); } // Wait until data can be received without blocking. 
size_t receive_with_flags(base_implementation_type& impl, const null_buffers&, socket_base::message_flags, socket_base::message_flags& out_flags, asio::error_code& ec) { // Wait for socket to become ready. socket_ops::poll_read(impl.socket_, impl.state_, ec); // Clear out_flags, since we cannot give it any other sensible value when // performing a null_buffers operation. out_flags = 0; return 0; } // Start an asynchronous receive. The buffer for the data being received // must be valid for the lifetime of the asynchronous operation. template void async_receive_with_flags(base_implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags in_flags, socket_base::message_flags& out_flags, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_socket_recvmsg_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.cancel_token_, buffers, out_flags, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_receive_with_flags")); buffer_sequence_adapter bufs(buffers); start_receive_op(impl, bufs.buffers(), bufs.count(), in_flags, false, p.p); p.v = p.p = 0; } // Wait until data can be received without blocking. template void async_receive_with_flags(base_implementation_type& impl, const null_buffers&, socket_base::message_flags in_flags, socket_base::message_flags& out_flags, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.cancel_token_, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_receive_with_flags(null_buffers)")); // Reset out_flags since it can be given no sensible value at this time. 
out_flags = 0; start_null_buffers_receive_op(impl, in_flags, p.p); p.v = p.p = 0; } // Helper function to restart an asynchronous accept operation. ASIO_DECL void restart_accept_op(socket_type s, socket_holder& new_socket, int family, int type, int protocol, void* output_buffer, DWORD address_length, operation* op); protected: // Open a new socket implementation. ASIO_DECL asio::error_code do_open( base_implementation_type& impl, int family, int type, int protocol, asio::error_code& ec); // Assign a native socket to a socket implementation. ASIO_DECL asio::error_code do_assign( base_implementation_type& impl, int type, socket_type native_socket, asio::error_code& ec); // Helper function to start an asynchronous send operation. ASIO_DECL void start_send_op(base_implementation_type& impl, WSABUF* buffers, std::size_t buffer_count, socket_base::message_flags flags, bool noop, operation* op); // Helper function to start an asynchronous send_to operation. ASIO_DECL void start_send_to_op(base_implementation_type& impl, WSABUF* buffers, std::size_t buffer_count, const socket_addr_type* addr, int addrlen, socket_base::message_flags flags, operation* op); // Helper function to start an asynchronous receive operation. ASIO_DECL void start_receive_op(base_implementation_type& impl, WSABUF* buffers, std::size_t buffer_count, socket_base::message_flags flags, bool noop, operation* op); // Helper function to start an asynchronous null_buffers receive operation. ASIO_DECL void start_null_buffers_receive_op( base_implementation_type& impl, socket_base::message_flags flags, reactor_op* op); // Helper function to start an asynchronous receive_from operation. ASIO_DECL void start_receive_from_op(base_implementation_type& impl, WSABUF* buffers, std::size_t buffer_count, socket_addr_type* addr, socket_base::message_flags flags, int* addrlen, operation* op); // Helper function to start an asynchronous accept operation. 
ASIO_DECL void start_accept_op(base_implementation_type& impl, bool peer_is_open, socket_holder& new_socket, int family, int type, int protocol, void* output_buffer, DWORD address_length, operation* op); // Start an asynchronous read or write operation using the reactor. ASIO_DECL void start_reactor_op(base_implementation_type& impl, int op_type, reactor_op* op); // Start the asynchronous connect operation using the reactor. ASIO_DECL void start_connect_op(base_implementation_type& impl, int family, int type, const socket_addr_type* remote_addr, std::size_t remote_addrlen, win_iocp_socket_connect_op_base* op); // Helper function to close a socket when the associated object is being // destroyed. ASIO_DECL void close_for_destruction(base_implementation_type& impl); // Update the ID of the thread from which cancellation is safe. ASIO_DECL void update_cancellation_thread_id( base_implementation_type& impl); // Helper function to get the reactor. If no reactor has been created yet, a // new one is obtained from the io_service and a pointer to it is cached in // this service. ASIO_DECL reactor& get_reactor(); // The type of a ConnectEx function pointer, as old SDKs may not provide it. typedef BOOL (PASCAL *connect_ex_fn)(SOCKET, const socket_addr_type*, int, void*, DWORD, DWORD*, OVERLAPPED*); // Helper function to get the ConnectEx pointer. If no ConnectEx pointer has // been obtained yet, one is obtained using WSAIoctl and the pointer is // cached. Returns a null pointer if ConnectEx is not available. ASIO_DECL connect_ex_fn get_connect_ex( base_implementation_type& impl, int type); // Helper function to emulate InterlockedCompareExchangePointer functionality // for: // - very old Platform SDKs; and // - platform SDKs where MSVC's /Wp64 option causes spurious warnings. 
ASIO_DECL void* interlocked_compare_exchange_pointer( void** dest, void* exch, void* cmp); // Helper function to emulate InterlockedExchangePointer functionality for: // - very old Platform SDKs; and // - platform SDKs where MSVC's /Wp64 option causes spurious warnings. ASIO_DECL void* interlocked_exchange_pointer(void** dest, void* val); // The io_service used to obtain the reactor, if required. asio::io_service& io_service_; // The IOCP service used for running asynchronous operations and dispatching // handlers. win_iocp_io_service& iocp_service_; // The reactor used for performing connect operations. This object is created // only if needed. reactor* reactor_; // Pointer to ConnectEx implementation. void* connect_ex_; // Mutex to protect access to the linked list of implementations. asio::detail::mutex mutex_; // The head of a linked list of all implementations. base_implementation_type* impl_list_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/win_iocp_socket_service_base.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_SOCKET_SERVICE_BASE_HPP galera-26.4.3/asio/asio/detail/null_signal_blocker.hpp0000664000177500017540000000274613540715002021240 0ustar dbartmy// // detail/null_signal_blocker.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_NULL_SIGNAL_BLOCKER_HPP #define ASIO_DETAIL_NULL_SIGNAL_BLOCKER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) \ || defined(ASIO_WINDOWS) \ || defined(ASIO_WINDOWS_RUNTIME) \ || defined(__CYGWIN__) \ || defined(__SYMBIAN32__) #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class null_signal_blocker : private noncopyable { public: // Constructor blocks all signals for the calling thread. null_signal_blocker() { } // Destructor restores the previous signal mask. ~null_signal_blocker() { } // Block all signals for the calling thread. void block() { } // Restore the previous signal mask. void unblock() { } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_HAS_THREADS) // || defined(ASIO_WINDOWS) // || defined(ASIO_WINDOWS_RUNTIME) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) #endif // ASIO_DETAIL_NULL_SIGNAL_BLOCKER_HPP galera-26.4.3/asio/asio/detail/gcc_hppa_fenced_block.hpp0000664000177500017540000000263713540715002021451 0ustar dbartmy// // detail/gcc_hppa_fenced_block.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_GCC_HPPA_FENCED_BLOCK_HPP #define ASIO_DETAIL_GCC_HPPA_FENCED_BLOCK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(__GNUC__) && (defined(__hppa) || defined(__hppa__)) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class gcc_hppa_fenced_block : private noncopyable { public: enum half_t { half }; enum full_t { full }; // Constructor for a half fenced block. explicit gcc_hppa_fenced_block(half_t) { } // Constructor for a full fenced block. explicit gcc_hppa_fenced_block(full_t) { barrier(); } // Destructor. ~gcc_hppa_fenced_block() { barrier(); } private: static void barrier() { // This is just a placeholder and almost certainly not sufficient. __asm__ __volatile__ ("" : : : "memory"); } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(__GNUC__) && (defined(__hppa) || defined(__hppa__)) #endif // ASIO_DETAIL_GCC_HPPA_FENCED_BLOCK_HPP galera-26.4.3/asio/asio/detail/reactive_socket_recvmsg_op.hpp0000664000177500017540000000776713540715002022636 0ustar dbartmy// // detail/reactive_socket_recvmsg_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_SOCKET_RECVMSG_OP_HPP #define ASIO_DETAIL_REACTIVE_SOCKET_RECVMSG_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/socket_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class reactive_socket_recvmsg_op_base : public reactor_op { public: reactive_socket_recvmsg_op_base(socket_type socket, const MutableBufferSequence& buffers, socket_base::message_flags in_flags, socket_base::message_flags& out_flags, func_type complete_func) : reactor_op(&reactive_socket_recvmsg_op_base::do_perform, complete_func), socket_(socket), buffers_(buffers), in_flags_(in_flags), out_flags_(out_flags) { } static bool do_perform(reactor_op* base) { reactive_socket_recvmsg_op_base* o( static_cast(base)); buffer_sequence_adapter bufs(o->buffers_); return socket_ops::non_blocking_recvmsg(o->socket_, bufs.buffers(), bufs.count(), o->in_flags_, o->out_flags_, o->ec_, o->bytes_transferred_); } private: socket_type socket_; MutableBufferSequence buffers_; socket_base::message_flags in_flags_; socket_base::message_flags& out_flags_; }; template class reactive_socket_recvmsg_op : public reactive_socket_recvmsg_op_base { public: ASIO_DEFINE_HANDLER_PTR(reactive_socket_recvmsg_op); reactive_socket_recvmsg_op(socket_type socket, const MutableBufferSequence& buffers, socket_base::message_flags in_flags, socket_base::message_flags& out_flags, Handler& handler) : reactive_socket_recvmsg_op_base(socket, buffers, in_flags, out_flags, &reactive_socket_recvmsg_op::do_complete), 
handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. reactive_socket_recvmsg_op* o( static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, o->bytes_transferred_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_REACTIVE_SOCKET_RECVMSG_OP_HPP galera-26.4.3/asio/asio/detail/posix_signal_blocker.hpp0000664000177500017540000000353213540715002021422 0ustar dbartmy// // detail/posix_signal_blocker.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_POSIX_SIGNAL_BLOCKER_HPP #define ASIO_DETAIL_POSIX_SIGNAL_BLOCKER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_PTHREADS) #include #include #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class posix_signal_blocker : private noncopyable { public: // Constructor blocks all signals for the calling thread. posix_signal_blocker() : blocked_(false) { sigset_t new_mask; sigfillset(&new_mask); blocked_ = (pthread_sigmask(SIG_BLOCK, &new_mask, &old_mask_) == 0); } // Destructor restores the previous signal mask. ~posix_signal_blocker() { if (blocked_) pthread_sigmask(SIG_SETMASK, &old_mask_, 0); } // Block all signals for the calling thread. void block() { if (!blocked_) { sigset_t new_mask; sigfillset(&new_mask); blocked_ = (pthread_sigmask(SIG_BLOCK, &new_mask, &old_mask_) == 0); } } // Restore the previous signal mask. void unblock() { if (blocked_) blocked_ = (pthread_sigmask(SIG_SETMASK, &old_mask_, 0) != 0); } private: // Have signals been blocked. bool blocked_; // The previous signal mask. sigset_t old_mask_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_PTHREADS) #endif // ASIO_DETAIL_POSIX_SIGNAL_BLOCKER_HPP galera-26.4.3/asio/asio/detail/op_queue.hpp0000664000177500017540000000601213540715002017040 0ustar dbartmy// // detail/op_queue.hpp // ~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_OP_QUEUE_HPP #define ASIO_DETAIL_OP_QUEUE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class op_queue; class op_queue_access { public: template static Operation* next(Operation* o) { return static_cast(o->next_); } template static void next(Operation1*& o1, Operation2* o2) { o1->next_ = o2; } template static void destroy(Operation* o) { o->destroy(); } template static Operation*& front(op_queue& q) { return q.front_; } template static Operation*& back(op_queue& q) { return q.back_; } }; template class op_queue : private noncopyable { public: // Constructor. op_queue() : front_(0), back_(0) { } // Destructor destroys all operations. ~op_queue() { while (Operation* op = front_) { pop(); op_queue_access::destroy(op); } } // Get the operation at the front of the queue. Operation* front() { return front_; } // Pop an operation from the front of the queue. void pop() { if (front_) { Operation* tmp = front_; front_ = op_queue_access::next(front_); if (front_ == 0) back_ = 0; op_queue_access::next(tmp, static_cast(0)); } } // Push an operation on to the back of the queue. void push(Operation* h) { op_queue_access::next(h, static_cast(0)); if (back_) { op_queue_access::next(back_, h); back_ = h; } else { front_ = back_ = h; } } // Push all operations from another queue on to the back of the queue. The // source queue may contain operations of a derived type. template void push(op_queue& q) { if (Operation* other_front = op_queue_access::front(q)) { if (back_) op_queue_access::next(back_, other_front); else front_ = other_front; back_ = op_queue_access::back(q); op_queue_access::front(q) = 0; op_queue_access::back(q) = 0; } } // Whether the queue is empty. 
bool empty() const { return front_ == 0; } private: friend class op_queue_access; // The front of the queue. Operation* front_; // The back of the queue. Operation* back_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_OP_QUEUE_HPP galera-26.4.3/asio/asio/detail/handler_invoke_helpers.hpp0000664000177500017540000000315213540715002021732 0ustar dbartmy// // detail/handler_invoke_helpers.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_HANDLER_INVOKE_HELPERS_HPP #define ASIO_DETAIL_HANDLER_INVOKE_HELPERS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/addressof.hpp" #include "asio/handler_invoke_hook.hpp" #include "asio/detail/push_options.hpp" // Calls to asio_handler_invoke must be made from a namespace that does not // contain overloads of this function. The asio_handler_invoke_helpers // namespace is defined here for that purpose. 
namespace asio_handler_invoke_helpers { template inline void invoke(Function& function, Context& context) { #if !defined(ASIO_HAS_HANDLER_HOOKS) Function tmp(function); tmp(); #else using asio::asio_handler_invoke; asio_handler_invoke(function, asio::detail::addressof(context)); #endif } template inline void invoke(const Function& function, Context& context) { #if !defined(ASIO_HAS_HANDLER_HOOKS) Function tmp(function); tmp(); #else using asio::asio_handler_invoke; asio_handler_invoke(function, asio::detail::addressof(context)); #endif } } // namespace asio_handler_invoke_helpers #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_HANDLER_INVOKE_HELPERS_HPP galera-26.4.3/asio/asio/detail/thread_info_base.hpp0000664000177500017540000000414113540715002020473 0ustar dbartmy// // detail/thread_info_base.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_THREAD_INFO_BASE_HPP #define ASIO_DETAIL_THREAD_INFO_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class thread_info_base : private noncopyable { public: thread_info_base() : reusable_memory_(0) { } ~thread_info_base() { if (reusable_memory_) ::operator delete(reusable_memory_); } static void* allocate(thread_info_base* this_thread, std::size_t size) { if (this_thread && this_thread->reusable_memory_) { void* const pointer = this_thread->reusable_memory_; this_thread->reusable_memory_ = 0; unsigned char* const mem = static_cast(pointer); if (static_cast(mem[0]) >= size) { mem[size] = mem[0]; return pointer; } ::operator delete(pointer); } void* const pointer = ::operator new(size + 1); unsigned char* const mem = static_cast(pointer); mem[size] = (size <= UCHAR_MAX) ? static_cast(size) : 0; return pointer; } static void deallocate(thread_info_base* this_thread, void* pointer, std::size_t size) { if (size <= UCHAR_MAX) { if (this_thread && this_thread->reusable_memory_ == 0) { unsigned char* const mem = static_cast(pointer); mem[0] = mem[size]; this_thread->reusable_memory_ = pointer; return; } } ::operator delete(pointer); } private: void* reusable_memory_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_THREAD_INFO_BASE_HPP galera-26.4.3/asio/asio/detail/regex_fwd.hpp0000664000177500017540000000146013540715002017172 0ustar dbartmy// // detail/regex_fwd.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REGEX_FWD_HPP #define ASIO_DETAIL_REGEX_FWD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #if defined(ASIO_HAS_BOOST_REGEX) #include #include namespace boost { template struct sub_match; template class match_results; } // namespace boost #endif // defined(ASIO_HAS_BOOST_REGEX) #endif // ASIO_DETAIL_REGEX_FWD_HPP galera-26.4.3/asio/asio/detail/winrt_async_manager.hpp0000664000177500017540000002026413540715002021255 0ustar dbartmy// // detail/winrt_async_manager.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_ASYNC_MANAGER_HPP #define ASIO_DETAIL_WINRT_ASYNC_MANAGER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include #include "asio/detail/atomic_count.hpp" #include "asio/detail/winrt_async_op.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class winrt_async_manager : public asio::detail::service_base { public: // Constructor. winrt_async_manager(asio::io_service& io_service) : asio::detail::service_base(io_service), io_service_(use_service(io_service)), outstanding_ops_(1) { } // Destructor. ~winrt_async_manager() { } // Destroy all user-defined handler objects owned by the service. void shutdown_service() { if (--outstanding_ops_ > 0) { // Block until last operation is complete. 
std::future f = promise_.get_future(); f.wait(); } } void sync(Windows::Foundation::IAsyncAction^ action, asio::error_code& ec) { using namespace Windows::Foundation; using Windows::Foundation::AsyncStatus; auto promise = std::make_shared>(); auto future = promise->get_future(); action->Completed = ref new AsyncActionCompletedHandler( [promise](IAsyncAction^ action, AsyncStatus status) { switch (status) { case AsyncStatus::Canceled: promise->set_value(asio::error::operation_aborted); break; case AsyncStatus::Error: case AsyncStatus::Completed: default: asio::error_code ec( action->ErrorCode.Value, asio::system_category()); promise->set_value(ec); break; } }); ec = future.get(); } template TResult sync(Windows::Foundation::IAsyncOperation^ operation, asio::error_code& ec) { using namespace Windows::Foundation; using Windows::Foundation::AsyncStatus; auto promise = std::make_shared>(); auto future = promise->get_future(); operation->Completed = ref new AsyncOperationCompletedHandler( [promise](IAsyncOperation^ operation, AsyncStatus status) { switch (status) { case AsyncStatus::Canceled: promise->set_value(asio::error::operation_aborted); break; case AsyncStatus::Error: case AsyncStatus::Completed: default: asio::error_code ec( operation->ErrorCode.Value, asio::system_category()); promise->set_value(ec); break; } }); ec = future.get(); return operation->GetResults(); } template TResult sync( Windows::Foundation::IAsyncOperationWithProgress< TResult, TProgress>^ operation, asio::error_code& ec) { using namespace Windows::Foundation; using Windows::Foundation::AsyncStatus; auto promise = std::make_shared>(); auto future = promise->get_future(); operation->Completed = ref new AsyncOperationWithProgressCompletedHandler( [promise](IAsyncOperationWithProgress^ operation, AsyncStatus status) { switch (status) { case AsyncStatus::Canceled: promise->set_value(asio::error::operation_aborted); break; case AsyncStatus::Started: break; case AsyncStatus::Error: case 
AsyncStatus::Completed: default: asio::error_code ec( operation->ErrorCode.Value, asio::system_category()); promise->set_value(ec); break; } }); ec = future.get(); return operation->GetResults(); } void async(Windows::Foundation::IAsyncAction^ action, winrt_async_op* handler) { using namespace Windows::Foundation; using Windows::Foundation::AsyncStatus; auto on_completed = ref new AsyncActionCompletedHandler( [this, handler](IAsyncAction^ action, AsyncStatus status) { switch (status) { case AsyncStatus::Canceled: handler->ec_ = asio::error::operation_aborted; break; case AsyncStatus::Started: return; case AsyncStatus::Completed: case AsyncStatus::Error: default: handler->ec_ = asio::error_code( action->ErrorCode.Value, asio::system_category()); break; } io_service_.post_deferred_completion(handler); if (--outstanding_ops_ == 0) promise_.set_value(); }); io_service_.work_started(); ++outstanding_ops_; action->Completed = on_completed; } template void async(Windows::Foundation::IAsyncOperation^ operation, winrt_async_op* handler) { using namespace Windows::Foundation; using Windows::Foundation::AsyncStatus; auto on_completed = ref new AsyncOperationCompletedHandler( [this, handler](IAsyncOperation^ operation, AsyncStatus status) { switch (status) { case AsyncStatus::Canceled: handler->ec_ = asio::error::operation_aborted; break; case AsyncStatus::Started: return; case AsyncStatus::Completed: handler->result_ = operation->GetResults(); // Fall through. 
case AsyncStatus::Error: default: handler->ec_ = asio::error_code( operation->ErrorCode.Value, asio::system_category()); break; } io_service_.post_deferred_completion(handler); if (--outstanding_ops_ == 0) promise_.set_value(); }); io_service_.work_started(); ++outstanding_ops_; operation->Completed = on_completed; } template void async( Windows::Foundation::IAsyncOperationWithProgress< TResult, TProgress>^ operation, winrt_async_op* handler) { using namespace Windows::Foundation; using Windows::Foundation::AsyncStatus; auto on_completed = ref new AsyncOperationWithProgressCompletedHandler( [this, handler](IAsyncOperationWithProgress< TResult, TProgress>^ operation, AsyncStatus status) { switch (status) { case AsyncStatus::Canceled: handler->ec_ = asio::error::operation_aborted; break; case AsyncStatus::Started: return; case AsyncStatus::Completed: handler->result_ = operation->GetResults(); // Fall through. case AsyncStatus::Error: default: handler->ec_ = asio::error_code( operation->ErrorCode.Value, asio::system_category()); break; } io_service_.post_deferred_completion(handler); if (--outstanding_ops_ == 0) promise_.set_value(); }); io_service_.work_started(); ++outstanding_ops_; operation->Completed = on_completed; } private: // The io_service implementation used to post completed handlers. io_service_impl& io_service_; // Count of outstanding operations. atomic_count outstanding_ops_; // Used to keep wait for outstanding operations to complete. std::promise promise_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_WINRT_ASYNC_MANAGER_HPP galera-26.4.3/asio/asio/detail/win_iocp_handle_read_op.hpp0000664000177500017540000000652113540715002022036 0ustar dbartmy// // detail/win_iocp_handle_read_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Rep Invariant Systems, Inc. 
(info@repinvariant.com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_HANDLE_READ_OP_HPP #define ASIO_DETAIL_WIN_IOCP_HANDLE_READ_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/error.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class win_iocp_handle_read_op : public operation { public: ASIO_DEFINE_HANDLER_PTR(win_iocp_handle_read_op); win_iocp_handle_read_op( const MutableBufferSequence& buffers, Handler& handler) : operation(&win_iocp_handle_read_op::do_complete), buffers_(buffers), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& result_ec, std::size_t bytes_transferred) { asio::error_code ec(result_ec); // Take ownership of the operation object. win_iocp_handle_read_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) if (owner) { // Check whether buffers are still valid. buffer_sequence_adapter::validate(o->buffers_); } #endif // defined(ASIO_ENABLE_BUFFER_DEBUGGING) // Map non-portable errors to their portable counterparts. if (ec.value() == ERROR_HANDLE_EOF) ec = asio::error::eof; // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. 
Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, ec, bytes_transferred); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: MutableBufferSequence buffers_; Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_HANDLE_READ_OP_HPP galera-26.4.3/asio/asio/detail/null_socket_service.hpp0000664000177500017540000003567613540715002021302 0ustar dbartmy// // detail/null_socket_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_NULL_SOCKET_SERVICE_HPP #define ASIO_DETAIL_NULL_SOCKET_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include "asio/buffer.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/socket_base.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class null_socket_service { public: // The protocol type. typedef Protocol protocol_type; // The endpoint type. 
typedef typename Protocol::endpoint endpoint_type; // The native type of a socket. typedef int native_handle_type; // The implementation type of the socket. struct implementation_type { }; // Constructor. null_socket_service(asio::io_service& io_service) : io_service_(io_service) { } // Destroy all user-defined handler objects owned by the service. void shutdown_service() { } // Construct a new socket implementation. void construct(implementation_type&) { } // Move-construct a new socket implementation. void move_construct(implementation_type&, implementation_type&) { } // Move-assign from another socket implementation. void move_assign(implementation_type&, null_socket_service&, implementation_type&) { } // Move-construct a new socket implementation from another protocol type. template void converting_move_construct(implementation_type&, typename null_socket_service::implementation_type&) { } // Destroy a socket implementation. void destroy(implementation_type&) { } // Open a new socket implementation. asio::error_code open(implementation_type&, const protocol_type&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Assign a native socket to a socket implementation. asio::error_code assign(implementation_type&, const protocol_type&, const native_handle_type&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Determine whether the socket is open. bool is_open(const implementation_type&) const { return false; } // Destroy a socket implementation. asio::error_code close(implementation_type&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Get the native socket representation. native_handle_type native_handle(implementation_type&) { return 0; } // Cancel all operations associated with the socket. 
asio::error_code cancel(implementation_type&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Determine whether the socket is at the out-of-band data mark. bool at_mark(const implementation_type&, asio::error_code& ec) const { ec = asio::error::operation_not_supported; return false; } // Determine the number of bytes available for reading. std::size_t available(const implementation_type&, asio::error_code& ec) const { ec = asio::error::operation_not_supported; return 0; } // Place the socket into the state where it will listen for new connections. asio::error_code listen(implementation_type&, int, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Perform an IO control command on the socket. template asio::error_code io_control(implementation_type&, IO_Control_Command&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Gets the non-blocking mode of the socket. bool non_blocking(const implementation_type&) const { return false; } // Sets the non-blocking mode of the socket. asio::error_code non_blocking(implementation_type&, bool, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Gets the non-blocking mode of the native socket implementation. bool native_non_blocking(const implementation_type&) const { return false; } // Sets the non-blocking mode of the native socket implementation. asio::error_code native_non_blocking(implementation_type&, bool, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Disable sends or receives on the socket. asio::error_code shutdown(implementation_type&, socket_base::shutdown_type, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Bind the socket to the specified local endpoint. 
asio::error_code bind(implementation_type&, const endpoint_type&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Set a socket option. template asio::error_code set_option(implementation_type&, const Option&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Set a socket option. template asio::error_code get_option(const implementation_type&, Option&, asio::error_code& ec) const { ec = asio::error::operation_not_supported; return ec; } // Get the local endpoint. endpoint_type local_endpoint(const implementation_type&, asio::error_code& ec) const { ec = asio::error::operation_not_supported; return endpoint_type(); } // Get the remote endpoint. endpoint_type remote_endpoint(const implementation_type&, asio::error_code& ec) const { ec = asio::error::operation_not_supported; return endpoint_type(); } // Send the given data to the peer. template std::size_t send(implementation_type&, const ConstBufferSequence&, socket_base::message_flags, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Wait until data can be sent without blocking. std::size_t send(implementation_type&, const null_buffers&, socket_base::message_flags, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Start an asynchronous send. The data being sent must be valid for the // lifetime of the asynchronous operation. template void async_send(implementation_type&, const ConstBufferSequence&, socket_base::message_flags, Handler& handler) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; io_service_.post(detail::bind_handler(handler, ec, bytes_transferred)); } // Start an asynchronous wait until data can be sent without blocking. 
template void async_send(implementation_type&, const null_buffers&, socket_base::message_flags, Handler& handler) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; io_service_.post(detail::bind_handler(handler, ec, bytes_transferred)); } // Receive some data from the peer. Returns the number of bytes received. template std::size_t receive(implementation_type&, const MutableBufferSequence&, socket_base::message_flags, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Wait until data can be received without blocking. std::size_t receive(implementation_type&, const null_buffers&, socket_base::message_flags, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Start an asynchronous receive. The buffer for the data being received // must be valid for the lifetime of the asynchronous operation. template void async_receive(implementation_type&, const MutableBufferSequence&, socket_base::message_flags, Handler& handler) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; io_service_.post(detail::bind_handler(handler, ec, bytes_transferred)); } // Wait until data can be received without blocking. template void async_receive(implementation_type&, const null_buffers&, socket_base::message_flags, Handler& handler) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; io_service_.post(detail::bind_handler(handler, ec, bytes_transferred)); } // Receive some data with associated flags. Returns the number of bytes // received. template std::size_t receive_with_flags(implementation_type&, const MutableBufferSequence&, socket_base::message_flags, socket_base::message_flags&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Wait until data can be received without blocking. 
std::size_t receive_with_flags(implementation_type&, const null_buffers&, socket_base::message_flags, socket_base::message_flags&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Start an asynchronous receive. The buffer for the data being received // must be valid for the lifetime of the asynchronous operation. template void async_receive_with_flags(implementation_type&, const MutableBufferSequence&, socket_base::message_flags, socket_base::message_flags&, Handler& handler) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; io_service_.post(detail::bind_handler(handler, ec, bytes_transferred)); } // Wait until data can be received without blocking. template void async_receive_with_flags(implementation_type&, const null_buffers&, socket_base::message_flags, socket_base::message_flags&, Handler& handler) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; io_service_.post(detail::bind_handler(handler, ec, bytes_transferred)); } // Send a datagram to the specified endpoint. Returns the number of bytes // sent. template std::size_t send_to(implementation_type&, const ConstBufferSequence&, const endpoint_type&, socket_base::message_flags, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Wait until data can be sent without blocking. std::size_t send_to(implementation_type&, const null_buffers&, const endpoint_type&, socket_base::message_flags, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Start an asynchronous send. The data being sent must be valid for the // lifetime of the asynchronous operation. 
template void async_send_to(implementation_type&, const ConstBufferSequence&, const endpoint_type&, socket_base::message_flags, Handler& handler) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; io_service_.post(detail::bind_handler(handler, ec, bytes_transferred)); } // Start an asynchronous wait until data can be sent without blocking. template void async_send_to(implementation_type&, const null_buffers&, const endpoint_type&, socket_base::message_flags, Handler& handler) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; io_service_.post(detail::bind_handler(handler, ec, bytes_transferred)); } // Receive a datagram with the endpoint of the sender. Returns the number of // bytes received. template std::size_t receive_from(implementation_type&, const MutableBufferSequence&, endpoint_type&, socket_base::message_flags, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Wait until data can be received without blocking. std::size_t receive_from(implementation_type&, const null_buffers&, endpoint_type&, socket_base::message_flags, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Start an asynchronous receive. The buffer for the data being received and // the sender_endpoint object must both be valid for the lifetime of the // asynchronous operation. template void async_receive_from(implementation_type&, const MutableBufferSequence&, endpoint_type&, socket_base::message_flags, Handler& handler) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; io_service_.post(detail::bind_handler(handler, ec, bytes_transferred)); } // Wait until data can be received without blocking. 
template void async_receive_from(implementation_type&, const null_buffers&, endpoint_type&, socket_base::message_flags, Handler& handler) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; io_service_.post(detail::bind_handler(handler, ec, bytes_transferred)); } // Accept a new connection. template asio::error_code accept(implementation_type&, Socket&, endpoint_type*, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Start an asynchronous accept. The peer and peer_endpoint objects // must be valid until the accept's handler is invoked. template void async_accept(implementation_type&, Socket&, endpoint_type*, Handler& handler) { asio::error_code ec = asio::error::operation_not_supported; io_service_.post(detail::bind_handler(handler, ec)); } // Connect the socket to the specified endpoint. asio::error_code connect(implementation_type&, const endpoint_type&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Start an asynchronous connect. template void async_connect(implementation_type&, const endpoint_type&, Handler& handler) { asio::error_code ec = asio::error::operation_not_supported; io_service_.post(detail::bind_handler(handler, ec)); } private: asio::io_service& io_service_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_NULL_SOCKET_SERVICE_HPP galera-26.4.3/asio/asio/detail/winrt_utils.hpp0000664000177500017540000000510213540715002017600 0ustar dbartmy// // detail/winrt_utils.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_UTILS_HPP #define ASIO_DETAIL_WINRT_UTILS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include #include #include #include #include #include #include #include #include "asio/buffer.hpp" #include "asio/error_code.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { namespace winrt_utils { inline Platform::String^ string(const char* from) { std::wstring tmp(from, from + std::strlen(from)); return ref new Platform::String(tmp.c_str()); } inline Platform::String^ string(const std::string& from) { std::wstring tmp(from.begin(), from.end()); return ref new Platform::String(tmp.c_str()); } inline std::string string(Platform::String^ from) { std::wstring_convert> converter; return converter.to_bytes(from->Data()); } inline Platform::String^ string(unsigned short from) { return string(std::to_string(from)); } template inline Platform::String^ string(const T& from) { return string(from.to_string()); } inline int integer(Platform::String^ from) { return _wtoi(from->Data()); } template inline Windows::Networking::HostName^ host_name(const T& from) { return ref new Windows::Networking::HostName((string)(from)); } template inline Windows::Storage::Streams::IBuffer^ buffer_dup( const ConstBufferSequence& buffers) { using Microsoft::WRL::ComPtr; std::size_t size = asio::buffer_size(buffers); auto b = ref new Windows::Storage::Streams::Buffer(size); ComPtr insp = reinterpret_cast(b); ComPtr bacc; insp.As(&bacc); byte* bytes = nullptr; bacc->Buffer(&bytes); asio::buffer_copy(asio::buffer(bytes, size), buffers); b->Length = size; return b; } } // namespace winrt_utils } // namespace detail } // namespace asio #include 
"asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_WINRT_UTILS_HPP galera-26.4.3/asio/asio/detail/reactor.hpp0000664000177500017540000000157513540715002016666 0ustar dbartmy// // detail/reactor.hpp // ~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTOR_HPP #define ASIO_DETAIL_REACTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/reactor_fwd.hpp" #if defined(ASIO_HAS_EPOLL) # include "asio/detail/epoll_reactor.hpp" #elif defined(ASIO_HAS_KQUEUE) # include "asio/detail/kqueue_reactor.hpp" #elif defined(ASIO_HAS_DEV_POLL) # include "asio/detail/dev_poll_reactor.hpp" #elif defined(ASIO_WINDOWS_RUNTIME) # include "asio/detail/null_reactor.hpp" #else # include "asio/detail/select_reactor.hpp" #endif #endif // ASIO_DETAIL_REACTOR_HPP galera-26.4.3/asio/asio/detail/resolver_service_base.hpp0000664000177500017540000000706213540715002021577 0ustar dbartmy// // detail/resolver_service_base.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_RESOLVER_SERVICE_BASE_HPP #define ASIO_DETAIL_RESOLVER_SERVICE_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/scoped_ptr.hpp" #include "asio/detail/thread.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class resolver_service_base { public: // The implementation type of the resolver. A cancellation token is used to // indicate to the background thread that the operation has been cancelled. typedef socket_ops::shared_cancel_token_type implementation_type; // Constructor. ASIO_DECL resolver_service_base(asio::io_service& io_service); // Destructor. ASIO_DECL ~resolver_service_base(); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Perform any fork-related housekeeping. ASIO_DECL void fork_service( asio::io_service::fork_event fork_ev); // Construct a new resolver implementation. ASIO_DECL void construct(implementation_type& impl); // Destroy a resolver implementation. ASIO_DECL void destroy(implementation_type&); // Cancel pending asynchronous operations. ASIO_DECL void cancel(implementation_type& impl); protected: // Helper function to start an asynchronous resolve operation. ASIO_DECL void start_resolve_op(operation* op); #if !defined(ASIO_WINDOWS_RUNTIME) // Helper class to perform exception-safe cleanup of addrinfo objects. 
class auto_addrinfo : private asio::detail::noncopyable { public: explicit auto_addrinfo(asio::detail::addrinfo_type* ai) : ai_(ai) { } ~auto_addrinfo() { if (ai_) socket_ops::freeaddrinfo(ai_); } operator asio::detail::addrinfo_type*() { return ai_; } private: asio::detail::addrinfo_type* ai_; }; #endif // !defined(ASIO_WINDOWS_RUNTIME) // Helper class to run the work io_service in a thread. class work_io_service_runner; // Start the work thread if it's not already running. ASIO_DECL void start_work_thread(); // The io_service implementation used to post completions. io_service_impl& io_service_impl_; private: // Mutex to protect access to internal data. asio::detail::mutex mutex_; // Private io_service used for performing asynchronous host resolution. asio::detail::scoped_ptr work_io_service_; // The work io_service implementation used to post completions. io_service_impl& work_io_service_impl_; // Work for the private io_service to perform. asio::detail::scoped_ptr work_; // Thread used for running the work io_service's run loop. asio::detail::scoped_ptr work_thread_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/resolver_service_base.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_DETAIL_RESOLVER_SERVICE_BASE_HPP galera-26.4.3/asio/asio/detail/win_iocp_socket_service.hpp0000664000177500017540000004166513540715002022132 0ustar dbartmy// // detail/win_iocp_socket_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_SOCKET_SERVICE_HPP #define ASIO_DETAIL_WIN_IOCP_SOCKET_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/socket_base.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/reactor.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_holder.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/win_iocp_io_service.hpp" #include "asio/detail/win_iocp_null_buffers_op.hpp" #include "asio/detail/win_iocp_socket_accept_op.hpp" #include "asio/detail/win_iocp_socket_connect_op.hpp" #include "asio/detail/win_iocp_socket_recvfrom_op.hpp" #include "asio/detail/win_iocp_socket_send_op.hpp" #include "asio/detail/win_iocp_socket_service_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class win_iocp_socket_service : public win_iocp_socket_service_base { public: // The protocol type. typedef Protocol protocol_type; // The endpoint type. typedef typename Protocol::endpoint endpoint_type; // The native type of a socket. 
class native_handle_type { public: native_handle_type(socket_type s) : socket_(s), have_remote_endpoint_(false) { } native_handle_type(socket_type s, const endpoint_type& ep) : socket_(s), have_remote_endpoint_(true), remote_endpoint_(ep) { } void operator=(socket_type s) { socket_ = s; have_remote_endpoint_ = false; remote_endpoint_ = endpoint_type(); } operator socket_type() const { return socket_; } bool have_remote_endpoint() const { return have_remote_endpoint_; } endpoint_type remote_endpoint() const { return remote_endpoint_; } private: socket_type socket_; bool have_remote_endpoint_; endpoint_type remote_endpoint_; }; // The implementation type of the socket. struct implementation_type : win_iocp_socket_service_base::base_implementation_type { // Default constructor. implementation_type() : protocol_(endpoint_type().protocol()), have_remote_endpoint_(false), remote_endpoint_() { } // The protocol associated with the socket. protocol_type protocol_; // Whether we have a cached remote endpoint. bool have_remote_endpoint_; // A cached remote endpoint. endpoint_type remote_endpoint_; }; // Constructor. win_iocp_socket_service(asio::io_service& io_service) : win_iocp_socket_service_base(io_service) { } // Move-construct a new socket implementation. void move_construct(implementation_type& impl, implementation_type& other_impl) { this->base_move_construct(impl, other_impl); impl.protocol_ = other_impl.protocol_; other_impl.protocol_ = endpoint_type().protocol(); impl.have_remote_endpoint_ = other_impl.have_remote_endpoint_; other_impl.have_remote_endpoint_ = false; impl.remote_endpoint_ = other_impl.remote_endpoint_; other_impl.remote_endpoint_ = endpoint_type(); } // Move-assign from another socket implementation. 
void move_assign(implementation_type& impl, win_iocp_socket_service_base& other_service, implementation_type& other_impl) { this->base_move_assign(impl, other_service, other_impl); impl.protocol_ = other_impl.protocol_; other_impl.protocol_ = endpoint_type().protocol(); impl.have_remote_endpoint_ = other_impl.have_remote_endpoint_; other_impl.have_remote_endpoint_ = false; impl.remote_endpoint_ = other_impl.remote_endpoint_; other_impl.remote_endpoint_ = endpoint_type(); } // Move-construct a new socket implementation from another protocol type. template void converting_move_construct(implementation_type& impl, typename win_iocp_socket_service< Protocol1>::implementation_type& other_impl) { this->base_move_construct(impl, other_impl); impl.protocol_ = protocol_type(other_impl.protocol_); other_impl.protocol_ = typename Protocol1::endpoint().protocol(); impl.have_remote_endpoint_ = other_impl.have_remote_endpoint_; other_impl.have_remote_endpoint_ = false; impl.remote_endpoint_ = other_impl.remote_endpoint_; other_impl.remote_endpoint_ = typename Protocol1::endpoint(); } // Open a new socket implementation. asio::error_code open(implementation_type& impl, const protocol_type& protocol, asio::error_code& ec) { if (!do_open(impl, protocol.family(), protocol.type(), protocol.protocol(), ec)) { impl.protocol_ = protocol; impl.have_remote_endpoint_ = false; impl.remote_endpoint_ = endpoint_type(); } return ec; } // Assign a native socket to a socket implementation. asio::error_code assign(implementation_type& impl, const protocol_type& protocol, const native_handle_type& native_socket, asio::error_code& ec) { if (!do_assign(impl, protocol.type(), native_socket, ec)) { impl.protocol_ = protocol; impl.have_remote_endpoint_ = native_socket.have_remote_endpoint(); impl.remote_endpoint_ = native_socket.remote_endpoint(); } return ec; } // Get the native socket representation. 
native_handle_type native_handle(implementation_type& impl) { if (impl.have_remote_endpoint_) return native_handle_type(impl.socket_, impl.remote_endpoint_); return native_handle_type(impl.socket_); } // Bind the socket to the specified local endpoint. asio::error_code bind(implementation_type& impl, const endpoint_type& endpoint, asio::error_code& ec) { socket_ops::bind(impl.socket_, endpoint.data(), endpoint.size(), ec); return ec; } // Set a socket option. template asio::error_code set_option(implementation_type& impl, const Option& option, asio::error_code& ec) { socket_ops::setsockopt(impl.socket_, impl.state_, option.level(impl.protocol_), option.name(impl.protocol_), option.data(impl.protocol_), option.size(impl.protocol_), ec); return ec; } // Set a socket option. template asio::error_code get_option(const implementation_type& impl, Option& option, asio::error_code& ec) const { std::size_t size = option.size(impl.protocol_); socket_ops::getsockopt(impl.socket_, impl.state_, option.level(impl.protocol_), option.name(impl.protocol_), option.data(impl.protocol_), &size, ec); if (!ec) option.resize(impl.protocol_, size); return ec; } // Get the local endpoint. endpoint_type local_endpoint(const implementation_type& impl, asio::error_code& ec) const { endpoint_type endpoint; std::size_t addr_len = endpoint.capacity(); if (socket_ops::getsockname(impl.socket_, endpoint.data(), &addr_len, ec)) return endpoint_type(); endpoint.resize(addr_len); return endpoint; } // Get the remote endpoint. endpoint_type remote_endpoint(const implementation_type& impl, asio::error_code& ec) const { endpoint_type endpoint = impl.remote_endpoint_; std::size_t addr_len = endpoint.capacity(); if (socket_ops::getpeername(impl.socket_, endpoint.data(), &addr_len, impl.have_remote_endpoint_, ec)) return endpoint_type(); endpoint.resize(addr_len); return endpoint; } // Send a datagram to the specified endpoint. Returns the number of bytes // sent. 
template size_t send_to(implementation_type& impl, const ConstBufferSequence& buffers, const endpoint_type& destination, socket_base::message_flags flags, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); return socket_ops::sync_sendto(impl.socket_, impl.state_, bufs.buffers(), bufs.count(), flags, destination.data(), destination.size(), ec); } // Wait until data can be sent without blocking. size_t send_to(implementation_type& impl, const null_buffers&, const endpoint_type&, socket_base::message_flags, asio::error_code& ec) { // Wait for socket to become ready. socket_ops::poll_write(impl.socket_, impl.state_, ec); return 0; } // Start an asynchronous send. The data being sent must be valid for the // lifetime of the asynchronous operation. template void async_send_to(implementation_type& impl, const ConstBufferSequence& buffers, const endpoint_type& destination, socket_base::message_flags flags, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_socket_send_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.cancel_token_, buffers, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_send_to")); buffer_sequence_adapter bufs(buffers); start_send_to_op(impl, bufs.buffers(), bufs.count(), destination.data(), static_cast(destination.size()), flags, p.p); p.v = p.p = 0; } // Start an asynchronous wait until data can be sent without blocking. template void async_send_to(implementation_type& impl, const null_buffers&, const endpoint_type&, socket_base::message_flags, Handler& handler) { // Allocate and construct an operation to wrap the handler. 
typedef win_iocp_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.cancel_token_, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_send_to(null_buffers)")); start_reactor_op(impl, reactor::write_op, p.p); p.v = p.p = 0; } // Receive a datagram with the endpoint of the sender. Returns the number of // bytes received. template size_t receive_from(implementation_type& impl, const MutableBufferSequence& buffers, endpoint_type& sender_endpoint, socket_base::message_flags flags, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); std::size_t addr_len = sender_endpoint.capacity(); std::size_t bytes_recvd = socket_ops::sync_recvfrom( impl.socket_, impl.state_, bufs.buffers(), bufs.count(), flags, sender_endpoint.data(), &addr_len, ec); if (!ec) sender_endpoint.resize(addr_len); return bytes_recvd; } // Wait until data can be received without blocking. size_t receive_from(implementation_type& impl, const null_buffers&, endpoint_type& sender_endpoint, socket_base::message_flags, asio::error_code& ec) { // Wait for socket to become ready. socket_ops::poll_read(impl.socket_, impl.state_, ec); // Reset endpoint since it can be given no sensible value at this time. sender_endpoint = endpoint_type(); return 0; } // Start an asynchronous receive. The buffer for the data being received and // the sender_endpoint object must both be valid for the lifetime of the // asynchronous operation. template void async_receive_from(implementation_type& impl, const MutableBufferSequence& buffers, endpoint_type& sender_endp, socket_base::message_flags flags, Handler& handler) { // Allocate and construct an operation to wrap the handler. 
typedef win_iocp_socket_recvfrom_op< MutableBufferSequence, endpoint_type, Handler> op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(sender_endp, impl.cancel_token_, buffers, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_receive_from")); buffer_sequence_adapter bufs(buffers); start_receive_from_op(impl, bufs.buffers(), bufs.count(), sender_endp.data(), flags, &p.p->endpoint_size(), p.p); p.v = p.p = 0; } // Wait until data can be received without blocking. template void async_receive_from(implementation_type& impl, const null_buffers&, endpoint_type& sender_endpoint, socket_base::message_flags flags, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.cancel_token_, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_receive_from(null_buffers)")); // Reset endpoint since it can be given no sensible value at this time. sender_endpoint = endpoint_type(); start_null_buffers_receive_op(impl, flags, p.p); p.v = p.p = 0; } // Accept a new connection. template asio::error_code accept(implementation_type& impl, Socket& peer, endpoint_type* peer_endpoint, asio::error_code& ec) { // We cannot accept a socket that is already open. if (peer.is_open()) { ec = asio::error::already_open; return ec; } std::size_t addr_len = peer_endpoint ? peer_endpoint->capacity() : 0; socket_holder new_socket(socket_ops::sync_accept(impl.socket_, impl.state_, peer_endpoint ? peer_endpoint->data() : 0, peer_endpoint ? &addr_len : 0, ec)); // On success, assign new connection to peer socket object. 
if (new_socket.get() != invalid_socket) { if (peer_endpoint) peer_endpoint->resize(addr_len); if (!peer.assign(impl.protocol_, new_socket.get(), ec)) new_socket.release(); } return ec; } // Start an asynchronous accept. The peer and peer_endpoint objects // must be valid until the accept's handler is invoked. template void async_accept(implementation_type& impl, Socket& peer, endpoint_type* peer_endpoint, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_socket_accept_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; bool enable_connection_aborted = (impl.state_ & socket_ops::enable_connection_aborted) != 0; p.p = new (p.v) op(*this, impl.socket_, peer, impl.protocol_, peer_endpoint, enable_connection_aborted, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_accept")); start_accept_op(impl, peer.is_open(), p.p->new_socket(), impl.protocol_.family(), impl.protocol_.type(), impl.protocol_.protocol(), p.p->output_buffer(), p.p->address_length(), p.p); p.v = p.p = 0; } // Connect the socket to the specified endpoint. asio::error_code connect(implementation_type& impl, const endpoint_type& peer_endpoint, asio::error_code& ec) { socket_ops::sync_connect(impl.socket_, peer_endpoint.data(), peer_endpoint.size(), ec); return ec; } // Start an asynchronous connect. template void async_connect(implementation_type& impl, const endpoint_type& peer_endpoint, Handler& handler) { // Allocate and construct an operation to wrap the handler. 
typedef win_iocp_socket_connect_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(impl.socket_, handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_connect")); start_connect_op(impl, impl.protocol_.family(), impl.protocol_.type(), peer_endpoint.data(), static_cast(peer_endpoint.size()), p.p); p.v = p.p = 0; } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_SOCKET_SERVICE_HPP galera-26.4.3/asio/asio/detail/winrt_ssocket_service.hpp0000664000177500017540000001451513540715002021643 0ustar dbartmy// // detail/winrt_ssocket_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_SSOCKET_SERVICE_HPP #define ASIO_DETAIL_WINRT_SSOCKET_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/winrt_socket_connect_op.hpp" #include "asio/detail/winrt_ssocket_service_base.hpp" #include "asio/detail/winrt_utils.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class winrt_ssocket_service : public winrt_ssocket_service_base { public: // The protocol type. typedef Protocol protocol_type; // The endpoint type. typedef typename Protocol::endpoint endpoint_type; // The native type of a socket. typedef Windows::Networking::Sockets::StreamSocket^ native_handle_type; // The implementation type of the socket. 
struct implementation_type : base_implementation_type { // Default constructor. implementation_type() : base_implementation_type(), protocol_(endpoint_type().protocol()) { } // The protocol associated with the socket. protocol_type protocol_; }; // Constructor. winrt_ssocket_service(asio::io_service& io_service) : winrt_ssocket_service_base(io_service) { } // Move-construct a new socket implementation. void move_construct(implementation_type& impl, implementation_type& other_impl) { this->base_move_construct(impl, other_impl); impl.protocol_ = other_impl.protocol_; other_impl.protocol_ = endpoint_type().protocol(); } // Move-assign from another socket implementation. void move_assign(implementation_type& impl, winrt_ssocket_service& other_service, implementation_type& other_impl) { this->base_move_assign(impl, other_service, other_impl); impl.protocol_ = other_impl.protocol_; other_impl.protocol_ = endpoint_type().protocol(); } // Move-construct a new socket implementation from another protocol type. template void converting_move_construct(implementation_type& impl, typename winrt_ssocket_service< Protocol1>::implementation_type& other_impl) { this->base_move_construct(impl, other_impl); impl.protocol_ = protocol_type(other_impl.protocol_); other_impl.protocol_ = typename Protocol1::endpoint().protocol(); } // Open a new socket implementation. asio::error_code open(implementation_type& impl, const protocol_type& protocol, asio::error_code& ec) { if (is_open(impl)) { ec = asio::error::already_open; return ec; } try { impl.socket_ = ref new Windows::Networking::Sockets::StreamSocket; impl.protocol_ = protocol; ec = asio::error_code(); } catch (Platform::Exception^ e) { ec = asio::error_code(e->HResult, asio::system_category()); } return ec; } // Assign a native socket to a socket implementation. 
asio::error_code assign(implementation_type& impl, const protocol_type& protocol, const native_handle_type& native_socket, asio::error_code& ec) { if (is_open(impl)) { ec = asio::error::already_open; return ec; } impl.socket_ = native_socket; impl.protocol_ = protocol; ec = asio::error_code(); return ec; } // Bind the socket to the specified local endpoint. asio::error_code bind(implementation_type&, const endpoint_type&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Get the local endpoint. endpoint_type local_endpoint(const implementation_type& impl, asio::error_code& ec) const { endpoint_type endpoint; endpoint.resize(do_get_endpoint(impl, true, endpoint.data(), endpoint.size(), ec)); return endpoint; } // Get the remote endpoint. endpoint_type remote_endpoint(const implementation_type& impl, asio::error_code& ec) const { endpoint_type endpoint; endpoint.resize(do_get_endpoint(impl, false, endpoint.data(), endpoint.size(), ec)); return endpoint; } // Set a socket option. template asio::error_code set_option(implementation_type& impl, const Option& option, asio::error_code& ec) { return do_set_option(impl, option.level(impl.protocol_), option.name(impl.protocol_), option.data(impl.protocol_), option.size(impl.protocol_), ec); } // Get a socket option. template asio::error_code get_option(const implementation_type& impl, Option& option, asio::error_code& ec) const { std::size_t size = option.size(impl.protocol_); do_get_option(impl, option.level(impl.protocol_), option.name(impl.protocol_), option.data(impl.protocol_), &size, ec); if (!ec) option.resize(impl.protocol_, size); return ec; } // Connect the socket to the specified endpoint. asio::error_code connect(implementation_type& impl, const endpoint_type& peer_endpoint, asio::error_code& ec) { return do_connect(impl, peer_endpoint.data(), ec); } // Start an asynchronous connect. 
template void async_connect(implementation_type& impl, const endpoint_type& peer_endpoint, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef winrt_socket_connect_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "socket", &impl, "async_connect")); start_connect_op(impl, peer_endpoint.data(), p.p, is_continuation); p.v = p.p = 0; } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_WINRT_SSOCKET_SERVICE_HPP galera-26.4.3/asio/asio/detail/timer_scheduler_fwd.hpp0000664000177500017540000000215413540715002021237 0ustar dbartmy// // detail/timer_scheduler_fwd.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_TIMER_SCHEDULER_FWD_HPP #define ASIO_DETAIL_TIMER_SCHEDULER_FWD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" namespace asio { namespace detail { #if defined(ASIO_WINDOWS_RUNTIME) typedef class winrt_timer_scheduler timer_scheduler; #elif defined(ASIO_HAS_IOCP) typedef class win_iocp_io_service timer_scheduler; #elif defined(ASIO_HAS_EPOLL) typedef class epoll_reactor timer_scheduler; #elif defined(ASIO_HAS_KQUEUE) typedef class kqueue_reactor timer_scheduler; #elif defined(ASIO_HAS_DEV_POLL) typedef class dev_poll_reactor timer_scheduler; #else typedef class select_reactor timer_scheduler; #endif } // namespace detail } // namespace asio #endif // ASIO_DETAIL_TIMER_SCHEDULER_FWD_HPP galera-26.4.3/asio/asio/detail/mutex.hpp0000664000177500017540000000230013540715002016354 0ustar dbartmy// // detail/mutex.hpp // ~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_MUTEX_HPP #define ASIO_DETAIL_MUTEX_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) # include "asio/detail/null_mutex.hpp" #elif defined(ASIO_WINDOWS) # include "asio/detail/win_mutex.hpp" #elif defined(ASIO_HAS_PTHREADS) # include "asio/detail/posix_mutex.hpp" #elif defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) # include "asio/detail/std_mutex.hpp" #else # error Only Windows, POSIX and std::mutex are supported! 
#endif namespace asio { namespace detail { #if !defined(ASIO_HAS_THREADS) typedef null_mutex mutex; #elif defined(ASIO_WINDOWS) typedef win_mutex mutex; #elif defined(ASIO_HAS_PTHREADS) typedef posix_mutex mutex; #elif defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) typedef std_mutex mutex; #endif } // namespace detail } // namespace asio #endif // ASIO_DETAIL_MUTEX_HPP galera-26.4.3/asio/asio/detail/null_reactor.hpp0000664000177500017540000000247113540715002017714 0ustar dbartmy// // detail/null_reactor.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_NULL_REACTOR_HPP #define ASIO_DETAIL_NULL_REACTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include "asio/io_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class null_reactor : public asio::detail::service_base { public: // Constructor. null_reactor(asio::io_service& io_service) : asio::detail::service_base(io_service) { } // Destructor. ~null_reactor() { } // Destroy all user-defined handler objects owned by the service. void shutdown_service() { } // No-op because should never be called. void run(bool /*block*/, op_queue& /*ops*/) { } // No-op. void interrupt() { } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_NULL_REACTOR_HPP galera-26.4.3/asio/asio/detail/signal_op.hpp0000664000177500017540000000204013540715002017166 0ustar dbartmy// // detail/signal_op.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SIGNAL_OP_HPP #define ASIO_DETAIL_SIGNAL_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class signal_op : public operation { public: // The error code to be passed to the completion handler. asio::error_code ec_; // The signal number to be passed to the completion handler. int signal_number_; protected: signal_op(func_type func) : operation(func), signal_number_(0) { } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_SIGNAL_OP_HPP galera-26.4.3/asio/asio/detail/buffer_sequence_adapter.hpp0000664000177500017540000002277413540715002022074 0ustar dbartmy// // detail/buffer_sequence_adapter.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_BUFFER_SEQUENCE_ADAPTER_HPP #define ASIO_DETAIL_BUFFER_SEQUENCE_ADAPTER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/buffer.hpp" #include "asio/detail/array_fwd.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class buffer_sequence_adapter_base { protected: #if defined(ASIO_WINDOWS_RUNTIME) // The maximum number of buffers to support in a single operation. 
enum { max_buffers = 1 }; typedef Windows::Storage::Streams::IBuffer^ native_buffer_type; ASIO_DECL static void init_native_buffer( native_buffer_type& buf, const asio::mutable_buffer& buffer); ASIO_DECL static void init_native_buffer( native_buffer_type& buf, const asio::const_buffer& buffer); #elif defined(ASIO_WINDOWS) || defined(__CYGWIN__) // The maximum number of buffers to support in a single operation. enum { max_buffers = 64 < max_iov_len ? 64 : max_iov_len }; typedef WSABUF native_buffer_type; static void init_native_buffer(WSABUF& buf, const asio::mutable_buffer& buffer) { buf.buf = asio::buffer_cast(buffer); buf.len = static_cast(asio::buffer_size(buffer)); } static void init_native_buffer(WSABUF& buf, const asio::const_buffer& buffer) { buf.buf = const_cast(asio::buffer_cast(buffer)); buf.len = static_cast(asio::buffer_size(buffer)); } #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) // The maximum number of buffers to support in a single operation. enum { max_buffers = 64 < max_iov_len ? 64 : max_iov_len }; typedef iovec native_buffer_type; static void init_iov_base(void*& base, void* addr) { base = addr; } template static void init_iov_base(T& base, void* addr) { base = static_cast(addr); } static void init_native_buffer(iovec& iov, const asio::mutable_buffer& buffer) { init_iov_base(iov.iov_base, asio::buffer_cast(buffer)); iov.iov_len = asio::buffer_size(buffer); } static void init_native_buffer(iovec& iov, const asio::const_buffer& buffer) { init_iov_base(iov.iov_base, const_cast( asio::buffer_cast(buffer))); iov.iov_len = asio::buffer_size(buffer); } #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) }; // Helper class to translate buffers into the native buffer representation. 
template class buffer_sequence_adapter : buffer_sequence_adapter_base { public: explicit buffer_sequence_adapter(const Buffers& buffer_sequence) : count_(0), total_buffer_size_(0) { typename Buffers::const_iterator iter = buffer_sequence.begin(); typename Buffers::const_iterator end = buffer_sequence.end(); for (; iter != end && count_ < max_buffers; ++iter, ++count_) { Buffer buffer(*iter); init_native_buffer(buffers_[count_], buffer); total_buffer_size_ += asio::buffer_size(buffer); } } native_buffer_type* buffers() { return buffers_; } std::size_t count() const { return count_; } bool all_empty() const { return total_buffer_size_ == 0; } static bool all_empty(const Buffers& buffer_sequence) { typename Buffers::const_iterator iter = buffer_sequence.begin(); typename Buffers::const_iterator end = buffer_sequence.end(); std::size_t i = 0; for (; iter != end && i < max_buffers; ++iter, ++i) if (asio::buffer_size(Buffer(*iter)) > 0) return false; return true; } static void validate(const Buffers& buffer_sequence) { typename Buffers::const_iterator iter = buffer_sequence.begin(); typename Buffers::const_iterator end = buffer_sequence.end(); for (; iter != end; ++iter) { Buffer buffer(*iter); asio::buffer_cast(buffer); } } static Buffer first(const Buffers& buffer_sequence) { typename Buffers::const_iterator iter = buffer_sequence.begin(); typename Buffers::const_iterator end = buffer_sequence.end(); for (; iter != end; ++iter) { Buffer buffer(*iter); if (asio::buffer_size(buffer) != 0) return buffer; } return Buffer(); } private: native_buffer_type buffers_[max_buffers]; std::size_t count_; std::size_t total_buffer_size_; }; template class buffer_sequence_adapter : buffer_sequence_adapter_base { public: explicit buffer_sequence_adapter( const asio::mutable_buffers_1& buffer_sequence) { init_native_buffer(buffer_, Buffer(buffer_sequence)); total_buffer_size_ = asio::buffer_size(buffer_sequence); } native_buffer_type* buffers() { return &buffer_; } std::size_t count() 
const { return 1; } bool all_empty() const { return total_buffer_size_ == 0; } static bool all_empty(const asio::mutable_buffers_1& buffer_sequence) { return asio::buffer_size(buffer_sequence) == 0; } static void validate(const asio::mutable_buffers_1& buffer_sequence) { asio::buffer_cast(buffer_sequence); } static Buffer first(const asio::mutable_buffers_1& buffer_sequence) { return Buffer(buffer_sequence); } private: native_buffer_type buffer_; std::size_t total_buffer_size_; }; template class buffer_sequence_adapter : buffer_sequence_adapter_base { public: explicit buffer_sequence_adapter( const asio::const_buffers_1& buffer_sequence) { init_native_buffer(buffer_, Buffer(buffer_sequence)); total_buffer_size_ = asio::buffer_size(buffer_sequence); } native_buffer_type* buffers() { return &buffer_; } std::size_t count() const { return 1; } bool all_empty() const { return total_buffer_size_ == 0; } static bool all_empty(const asio::const_buffers_1& buffer_sequence) { return asio::buffer_size(buffer_sequence) == 0; } static void validate(const asio::const_buffers_1& buffer_sequence) { asio::buffer_cast(buffer_sequence); } static Buffer first(const asio::const_buffers_1& buffer_sequence) { return Buffer(buffer_sequence); } private: native_buffer_type buffer_; std::size_t total_buffer_size_; }; template class buffer_sequence_adapter > : buffer_sequence_adapter_base { public: explicit buffer_sequence_adapter( const boost::array& buffer_sequence) { init_native_buffer(buffers_[0], Buffer(buffer_sequence[0])); init_native_buffer(buffers_[1], Buffer(buffer_sequence[1])); total_buffer_size_ = asio::buffer_size(buffer_sequence[0]) + asio::buffer_size(buffer_sequence[1]); } native_buffer_type* buffers() { return buffers_; } std::size_t count() const { return 2; } bool all_empty() const { return total_buffer_size_ == 0; } static bool all_empty(const boost::array& buffer_sequence) { return asio::buffer_size(buffer_sequence[0]) == 0 && asio::buffer_size(buffer_sequence[1]) == 0; 
} static void validate(const boost::array& buffer_sequence) { asio::buffer_cast(buffer_sequence[0]); asio::buffer_cast(buffer_sequence[1]); } static Buffer first(const boost::array& buffer_sequence) { return Buffer(asio::buffer_size(buffer_sequence[0]) != 0 ? buffer_sequence[0] : buffer_sequence[1]); } private: native_buffer_type buffers_[2]; std::size_t total_buffer_size_; }; #if defined(ASIO_HAS_STD_ARRAY) template class buffer_sequence_adapter > : buffer_sequence_adapter_base { public: explicit buffer_sequence_adapter( const std::array& buffer_sequence) { init_native_buffer(buffers_[0], Buffer(buffer_sequence[0])); init_native_buffer(buffers_[1], Buffer(buffer_sequence[1])); total_buffer_size_ = asio::buffer_size(buffer_sequence[0]) + asio::buffer_size(buffer_sequence[1]); } native_buffer_type* buffers() { return buffers_; } std::size_t count() const { return 2; } bool all_empty() const { return total_buffer_size_ == 0; } static bool all_empty(const std::array& buffer_sequence) { return asio::buffer_size(buffer_sequence[0]) == 0 && asio::buffer_size(buffer_sequence[1]) == 0; } static void validate(const std::array& buffer_sequence) { asio::buffer_cast(buffer_sequence[0]); asio::buffer_cast(buffer_sequence[1]); } static Buffer first(const std::array& buffer_sequence) { return Buffer(asio::buffer_size(buffer_sequence[0]) != 0 ? buffer_sequence[0] : buffer_sequence[1]); } private: native_buffer_type buffers_[2]; std::size_t total_buffer_size_; }; #endif // defined(ASIO_HAS_STD_ARRAY) } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/buffer_sequence_adapter.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_DETAIL_BUFFER_SEQUENCE_ADAPTER_HPP galera-26.4.3/asio/asio/detail/io_control.hpp0000664000177500017540000000514413540715002017372 0ustar dbartmy// // detail/io_control.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IO_CONTROL_HPP #define ASIO_DETAIL_IO_CONTROL_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { namespace io_control { // IO control command for non-blocking I/O. class non_blocking_io { public: // Default constructor. non_blocking_io() : value_(0) { } // Construct with a specific command value. non_blocking_io(bool value) : value_(value ? 1 : 0) { } // Get the name of the IO control command. int name() const { return static_cast(ASIO_OS_DEF(FIONBIO)); } // Set the value of the I/O control command. void set(bool value) { value_ = value ? 1 : 0; } // Get the current value of the I/O control command. bool get() const { return value_ != 0; } // Get the address of the command data. detail::ioctl_arg_type* data() { return &value_; } // Get the address of the command data. const detail::ioctl_arg_type* data() const { return &value_; } private: detail::ioctl_arg_type value_; }; // I/O control command for getting number of bytes available. class bytes_readable { public: // Default constructor. bytes_readable() : value_(0) { } // Construct with a specific command value. bytes_readable(std::size_t value) : value_(static_cast(value)) { } // Get the name of the IO control command. int name() const { return static_cast(ASIO_OS_DEF(FIONREAD)); } // Set the value of the I/O control command. void set(std::size_t value) { value_ = static_cast(value); } // Get the current value of the I/O control command. std::size_t get() const { return static_cast(value_); } // Get the address of the command data. 
detail::ioctl_arg_type* data() { return &value_; } // Get the address of the command data. const detail::ioctl_arg_type* data() const { return &value_; } private: detail::ioctl_arg_type value_; }; } // namespace io_control } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IO_CONTROL_HPP galera-26.4.3/asio/asio/detail/win_fd_set_adapter.hpp0000664000177500017540000000733613540715002021051 0ustar dbartmy// // detail/win_fd_set_adapter.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_FD_SET_ADAPTER_HPP #define ASIO_DETAIL_WIN_FD_SET_ADAPTER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) #include "asio/detail/noncopyable.hpp" #include "asio/detail/reactor_op_queue.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Adapts the FD_SET type to meet the Descriptor_Set concept's requirements. 
class win_fd_set_adapter : noncopyable { public: enum { default_fd_set_size = 1024 }; win_fd_set_adapter() : capacity_(default_fd_set_size), max_descriptor_(invalid_socket) { fd_set_ = static_cast(::operator new( sizeof(win_fd_set) - sizeof(SOCKET) + sizeof(SOCKET) * (capacity_))); fd_set_->fd_count = 0; } ~win_fd_set_adapter() { ::operator delete(fd_set_); } void reset() { fd_set_->fd_count = 0; max_descriptor_ = invalid_socket; } bool set(socket_type descriptor) { for (u_int i = 0; i < fd_set_->fd_count; ++i) if (fd_set_->fd_array[i] == descriptor) return true; reserve(fd_set_->fd_count + 1); fd_set_->fd_array[fd_set_->fd_count++] = descriptor; return true; } void set(reactor_op_queue& operations, op_queue&) { reactor_op_queue::iterator i = operations.begin(); while (i != operations.end()) { reactor_op_queue::iterator op_iter = i++; reserve(fd_set_->fd_count + 1); fd_set_->fd_array[fd_set_->fd_count++] = op_iter->first; } } bool is_set(socket_type descriptor) const { return !!__WSAFDIsSet(descriptor, const_cast(reinterpret_cast(fd_set_))); } operator fd_set*() { return reinterpret_cast(fd_set_); } socket_type max_descriptor() const { return max_descriptor_; } void perform(reactor_op_queue& operations, op_queue& ops) const { for (u_int i = 0; i < fd_set_->fd_count; ++i) operations.perform_operations(fd_set_->fd_array[i], ops); } private: // This structure is defined to be compatible with the Windows API fd_set // structure, but without being dependent on the value of FD_SETSIZE. We use // the "struct hack" to allow the number of descriptors to be varied at // runtime. struct win_fd_set { u_int fd_count; SOCKET fd_array[1]; }; // Increase the fd_set_ capacity to at least the specified number of elements. 
void reserve(u_int n) { if (n <= capacity_) return; u_int new_capacity = capacity_ + capacity_ / 2; if (new_capacity < n) new_capacity = n; win_fd_set* new_fd_set = static_cast(::operator new( sizeof(win_fd_set) - sizeof(SOCKET) + sizeof(SOCKET) * (new_capacity))); new_fd_set->fd_count = fd_set_->fd_count; for (u_int i = 0; i < fd_set_->fd_count; ++i) new_fd_set->fd_array[i] = fd_set_->fd_array[i]; ::operator delete(fd_set_); fd_set_ = new_fd_set; capacity_ = new_capacity; } win_fd_set* fd_set_; u_int capacity_; socket_type max_descriptor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) #endif // ASIO_DETAIL_WIN_FD_SET_ADAPTER_HPP galera-26.4.3/asio/asio/detail/descriptor_read_op.hpp0000664000177500017540000000707613540715002021100 0ustar dbartmy// // detail/descriptor_read_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_DESCRIPTOR_READ_OP_HPP #define ASIO_DETAIL_DESCRIPTOR_READ_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/descriptor_ops.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class descriptor_read_op_base : public reactor_op { public: descriptor_read_op_base(int descriptor, const MutableBufferSequence& buffers, func_type complete_func) : reactor_op(&descriptor_read_op_base::do_perform, complete_func), descriptor_(descriptor), buffers_(buffers) { } static bool do_perform(reactor_op* base) { descriptor_read_op_base* o(static_cast(base)); buffer_sequence_adapter bufs(o->buffers_); return descriptor_ops::non_blocking_read(o->descriptor_, bufs.buffers(), bufs.count(), o->ec_, o->bytes_transferred_); } private: int descriptor_; MutableBufferSequence buffers_; }; template class descriptor_read_op : public descriptor_read_op_base { public: ASIO_DEFINE_HANDLER_PTR(descriptor_read_op); descriptor_read_op(int descriptor, const MutableBufferSequence& buffers, Handler& handler) : descriptor_read_op_base( descriptor, buffers, &descriptor_read_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. 
descriptor_read_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, o->bytes_transferred_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #endif // ASIO_DETAIL_DESCRIPTOR_READ_OP_HPP galera-26.4.3/asio/asio/detail/posix_static_mutex.hpp0000664000177500017540000000252113540715002021152 0ustar dbartmy// // detail/posix_static_mutex.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_POSIX_STATIC_MUTEX_HPP #define ASIO_DETAIL_POSIX_STATIC_MUTEX_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_PTHREADS) #include #include "asio/detail/scoped_lock.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct posix_static_mutex { typedef asio::detail::scoped_lock scoped_lock; // Initialise the mutex. void init() { // Nothing to do. } // Lock the mutex. void lock() { (void)::pthread_mutex_lock(&mutex_); // Ignore EINVAL. } // Unlock the mutex. void unlock() { (void)::pthread_mutex_unlock(&mutex_); // Ignore EINVAL. } ::pthread_mutex_t mutex_; }; #define ASIO_POSIX_STATIC_MUTEX_INIT { PTHREAD_MUTEX_INITIALIZER } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_PTHREADS) #endif // ASIO_DETAIL_POSIX_STATIC_MUTEX_HPP galera-26.4.3/asio/asio/detail/win_iocp_handle_service.hpp0000664000177500017540000002653613540715002022075 0ustar dbartmy// // detail/win_iocp_handle_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_HANDLE_SERVICE_HPP #define ASIO_DETAIL_WIN_IOCP_HANDLE_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/cstdint.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/win_iocp_handle_read_op.hpp" #include "asio/detail/win_iocp_handle_write_op.hpp" #include "asio/detail/win_iocp_io_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class win_iocp_handle_service { public: // The native type of a stream handle. typedef HANDLE native_handle_type; // The implementation type of the stream handle. class implementation_type { public: // Default constructor. implementation_type() : handle_(INVALID_HANDLE_VALUE), safe_cancellation_thread_id_(0), next_(0), prev_(0) { } private: // Only this service will have access to the internal values. friend class win_iocp_handle_service; // The native stream handle representation. native_handle_type handle_; // The ID of the thread from which it is safe to cancel asynchronous // operations. 0 means no asynchronous operations have been started yet. // ~0 means asynchronous operations have been started from more than one // thread, and cancellation is not supported for the handle. DWORD safe_cancellation_thread_id_; // Pointers to adjacent handle implementations in linked list. implementation_type* next_; implementation_type* prev_; }; ASIO_DECL win_iocp_handle_service(asio::io_service& io_service); // Destroy all user-defined handler objects owned by the service. 
ASIO_DECL void shutdown_service(); // Construct a new handle implementation. ASIO_DECL void construct(implementation_type& impl); // Move-construct a new handle implementation. ASIO_DECL void move_construct(implementation_type& impl, implementation_type& other_impl); // Move-assign from another handle implementation. ASIO_DECL void move_assign(implementation_type& impl, win_iocp_handle_service& other_service, implementation_type& other_impl); // Destroy a handle implementation. ASIO_DECL void destroy(implementation_type& impl); // Assign a native handle to a handle implementation. ASIO_DECL asio::error_code assign(implementation_type& impl, const native_handle_type& handle, asio::error_code& ec); // Determine whether the handle is open. bool is_open(const implementation_type& impl) const { return impl.handle_ != INVALID_HANDLE_VALUE; } // Destroy a handle implementation. ASIO_DECL asio::error_code close(implementation_type& impl, asio::error_code& ec); // Get the native handle representation. native_handle_type native_handle(const implementation_type& impl) const { return impl.handle_; } // Cancel all operations associated with the handle. ASIO_DECL asio::error_code cancel(implementation_type& impl, asio::error_code& ec); // Write the given data. Returns the number of bytes written. template size_t write_some(implementation_type& impl, const ConstBufferSequence& buffers, asio::error_code& ec) { return write_some_at(impl, 0, buffers, ec); } // Write the given data at the specified offset. Returns the number of bytes // written. template size_t write_some_at(implementation_type& impl, uint64_t offset, const ConstBufferSequence& buffers, asio::error_code& ec) { asio::const_buffer buffer = buffer_sequence_adapter::first(buffers); return do_write(impl, offset, buffer, ec); } // Start an asynchronous write. The data being written must be valid for the // lifetime of the asynchronous operation. 
template void async_write_some(implementation_type& impl, const ConstBufferSequence& buffers, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_handle_write_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(buffers, handler); ASIO_HANDLER_CREATION((p.p, "handle", &impl, "async_write_some")); start_write_op(impl, 0, buffer_sequence_adapter::first(buffers), p.p); p.v = p.p = 0; } // Start an asynchronous write at a specified offset. The data being written // must be valid for the lifetime of the asynchronous operation. template void async_write_some_at(implementation_type& impl, uint64_t offset, const ConstBufferSequence& buffers, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_handle_write_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(buffers, handler); ASIO_HANDLER_CREATION((p.p, "handle", &impl, "async_write_some_at")); start_write_op(impl, offset, buffer_sequence_adapter::first(buffers), p.p); p.v = p.p = 0; } // Read some data. Returns the number of bytes received. template size_t read_some(implementation_type& impl, const MutableBufferSequence& buffers, asio::error_code& ec) { return read_some_at(impl, 0, buffers, ec); } // Read some data at a specified offset. Returns the number of bytes received. template size_t read_some_at(implementation_type& impl, uint64_t offset, const MutableBufferSequence& buffers, asio::error_code& ec) { asio::mutable_buffer buffer = buffer_sequence_adapter::first(buffers); return do_read(impl, offset, buffer, ec); } // Start an asynchronous read. The buffer for the data being received must be // valid for the lifetime of the asynchronous operation. 
template void async_read_some(implementation_type& impl, const MutableBufferSequence& buffers, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_handle_read_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(buffers, handler); ASIO_HANDLER_CREATION((p.p, "handle", &impl, "async_read_some")); start_read_op(impl, 0, buffer_sequence_adapter::first(buffers), p.p); p.v = p.p = 0; } // Start an asynchronous read at a specified offset. The buffer for the data // being received must be valid for the lifetime of the asynchronous // operation. template void async_read_some_at(implementation_type& impl, uint64_t offset, const MutableBufferSequence& buffers, Handler& handler) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_handle_read_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(buffers, handler); ASIO_HANDLER_CREATION((p.p, "handle", &impl, "async_read_some_at")); start_read_op(impl, offset, buffer_sequence_adapter::first(buffers), p.p); p.v = p.p = 0; } private: // Prevent the use of the null_buffers type with this service. 
size_t write_some(implementation_type& impl, const null_buffers& buffers, asio::error_code& ec); size_t write_some_at(implementation_type& impl, uint64_t offset, const null_buffers& buffers, asio::error_code& ec); template void async_write_some(implementation_type& impl, const null_buffers& buffers, Handler& handler); template void async_write_some_at(implementation_type& impl, uint64_t offset, const null_buffers& buffers, Handler& handler); size_t read_some(implementation_type& impl, const null_buffers& buffers, asio::error_code& ec); size_t read_some_at(implementation_type& impl, uint64_t offset, const null_buffers& buffers, asio::error_code& ec); template void async_read_some(implementation_type& impl, const null_buffers& buffers, Handler& handler); template void async_read_some_at(implementation_type& impl, uint64_t offset, const null_buffers& buffers, Handler& handler); // Helper class for waiting for synchronous operations to complete. class overlapped_wrapper; // Helper function to perform a synchronous write operation. ASIO_DECL size_t do_write(implementation_type& impl, uint64_t offset, const asio::const_buffer& buffer, asio::error_code& ec); // Helper function to start a write operation. ASIO_DECL void start_write_op(implementation_type& impl, uint64_t offset, const asio::const_buffer& buffer, operation* op); // Helper function to perform a synchronous write operation. ASIO_DECL size_t do_read(implementation_type& impl, uint64_t offset, const asio::mutable_buffer& buffer, asio::error_code& ec); // Helper function to start a read operation. ASIO_DECL void start_read_op(implementation_type& impl, uint64_t offset, const asio::mutable_buffer& buffer, operation* op); // Update the ID of the thread from which cancellation is safe. ASIO_DECL void update_cancellation_thread_id(implementation_type& impl); // Helper function to close a handle when the associated object is being // destroyed. 
ASIO_DECL void close_for_destruction(implementation_type& impl); // The IOCP service used for running asynchronous operations and dispatching // handlers. win_iocp_io_service& iocp_service_; // Mutex to protect access to the linked list of implementations. mutex mutex_; // The head of a linked list of all implementations. implementation_type* impl_list_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/win_iocp_handle_service.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_HANDLE_SERVICE_HPP galera-26.4.3/asio/asio/detail/static_mutex.hpp0000664000177500017540000000303213540715002017726 0ustar dbartmy// // detail/static_mutex.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_STATIC_MUTEX_HPP #define ASIO_DETAIL_STATIC_MUTEX_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) # include "asio/detail/null_static_mutex.hpp" #elif defined(ASIO_WINDOWS) # include "asio/detail/win_static_mutex.hpp" #elif defined(ASIO_HAS_PTHREADS) # include "asio/detail/posix_static_mutex.hpp" #elif defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) # include "asio/detail/std_static_mutex.hpp" #else # error Only Windows and POSIX are supported! 
#endif namespace asio { namespace detail { #if !defined(ASIO_HAS_THREADS) typedef null_static_mutex static_mutex; # define ASIO_STATIC_MUTEX_INIT ASIO_NULL_STATIC_MUTEX_INIT #elif defined(ASIO_WINDOWS) typedef win_static_mutex static_mutex; # define ASIO_STATIC_MUTEX_INIT ASIO_WIN_STATIC_MUTEX_INIT #elif defined(ASIO_HAS_PTHREADS) typedef posix_static_mutex static_mutex; # define ASIO_STATIC_MUTEX_INIT ASIO_POSIX_STATIC_MUTEX_INIT #elif defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) typedef std_static_mutex static_mutex; # define ASIO_STATIC_MUTEX_INIT ASIO_STD_STATIC_MUTEX_INIT #endif } // namespace detail } // namespace asio #endif // ASIO_DETAIL_STATIC_MUTEX_HPP galera-26.4.3/asio/asio/detail/posix_thread.hpp0000664000177500017540000000361613540715002017716 0ustar dbartmy// // detail/posix_thread.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_POSIX_THREAD_HPP #define ASIO_DETAIL_POSIX_THREAD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_PTHREADS) #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { extern "C" { ASIO_DECL void* asio_detail_posix_thread_function(void* arg); } class posix_thread : private noncopyable { public: // Constructor. template posix_thread(Function f, unsigned int = 0) : joined_(false) { start_thread(new func(f)); } // Destructor. ASIO_DECL ~posix_thread(); // Wait for the thread to exit. 
ASIO_DECL void join(); private: friend void* asio_detail_posix_thread_function(void* arg); class func_base { public: virtual ~func_base() {} virtual void run() = 0; }; struct auto_func_base_ptr { func_base* ptr; ~auto_func_base_ptr() { delete ptr; } }; template class func : public func_base { public: func(Function f) : f_(f) { } virtual void run() { f_(); } private: Function f_; }; ASIO_DECL void start_thread(func_base* arg); ::pthread_t thread_; bool joined_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/posix_thread.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_PTHREADS) #endif // ASIO_DETAIL_POSIX_THREAD_HPP galera-26.4.3/asio/asio/detail/assert.hpp0000664000177500017540000000160013540715002016515 0ustar dbartmy// // detail/assert.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_ASSERT_HPP #define ASIO_DETAIL_ASSERT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_BOOST_ASSERT) # include #else // defined(ASIO_HAS_BOOST_ASSERT) # include #endif // defined(ASIO_HAS_BOOST_ASSERT) #if defined(ASIO_HAS_BOOST_ASSERT) # define ASIO_ASSERT(expr) BOOST_ASSERT(expr) #else // defined(ASIO_HAS_BOOST_ASSERT) # define ASIO_ASSERT(expr) assert(expr) #endif // defined(ASIO_HAS_BOOST_ASSERT) #endif // ASIO_DETAIL_ASSERT_HPP galera-26.4.3/asio/asio/detail/descriptor_ops.hpp0000664000177500017540000000613713540715002020265 0ustar dbartmy// // detail/descriptor_ops.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_DESCRIPTOR_OPS_HPP #define ASIO_DETAIL_DESCRIPTOR_OPS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) #include #include "asio/error_code.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { namespace descriptor_ops { // Descriptor state bits. enum { // The user wants a non-blocking descriptor. user_set_non_blocking = 1, // The descriptor has been set non-blocking. internal_non_blocking = 2, // Helper "state" used to determine whether the descriptor is non-blocking. non_blocking = user_set_non_blocking | internal_non_blocking, // The descriptor may have been dup()-ed. 
possible_dup = 4 }; typedef unsigned char state_type; template inline ReturnType error_wrapper(ReturnType return_value, asio::error_code& ec) { ec = asio::error_code(errno, asio::error::get_system_category()); return return_value; } ASIO_DECL int open(const char* path, int flags, asio::error_code& ec); ASIO_DECL int close(int d, state_type& state, asio::error_code& ec); ASIO_DECL bool set_user_non_blocking(int d, state_type& state, bool value, asio::error_code& ec); ASIO_DECL bool set_internal_non_blocking(int d, state_type& state, bool value, asio::error_code& ec); typedef iovec buf; ASIO_DECL std::size_t sync_read(int d, state_type state, buf* bufs, std::size_t count, bool all_empty, asio::error_code& ec); ASIO_DECL bool non_blocking_read(int d, buf* bufs, std::size_t count, asio::error_code& ec, std::size_t& bytes_transferred); ASIO_DECL std::size_t sync_write(int d, state_type state, const buf* bufs, std::size_t count, bool all_empty, asio::error_code& ec); ASIO_DECL bool non_blocking_write(int d, const buf* bufs, std::size_t count, asio::error_code& ec, std::size_t& bytes_transferred); ASIO_DECL int ioctl(int d, state_type& state, long cmd, ioctl_arg_type* arg, asio::error_code& ec); ASIO_DECL int fcntl(int d, int cmd, asio::error_code& ec); ASIO_DECL int fcntl(int d, int cmd, long arg, asio::error_code& ec); ASIO_DECL int poll_read(int d, state_type state, asio::error_code& ec); ASIO_DECL int poll_write(int d, state_type state, asio::error_code& ec); } // namespace descriptor_ops } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/descriptor_ops.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) #endif // ASIO_DETAIL_DESCRIPTOR_OPS_HPP galera-26.4.3/asio/asio/detail/task_io_service_operation.hpp0000664000177500017540000000347013540715002022454 0ustar dbartmy// // 
detail/task_io_service_operation.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_TASK_IO_SERVICE_OPERATION_HPP #define ASIO_DETAIL_TASK_IO_SERVICE_OPERATION_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/error_code.hpp" #include "asio/detail/handler_tracking.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class task_io_service; // Base class for all operations. A function pointer is used instead of virtual // functions to avoid the associated overhead. class task_io_service_operation ASIO_INHERIT_TRACKED_HANDLER { public: void complete(task_io_service& owner, const asio::error_code& ec, std::size_t bytes_transferred) { func_(&owner, this, ec, bytes_transferred); } void destroy() { func_(0, this, asio::error_code(), 0); } protected: typedef void (*func_type)(task_io_service*, task_io_service_operation*, const asio::error_code&, std::size_t); task_io_service_operation(func_type func) : next_(0), func_(func), task_result_(0) { } // Prevents deletion through this type. ~task_io_service_operation() { } private: friend class op_queue_access; task_io_service_operation* next_; func_type func_; protected: friend class task_io_service; unsigned int task_result_; // Passed into bytes transferred. }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_TASK_IO_SERVICE_OPERATION_HPP galera-26.4.3/asio/asio/detail/timer_scheduler.hpp0000664000177500017540000000204513540715002020376 0ustar dbartmy// // detail/timer_scheduler.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_TIMER_SCHEDULER_HPP #define ASIO_DETAIL_TIMER_SCHEDULER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/timer_scheduler_fwd.hpp" #if defined(ASIO_WINDOWS_RUNTIME) # include "asio/detail/winrt_timer_scheduler.hpp" #elif defined(ASIO_HAS_IOCP) # include "asio/detail/win_iocp_io_service.hpp" #elif defined(ASIO_HAS_EPOLL) # include "asio/detail/epoll_reactor.hpp" #elif defined(ASIO_HAS_KQUEUE) # include "asio/detail/kqueue_reactor.hpp" #elif defined(ASIO_HAS_DEV_POLL) # include "asio/detail/dev_poll_reactor.hpp" #else # include "asio/detail/select_reactor.hpp" #endif #endif // ASIO_DETAIL_TIMER_SCHEDULER_HPP galera-26.4.3/asio/asio/detail/socket_holder.hpp0000664000177500017540000000377313540715002020056 0ustar dbartmy// // detail/socket_holder.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SOCKET_HOLDER_HPP #define ASIO_DETAIL_SOCKET_HOLDER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Implement the resource acquisition is initialisation idiom for sockets. class socket_holder : private noncopyable { public: // Construct as an uninitialised socket. 
socket_holder() : socket_(invalid_socket) { } // Construct to take ownership of the specified socket. explicit socket_holder(socket_type s) : socket_(s) { } // Destructor. ~socket_holder() { if (socket_ != invalid_socket) { asio::error_code ec; socket_ops::state_type state = 0; socket_ops::close(socket_, state, true, ec); } } // Get the underlying socket. socket_type get() const { return socket_; } // Reset to an uninitialised socket. void reset() { if (socket_ != invalid_socket) { asio::error_code ec; socket_ops::state_type state = 0; socket_ops::close(socket_, state, true, ec); socket_ = invalid_socket; } } // Reset to take ownership of the specified socket. void reset(socket_type s) { reset(); socket_ = s; } // Release ownership of the socket. socket_type release() { socket_type tmp = socket_; socket_ = invalid_socket; return tmp; } private: // The underlying socket. socket_type socket_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_SOCKET_HOLDER_HPP galera-26.4.3/asio/asio/detail/win_iocp_socket_accept_op.hpp0000664000177500017540000001177013540715002022421 0ustar dbartmy// // detail/win_iocp_socket_accept_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_SOCKET_ACCEPT_OP_HPP #define ASIO_DETAIL_WIN_IOCP_SOCKET_ACCEPT_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/win_iocp_socket_service_base.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class win_iocp_socket_accept_op : public operation { public: ASIO_DEFINE_HANDLER_PTR(win_iocp_socket_accept_op); win_iocp_socket_accept_op(win_iocp_socket_service_base& socket_service, socket_type socket, Socket& peer, const Protocol& protocol, typename Protocol::endpoint* peer_endpoint, bool enable_connection_aborted, Handler& handler) : operation(&win_iocp_socket_accept_op::do_complete), socket_service_(socket_service), socket_(socket), peer_(peer), protocol_(protocol), peer_endpoint_(peer_endpoint), enable_connection_aborted_(enable_connection_aborted), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } socket_holder& new_socket() { return new_socket_; } void* output_buffer() { return output_buffer_; } DWORD address_length() { return sizeof(sockaddr_storage_type) + 16; } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& result_ec, std::size_t /*bytes_transferred*/) { asio::error_code ec(result_ec); // Take ownership of the operation object. 
win_iocp_socket_accept_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; if (owner) { typename Protocol::endpoint peer_endpoint; std::size_t addr_len = peer_endpoint.capacity(); socket_ops::complete_iocp_accept(o->socket_, o->output_buffer(), o->address_length(), peer_endpoint.data(), &addr_len, o->new_socket_.get(), ec); // Restart the accept operation if we got the connection_aborted error // and the enable_connection_aborted socket option is not set. if (ec == asio::error::connection_aborted && !o->enable_connection_aborted_) { o->reset(); o->socket_service_.restart_accept_op(o->socket_, o->new_socket_, o->protocol_.family(), o->protocol_.type(), o->protocol_.protocol(), o->output_buffer(), o->address_length(), o); p.v = p.p = 0; return; } // If the socket was successfully accepted, transfer ownership of the // socket to the peer object. if (!ec) { o->peer_.assign(o->protocol_, typename Socket::native_handle_type( o->new_socket_.get(), peer_endpoint), ec); if (!ec) o->new_socket_.release(); } // Pass endpoint back to caller. if (o->peer_endpoint_) *o->peer_endpoint_ = peer_endpoint; } ASIO_HANDLER_COMPLETION((o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder1 handler(o->handler_, ec); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. 
if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: win_iocp_socket_service_base& socket_service_; socket_type socket_; socket_holder new_socket_; Socket& peer_; Protocol protocol_; typename Protocol::endpoint* peer_endpoint_; unsigned char output_buffer_[(sizeof(sockaddr_storage_type) + 16) * 2]; bool enable_connection_aborted_; Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_SOCKET_ACCEPT_OP_HPP galera-26.4.3/asio/asio/detail/win_iocp_overlapped_ptr.hpp0000664000177500017540000000653113540715002022141 0ustar dbartmy// // detail/win_iocp_overlapped_ptr.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_OVERLAPPED_PTR_HPP #define ASIO_DETAIL_WIN_IOCP_OVERLAPPED_PTR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/io_service.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/win_iocp_overlapped_op.hpp" #include "asio/detail/win_iocp_io_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Wraps a handler to create an OVERLAPPED object for use with overlapped I/O. class win_iocp_overlapped_ptr : private noncopyable { public: // Construct an empty win_iocp_overlapped_ptr. 
win_iocp_overlapped_ptr() : ptr_(0), iocp_service_(0) { } // Construct an win_iocp_overlapped_ptr to contain the specified handler. template explicit win_iocp_overlapped_ptr( asio::io_service& io_service, ASIO_MOVE_ARG(Handler) handler) : ptr_(0), iocp_service_(0) { this->reset(io_service, ASIO_MOVE_CAST(Handler)(handler)); } // Destructor automatically frees the OVERLAPPED object unless released. ~win_iocp_overlapped_ptr() { reset(); } // Reset to empty. void reset() { if (ptr_) { ptr_->destroy(); ptr_ = 0; iocp_service_->work_finished(); iocp_service_ = 0; } } // Reset to contain the specified handler, freeing any current OVERLAPPED // object. template void reset(asio::io_service& io_service, Handler handler) { typedef win_iocp_overlapped_op op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "io_service", &io_service.impl_, "overlapped")); io_service.impl_.work_started(); reset(); ptr_ = p.p; p.v = p.p = 0; iocp_service_ = &io_service.impl_; } // Get the contained OVERLAPPED object. OVERLAPPED* get() { return ptr_; } // Get the contained OVERLAPPED object. const OVERLAPPED* get() const { return ptr_; } // Release ownership of the OVERLAPPED object. OVERLAPPED* release() { if (ptr_) iocp_service_->on_pending(ptr_); OVERLAPPED* tmp = ptr_; ptr_ = 0; iocp_service_ = 0; return tmp; } // Post completion notification for overlapped operation. Releases ownership. 
void complete(const asio::error_code& ec, std::size_t bytes_transferred) { if (ptr_) { iocp_service_->on_completion(ptr_, ec, static_cast(bytes_transferred)); ptr_ = 0; iocp_service_ = 0; } } private: win_iocp_operation* ptr_; win_iocp_io_service* iocp_service_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_OVERLAPPED_PTR_HPP galera-26.4.3/asio/asio/detail/impl/0000775000177500017540000000000013540715002015447 5ustar dbartmygalera-26.4.3/asio/asio/detail/impl/win_iocp_io_service.ipp0000664000177500017540000003462413540715002022210 0ustar dbartmy// // detail/impl/win_iocp_io_service.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_IOCP_IO_SERVICE_IPP #define ASIO_DETAIL_IMPL_WIN_IOCP_IO_SERVICE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/detail/cstdint.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/limits.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/win_iocp_io_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct win_iocp_io_service::work_finished_on_block_exit { ~work_finished_on_block_exit() { io_service_->work_finished(); } win_iocp_io_service* io_service_; }; struct win_iocp_io_service::timer_thread_function { void operator()() { while (::InterlockedExchangeAdd(&io_service_->shutdown_, 0) == 0) { if (::WaitForSingleObject(io_service_->waitable_timer_.handle, 
INFINITE) == WAIT_OBJECT_0) { ::InterlockedExchange(&io_service_->dispatch_required_, 1); ::PostQueuedCompletionStatus(io_service_->iocp_.handle, 0, wake_for_dispatch, 0); } } } win_iocp_io_service* io_service_; }; win_iocp_io_service::win_iocp_io_service( asio::io_service& io_service, size_t concurrency_hint) : asio::detail::service_base(io_service), iocp_(), outstanding_work_(0), stopped_(0), stop_event_posted_(0), shutdown_(0), gqcs_timeout_(get_gqcs_timeout()), dispatch_required_(0) { ASIO_HANDLER_TRACKING_INIT; iocp_.handle = ::CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, 0, static_cast(concurrency_hint < DWORD(~0) ? concurrency_hint : DWORD(~0))); if (!iocp_.handle) { DWORD last_error = ::GetLastError(); asio::error_code ec(last_error, asio::error::get_system_category()); asio::detail::throw_error(ec, "iocp"); } } void win_iocp_io_service::shutdown_service() { ::InterlockedExchange(&shutdown_, 1); if (timer_thread_.get()) { LARGE_INTEGER timeout; timeout.QuadPart = 1; ::SetWaitableTimer(waitable_timer_.handle, &timeout, 1, 0, 0, FALSE); } while (::InterlockedExchangeAdd(&outstanding_work_, 0) > 0) { op_queue ops; timer_queues_.get_all_timers(ops); ops.push(completed_ops_); if (!ops.empty()) { while (win_iocp_operation* op = ops.front()) { ops.pop(); ::InterlockedDecrement(&outstanding_work_); op->destroy(); } } else { DWORD bytes_transferred = 0; dword_ptr_t completion_key = 0; LPOVERLAPPED overlapped = 0; ::GetQueuedCompletionStatus(iocp_.handle, &bytes_transferred, &completion_key, &overlapped, gqcs_timeout_); if (overlapped) { ::InterlockedDecrement(&outstanding_work_); static_cast(overlapped)->destroy(); } } } if (timer_thread_.get()) timer_thread_->join(); } asio::error_code win_iocp_io_service::register_handle( HANDLE handle, asio::error_code& ec) { if (::CreateIoCompletionPort(handle, iocp_.handle, 0, 0) == 0) { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); } else { ec = 
asio::error_code(); } return ec; } size_t win_iocp_io_service::run(asio::error_code& ec) { if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0) { stop(); ec = asio::error_code(); return 0; } win_iocp_thread_info this_thread; thread_call_stack::context ctx(this, this_thread); size_t n = 0; while (do_one(true, ec)) if (n != (std::numeric_limits::max)()) ++n; return n; } size_t win_iocp_io_service::run_one(asio::error_code& ec) { if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0) { stop(); ec = asio::error_code(); return 0; } win_iocp_thread_info this_thread; thread_call_stack::context ctx(this, this_thread); return do_one(true, ec); } size_t win_iocp_io_service::poll(asio::error_code& ec) { if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0) { stop(); ec = asio::error_code(); return 0; } win_iocp_thread_info this_thread; thread_call_stack::context ctx(this, this_thread); size_t n = 0; while (do_one(false, ec)) if (n != (std::numeric_limits::max)()) ++n; return n; } size_t win_iocp_io_service::poll_one(asio::error_code& ec) { if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0) { stop(); ec = asio::error_code(); return 0; } win_iocp_thread_info this_thread; thread_call_stack::context ctx(this, this_thread); return do_one(false, ec); } void win_iocp_io_service::stop() { if (::InterlockedExchange(&stopped_, 1) == 0) { if (::InterlockedExchange(&stop_event_posted_, 1) == 0) { if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, 0)) { DWORD last_error = ::GetLastError(); asio::error_code ec(last_error, asio::error::get_system_category()); asio::detail::throw_error(ec, "pqcs"); } } } } void win_iocp_io_service::post_deferred_completion(win_iocp_operation* op) { // Flag the operation as ready. op->ready_ = 1; // Enqueue the operation on the I/O completion port. if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, op)) { // Out of resources. Put on completed queue instead. 
mutex::scoped_lock lock(dispatch_mutex_); completed_ops_.push(op); ::InterlockedExchange(&dispatch_required_, 1); } } void win_iocp_io_service::post_deferred_completions( op_queue& ops) { while (win_iocp_operation* op = ops.front()) { ops.pop(); // Flag the operation as ready. op->ready_ = 1; // Enqueue the operation on the I/O completion port. if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, op)) { // Out of resources. Put on completed queue instead. mutex::scoped_lock lock(dispatch_mutex_); completed_ops_.push(op); completed_ops_.push(ops); ::InterlockedExchange(&dispatch_required_, 1); } } } void win_iocp_io_service::abandon_operations( op_queue& ops) { while (win_iocp_operation* op = ops.front()) { ops.pop(); ::InterlockedDecrement(&outstanding_work_); op->destroy(); } } void win_iocp_io_service::on_pending(win_iocp_operation* op) { if (::InterlockedCompareExchange(&op->ready_, 1, 0) == 1) { // Enqueue the operation on the I/O completion port. if (!::PostQueuedCompletionStatus(iocp_.handle, 0, overlapped_contains_result, op)) { // Out of resources. Put on completed queue instead. mutex::scoped_lock lock(dispatch_mutex_); completed_ops_.push(op); ::InterlockedExchange(&dispatch_required_, 1); } } } void win_iocp_io_service::on_completion(win_iocp_operation* op, DWORD last_error, DWORD bytes_transferred) { // Flag that the operation is ready for invocation. op->ready_ = 1; // Store results in the OVERLAPPED structure. op->Internal = reinterpret_cast( &asio::error::get_system_category()); op->Offset = last_error; op->OffsetHigh = bytes_transferred; // Enqueue the operation on the I/O completion port. if (!::PostQueuedCompletionStatus(iocp_.handle, 0, overlapped_contains_result, op)) { // Out of resources. Put on completed queue instead. 
mutex::scoped_lock lock(dispatch_mutex_); completed_ops_.push(op); ::InterlockedExchange(&dispatch_required_, 1); } } void win_iocp_io_service::on_completion(win_iocp_operation* op, const asio::error_code& ec, DWORD bytes_transferred) { // Flag that the operation is ready for invocation. op->ready_ = 1; // Store results in the OVERLAPPED structure. op->Internal = reinterpret_cast(&ec.category()); op->Offset = ec.value(); op->OffsetHigh = bytes_transferred; // Enqueue the operation on the I/O completion port. if (!::PostQueuedCompletionStatus(iocp_.handle, 0, overlapped_contains_result, op)) { // Out of resources. Put on completed queue instead. mutex::scoped_lock lock(dispatch_mutex_); completed_ops_.push(op); ::InterlockedExchange(&dispatch_required_, 1); } } size_t win_iocp_io_service::do_one(bool block, asio::error_code& ec) { for (;;) { // Try to acquire responsibility for dispatching timers and completed ops. if (::InterlockedCompareExchange(&dispatch_required_, 0, 1) == 1) { mutex::scoped_lock lock(dispatch_mutex_); // Dispatch pending timers and operations. op_queue ops; ops.push(completed_ops_); timer_queues_.get_ready_timers(ops); post_deferred_completions(ops); update_timeout(); } // Get the next operation from the queue. DWORD bytes_transferred = 0; dword_ptr_t completion_key = 0; LPOVERLAPPED overlapped = 0; ::SetLastError(0); BOOL ok = ::GetQueuedCompletionStatus(iocp_.handle, &bytes_transferred, &completion_key, &overlapped, block ? gqcs_timeout_ : 0); DWORD last_error = ::GetLastError(); if (overlapped) { win_iocp_operation* op = static_cast(overlapped); asio::error_code result_ec(last_error, asio::error::get_system_category()); // We may have been passed the last_error and bytes_transferred in the // OVERLAPPED structure itself. 
if (completion_key == overlapped_contains_result) { result_ec = asio::error_code(static_cast(op->Offset), *reinterpret_cast(op->Internal)); bytes_transferred = op->OffsetHigh; } // Otherwise ensure any result has been saved into the OVERLAPPED // structure. else { op->Internal = reinterpret_cast(&result_ec.category()); op->Offset = result_ec.value(); op->OffsetHigh = bytes_transferred; } // Dispatch the operation only if ready. The operation may not be ready // if the initiating function (e.g. a call to WSARecv) has not yet // returned. This is because the initiating function still wants access // to the operation's OVERLAPPED structure. if (::InterlockedCompareExchange(&op->ready_, 1, 0) == 1) { // Ensure the count of outstanding work is decremented on block exit. work_finished_on_block_exit on_exit = { this }; (void)on_exit; op->complete(*this, result_ec, bytes_transferred); ec = asio::error_code(); return 1; } } else if (!ok) { if (last_error != WAIT_TIMEOUT) { ec = asio::error_code(last_error, asio::error::get_system_category()); return 0; } // If we're not polling we need to keep going until we get a real handler. if (block) continue; ec = asio::error_code(); return 0; } else if (completion_key == wake_for_dispatch) { // We have been woken up to try to acquire responsibility for dispatching // timers and completed operations. } else { // Indicate that there is no longer an in-flight stop event. ::InterlockedExchange(&stop_event_posted_, 0); // The stopped_ flag is always checked to ensure that any leftover // stop events from a previous run invocation are ignored. if (::InterlockedExchangeAdd(&stopped_, 0) != 0) { // Wake up next thread that is blocked on GetQueuedCompletionStatus. 
if (::InterlockedExchange(&stop_event_posted_, 1) == 0) { if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, 0)) { last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); return 0; } } ec = asio::error_code(); return 0; } } } } DWORD win_iocp_io_service::get_gqcs_timeout() { OSVERSIONINFOEX osvi; ZeroMemory(&osvi, sizeof(osvi)); osvi.dwOSVersionInfoSize = sizeof(osvi); osvi.dwMajorVersion = 6ul; const uint64_t condition_mask = ::VerSetConditionMask( 0, VER_MAJORVERSION, VER_GREATER_EQUAL); if (!!::VerifyVersionInfo(&osvi, VER_MAJORVERSION, condition_mask)) return INFINITE; return default_gqcs_timeout; } void win_iocp_io_service::do_add_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(dispatch_mutex_); timer_queues_.insert(&queue); if (!waitable_timer_.handle) { waitable_timer_.handle = ::CreateWaitableTimer(0, FALSE, 0); if (waitable_timer_.handle == 0) { DWORD last_error = ::GetLastError(); asio::error_code ec(last_error, asio::error::get_system_category()); asio::detail::throw_error(ec, "timer"); } LARGE_INTEGER timeout; timeout.QuadPart = -max_timeout_usec; timeout.QuadPart *= 10; ::SetWaitableTimer(waitable_timer_.handle, &timeout, max_timeout_msec, 0, 0, FALSE); } if (!timer_thread_.get()) { timer_thread_function thread_function = { this }; timer_thread_.reset(new thread(thread_function, 65536)); } } void win_iocp_io_service::do_remove_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(dispatch_mutex_); timer_queues_.erase(&queue); } void win_iocp_io_service::update_timeout() { if (timer_thread_.get()) { // There's no point updating the waitable timer if the new timeout period // exceeds the maximum timeout. In that case, we might as well wait for the // existing period of the timer to expire. 
long timeout_usec = timer_queues_.wait_duration_usec(max_timeout_usec); if (timeout_usec < max_timeout_usec) { LARGE_INTEGER timeout; timeout.QuadPart = -timeout_usec; timeout.QuadPart *= 10; ::SetWaitableTimer(waitable_timer_.handle, &timeout, max_timeout_msec, 0, 0, FALSE); } } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_IMPL_WIN_IOCP_IO_SERVICE_IPP galera-26.4.3/asio/asio/detail/impl/select_reactor.hpp0000664000177500017540000000453313540715002021163 0ustar dbartmy// // detail/impl/select_reactor.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_SELECT_REACTOR_HPP #define ASIO_DETAIL_IMPL_SELECT_REACTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) \ || (!defined(ASIO_HAS_DEV_POLL) \ && !defined(ASIO_HAS_EPOLL) \ && !defined(ASIO_HAS_KQUEUE) \ && !defined(ASIO_WINDOWS_RUNTIME)) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template void select_reactor::add_timer_queue(timer_queue& queue) { do_add_timer_queue(queue); } // Remove a timer queue from the reactor. 
template void select_reactor::remove_timer_queue(timer_queue& queue) { do_remove_timer_queue(queue); } template void select_reactor::schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op) { asio::detail::mutex::scoped_lock lock(mutex_); if (shutdown_) { io_service_.post_immediate_completion(op, false); return; } bool earliest = queue.enqueue_timer(time, timer, op); io_service_.work_started(); if (earliest) interrupter_.interrupt(); } template std::size_t select_reactor::cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled) { asio::detail::mutex::scoped_lock lock(mutex_); op_queue ops; std::size_t n = queue.cancel_timer(timer, ops, max_cancelled); lock.unlock(); io_service_.post_deferred_completions(ops); return n; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) // || (!defined(ASIO_HAS_DEV_POLL) // && !defined(ASIO_HAS_EPOLL) // && !defined(ASIO_HAS_KQUEUE) // && !defined(ASIO_WINDOWS_RUNTIME)) #endif // ASIO_DETAIL_IMPL_SELECT_REACTOR_HPP galera-26.4.3/asio/asio/detail/impl/win_static_mutex.ipp0000664000177500017540000000657013540715002021557 0ustar dbartmy// // detail/impl/win_static_mutex.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_STATIC_MUTEX_IPP #define ASIO_DETAIL_IMPL_WIN_STATIC_MUTEX_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) #include #include "asio/detail/throw_error.hpp" #include "asio/detail/win_static_mutex.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { void win_static_mutex::init() { int error = do_init(); asio::error_code ec(error, asio::error::get_system_category()); asio::detail::throw_error(ec, "static_mutex"); } int win_static_mutex::do_init() { using namespace std; // For sprintf. wchar_t mutex_name[128]; #if defined(ASIO_HAS_SECURE_RTL) swprintf_s( #else // defined(ASIO_HAS_SECURE_RTL) _snwprintf( #endif // defined(ASIO_HAS_SECURE_RTL) mutex_name, 128, L"asio-58CCDC44-6264-4842-90C2-F3C545CB8AA7-%u-%p", static_cast(::GetCurrentProcessId()), this); #if defined(ASIO_WINDOWS_APP) HANDLE mutex = ::CreateMutexExW(0, mutex_name, CREATE_MUTEX_INITIAL_OWNER, 0); #else // defined(ASIO_WINDOWS_APP) HANDLE mutex = ::CreateMutexW(0, TRUE, mutex_name); #endif // defined(ASIO_WINDOWS_APP) DWORD last_error = ::GetLastError(); if (mutex == 0) return ::GetLastError(); if (last_error == ERROR_ALREADY_EXISTS) { #if defined(ASIO_WINDOWS_APP) ::WaitForSingleObjectEx(mutex, INFINITE, false); #else // defined(ASIO_WINDOWS_APP) ::WaitForSingleObject(mutex, INFINITE); #endif // defined(ASIO_WINDOWS_APP) } if (initialised_) { ::ReleaseMutex(mutex); ::CloseHandle(mutex); return 0; } #if defined(__MINGW32__) // Not sure if MinGW supports structured exception handling, so for now // we'll just call the Windows API and hope. 
# if defined(UNDER_CE) ::InitializeCriticalSection(&crit_section_); # else if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000)) { last_error = ::GetLastError(); ::ReleaseMutex(mutex); ::CloseHandle(mutex); return last_error; } # endif #else __try { # if defined(UNDER_CE) ::InitializeCriticalSection(&crit_section_); # elif defined(ASIO_WINDOWS_APP) if (!::InitializeCriticalSectionEx(&crit_section_, 0, 0)) { last_error = ::GetLastError(); ::ReleaseMutex(mutex); ::CloseHandle(mutex); return last_error; } # else if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000)) { last_error = ::GetLastError(); ::ReleaseMutex(mutex); ::CloseHandle(mutex); return last_error; } # endif } __except(GetExceptionCode() == STATUS_NO_MEMORY ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) { ::ReleaseMutex(mutex); ::CloseHandle(mutex); return ERROR_OUTOFMEMORY; } #endif initialised_ = true; ::ReleaseMutex(mutex); ::CloseHandle(mutex); return 0; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS) #endif // ASIO_DETAIL_IMPL_WIN_STATIC_MUTEX_IPP galera-26.4.3/asio/asio/detail/impl/posix_tss_ptr.ipp0000664000177500017540000000215513540715002021104 0ustar dbartmy// // detail/impl/posix_tss_ptr.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_POSIX_TSS_PTR_IPP #define ASIO_DETAIL_IMPL_POSIX_TSS_PTR_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_PTHREADS) #include "asio/detail/posix_tss_ptr.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { void posix_tss_ptr_create(pthread_key_t& key) { int error = ::pthread_key_create(&key, 0); asio::error_code ec(error, asio::error::get_system_category()); asio::detail::throw_error(ec, "tss"); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_PTHREADS) #endif // ASIO_DETAIL_IMPL_POSIX_TSS_PTR_IPP galera-26.4.3/asio/asio/detail/impl/service_registry.ipp0000664000177500017540000001233513540715002021555 0ustar dbartmy// // detail/impl/service_registry.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_SERVICE_REGISTRY_IPP #define ASIO_DETAIL_IMPL_SERVICE_REGISTRY_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/service_registry.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { service_registry::~service_registry() { // Shutdown all services. This must be done in a separate loop before the // services are destroyed since the destructors of user-defined handler // objects may try to access other service objects. 
asio::io_service::service* service = first_service_; while (service) { service->shutdown_service(); service = service->next_; } // Destroy all services. while (first_service_) { asio::io_service::service* next_service = first_service_->next_; destroy(first_service_); first_service_ = next_service; } } void service_registry::notify_fork(asio::io_service::fork_event fork_ev) { // Make a copy of all of the services while holding the lock. We don't want // to hold the lock while calling into each service, as it may try to call // back into this class. std::vector services; { asio::detail::mutex::scoped_lock lock(mutex_); asio::io_service::service* service = first_service_; while (service) { services.push_back(service); service = service->next_; } } // If processing the fork_prepare event, we want to go in reverse order of // service registration, which happens to be the existing order of the // services in the vector. For the other events we want to go in the other // direction. std::size_t num_services = services.size(); if (fork_ev == asio::io_service::fork_prepare) for (std::size_t i = 0; i < num_services; ++i) services[i]->fork_service(fork_ev); else for (std::size_t i = num_services; i > 0; --i) services[i - 1]->fork_service(fork_ev); } void service_registry::init_key(asio::io_service::service::key& key, const asio::io_service::id& id) { key.type_info_ = 0; key.id_ = &id; } bool service_registry::keys_match( const asio::io_service::service::key& key1, const asio::io_service::service::key& key2) { if (key1.id_ && key2.id_) if (key1.id_ == key2.id_) return true; if (key1.type_info_ && key2.type_info_) if (*key1.type_info_ == *key2.type_info_) return true; return false; } void service_registry::destroy(asio::io_service::service* service) { delete service; } asio::io_service::service* service_registry::do_use_service( const asio::io_service::service::key& key, factory_type factory) { asio::detail::mutex::scoped_lock lock(mutex_); // First see if there is an existing 
service object with the given key. asio::io_service::service* service = first_service_; while (service) { if (keys_match(service->key_, key)) return service; service = service->next_; } // Create a new service object. The service registry's mutex is not locked // at this time to allow for nested calls into this function from the new // service's constructor. lock.unlock(); auto_service_ptr new_service = { factory(owner_) }; new_service.ptr_->key_ = key; lock.lock(); // Check that nobody else created another service object of the same type // while the lock was released. service = first_service_; while (service) { if (keys_match(service->key_, key)) return service; service = service->next_; } // Service was successfully initialised, pass ownership to registry. new_service.ptr_->next_ = first_service_; first_service_ = new_service.ptr_; new_service.ptr_ = 0; return first_service_; } void service_registry::do_add_service( const asio::io_service::service::key& key, asio::io_service::service* new_service) { if (&owner_ != &new_service->get_io_service()) asio::detail::throw_exception(invalid_service_owner()); asio::detail::mutex::scoped_lock lock(mutex_); // Check if there is an existing service object with the given key. asio::io_service::service* service = first_service_; while (service) { if (keys_match(service->key_, key)) asio::detail::throw_exception(service_already_exists()); service = service->next_; } // Take ownership of the service object. 
new_service->key_ = key; new_service->next_ = first_service_; first_service_ = new_service; } bool service_registry::do_has_service( const asio::io_service::service::key& key) const { asio::detail::mutex::scoped_lock lock(mutex_); asio::io_service::service* service = first_service_; while (service) { if (keys_match(service->key_, key)) return true; service = service->next_; } return false; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IMPL_SERVICE_REGISTRY_IPP galera-26.4.3/asio/asio/detail/impl/select_reactor.ipp0000664000177500017540000002064513540715002021166 0ustar dbartmy// // detail/impl/select_reactor.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_SELECT_REACTOR_IPP #define ASIO_DETAIL_IMPL_SELECT_REACTOR_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) \ || (!defined(ASIO_HAS_DEV_POLL) \ && !defined(ASIO_HAS_EPOLL) \ && !defined(ASIO_HAS_KQUEUE) \ && !defined(ASIO_WINDOWS_RUNTIME)) #include "asio/detail/bind_handler.hpp" #include "asio/detail/fd_set_adapter.hpp" #include "asio/detail/select_reactor.hpp" #include "asio/detail/signal_blocker.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { select_reactor::select_reactor(asio::io_service& io_service) : asio::detail::service_base(io_service), io_service_(use_service(io_service)), mutex_(), interrupter_(), #if defined(ASIO_HAS_IOCP) stop_thread_(false), thread_(0), #endif // defined(ASIO_HAS_IOCP) shutdown_(false) { #if defined(ASIO_HAS_IOCP) asio::detail::signal_blocker sb; thread_ = new asio::detail::thread( 
bind_handler(&select_reactor::call_run_thread, this)); #endif // defined(ASIO_HAS_IOCP) } select_reactor::~select_reactor() { shutdown_service(); } void select_reactor::shutdown_service() { asio::detail::mutex::scoped_lock lock(mutex_); shutdown_ = true; #if defined(ASIO_HAS_IOCP) stop_thread_ = true; #endif // defined(ASIO_HAS_IOCP) lock.unlock(); #if defined(ASIO_HAS_IOCP) if (thread_) { interrupter_.interrupt(); thread_->join(); delete thread_; thread_ = 0; } #endif // defined(ASIO_HAS_IOCP) op_queue ops; for (int i = 0; i < max_ops; ++i) op_queue_[i].get_all_operations(ops); timer_queues_.get_all_timers(ops); io_service_.abandon_operations(ops); } void select_reactor::fork_service(asio::io_service::fork_event fork_ev) { if (fork_ev == asio::io_service::fork_child) interrupter_.recreate(); } void select_reactor::init_task() { io_service_.init_task(); } int select_reactor::register_descriptor(socket_type, select_reactor::per_descriptor_data&) { return 0; } int select_reactor::register_internal_descriptor( int op_type, socket_type descriptor, select_reactor::per_descriptor_data&, reactor_op* op) { asio::detail::mutex::scoped_lock lock(mutex_); op_queue_[op_type].enqueue_operation(descriptor, op); interrupter_.interrupt(); return 0; } void select_reactor::move_descriptor(socket_type, select_reactor::per_descriptor_data&, select_reactor::per_descriptor_data&) { } void select_reactor::start_op(int op_type, socket_type descriptor, select_reactor::per_descriptor_data&, reactor_op* op, bool is_continuation, bool) { asio::detail::mutex::scoped_lock lock(mutex_); if (shutdown_) { post_immediate_completion(op, is_continuation); return; } bool first = op_queue_[op_type].enqueue_operation(descriptor, op); io_service_.work_started(); if (first) interrupter_.interrupt(); } void select_reactor::cancel_ops(socket_type descriptor, select_reactor::per_descriptor_data&) { asio::detail::mutex::scoped_lock lock(mutex_); cancel_ops_unlocked(descriptor, asio::error::operation_aborted); 
} void select_reactor::deregister_descriptor(socket_type descriptor, select_reactor::per_descriptor_data&, bool) { asio::detail::mutex::scoped_lock lock(mutex_); cancel_ops_unlocked(descriptor, asio::error::operation_aborted); } void select_reactor::deregister_internal_descriptor( socket_type descriptor, select_reactor::per_descriptor_data&) { asio::detail::mutex::scoped_lock lock(mutex_); op_queue ops; for (int i = 0; i < max_ops; ++i) op_queue_[i].cancel_operations(descriptor, ops); } void select_reactor::run(bool block, op_queue& ops) { asio::detail::mutex::scoped_lock lock(mutex_); #if defined(ASIO_HAS_IOCP) // Check if the thread is supposed to stop. if (stop_thread_) return; #endif // defined(ASIO_HAS_IOCP) // Set up the descriptor sets. for (int i = 0; i < max_select_ops; ++i) fd_sets_[i].reset(); fd_sets_[read_op].set(interrupter_.read_descriptor()); socket_type max_fd = 0; bool have_work_to_do = !timer_queues_.all_empty(); for (int i = 0; i < max_select_ops; ++i) { have_work_to_do = have_work_to_do || !op_queue_[i].empty(); fd_sets_[i].set(op_queue_[i], ops); if (fd_sets_[i].max_descriptor() > max_fd) max_fd = fd_sets_[i].max_descriptor(); } #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Connection operations on Windows use both except and write fd_sets. have_work_to_do = have_work_to_do || !op_queue_[connect_op].empty(); fd_sets_[write_op].set(op_queue_[connect_op], ops); if (fd_sets_[write_op].max_descriptor() > max_fd) max_fd = fd_sets_[write_op].max_descriptor(); fd_sets_[except_op].set(op_queue_[connect_op], ops); if (fd_sets_[except_op].max_descriptor() > max_fd) max_fd = fd_sets_[except_op].max_descriptor(); #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) // We can return immediately if there's no work to do and the reactor is // not supposed to block. if (!block && !have_work_to_do) return; // Determine how long to block while waiting for events. timeval tv_buf = { 0, 0 }; timeval* tv = block ? 
get_timeout(tv_buf) : &tv_buf; lock.unlock(); // Block on the select call until descriptors become ready. asio::error_code ec; int retval = socket_ops::select(static_cast(max_fd + 1), fd_sets_[read_op], fd_sets_[write_op], fd_sets_[except_op], tv, ec); // Reset the interrupter. if (retval > 0 && fd_sets_[read_op].is_set(interrupter_.read_descriptor())) { interrupter_.reset(); --retval; } lock.lock(); // Dispatch all ready operations. if (retval > 0) { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Connection operations on Windows use both except and write fd_sets. fd_sets_[except_op].perform(op_queue_[connect_op], ops); fd_sets_[write_op].perform(op_queue_[connect_op], ops); #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Exception operations must be processed first to ensure that any // out-of-band data is read before normal data. for (int i = max_select_ops - 1; i >= 0; --i) fd_sets_[i].perform(op_queue_[i], ops); } timer_queues_.get_ready_timers(ops); } void select_reactor::interrupt() { interrupter_.interrupt(); } #if defined(ASIO_HAS_IOCP) void select_reactor::run_thread() { asio::detail::mutex::scoped_lock lock(mutex_); while (!stop_thread_) { lock.unlock(); op_queue ops; run(true, ops); io_service_.post_deferred_completions(ops); lock.lock(); } } void select_reactor::call_run_thread(select_reactor* reactor) { reactor->run_thread(); } #endif // defined(ASIO_HAS_IOCP) void select_reactor::do_add_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(mutex_); timer_queues_.insert(&queue); } void select_reactor::do_remove_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(mutex_); timer_queues_.erase(&queue); } timeval* select_reactor::get_timeout(timeval& tv) { // By default we will wait no longer than 5 minutes. This will ensure that // any changes to the system clock are detected after no longer than this. 
long usec = timer_queues_.wait_duration_usec(5 * 60 * 1000 * 1000); tv.tv_sec = usec / 1000000; tv.tv_usec = usec % 1000000; return &tv; } void select_reactor::cancel_ops_unlocked(socket_type descriptor, const asio::error_code& ec) { bool need_interrupt = false; op_queue ops; for (int i = 0; i < max_ops; ++i) need_interrupt = op_queue_[i].cancel_operations( descriptor, ops, ec) || need_interrupt; io_service_.post_deferred_completions(ops); if (need_interrupt) interrupter_.interrupt(); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) // || (!defined(ASIO_HAS_DEV_POLL) // && !defined(ASIO_HAS_EPOLL) // && !defined(ASIO_HAS_KQUEUE)) // && !defined(ASIO_WINDOWS_RUNTIME)) #endif // ASIO_DETAIL_IMPL_SELECT_REACTOR_IPP galera-26.4.3/asio/asio/detail/impl/eventfd_select_interrupter.ipp0000664000177500017540000001060113540715002023614 0ustar dbartmy// // detail/impl/eventfd_select_interrupter.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Roelof Naude (roelof.naude at gmail dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_EVENTFD_SELECT_INTERRUPTER_IPP #define ASIO_DETAIL_IMPL_EVENTFD_SELECT_INTERRUPTER_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_EVENTFD) #include #include #include #if __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 # include #else // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 # include #endif // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 #include "asio/detail/cstdint.hpp" #include "asio/detail/eventfd_select_interrupter.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { eventfd_select_interrupter::eventfd_select_interrupter() { open_descriptors(); } void eventfd_select_interrupter::open_descriptors() { #if __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 write_descriptor_ = read_descriptor_ = syscall(__NR_eventfd, 0); if (read_descriptor_ != -1) { ::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK); ::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC); } #else // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 # if defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK) write_descriptor_ = read_descriptor_ = ::eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); # else // defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK) errno = EINVAL; write_descriptor_ = read_descriptor_ = -1; # endif // defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK) if (read_descriptor_ == -1 && errno == EINVAL) { write_descriptor_ = read_descriptor_ = ::eventfd(0, 0); if (read_descriptor_ != -1) { ::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK); ::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC); } } #endif // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 if (read_descriptor_ == -1) { int pipe_fds[2]; if (pipe(pipe_fds) == 0) { read_descriptor_ = pipe_fds[0]; ::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK); ::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC); 
write_descriptor_ = pipe_fds[1]; ::fcntl(write_descriptor_, F_SETFL, O_NONBLOCK); ::fcntl(write_descriptor_, F_SETFD, FD_CLOEXEC); } else { asio::error_code ec(errno, asio::error::get_system_category()); asio::detail::throw_error(ec, "eventfd_select_interrupter"); } } } eventfd_select_interrupter::~eventfd_select_interrupter() { close_descriptors(); } void eventfd_select_interrupter::close_descriptors() { if (write_descriptor_ != -1 && write_descriptor_ != read_descriptor_) ::close(write_descriptor_); if (read_descriptor_ != -1) ::close(read_descriptor_); } void eventfd_select_interrupter::recreate() { close_descriptors(); write_descriptor_ = -1; read_descriptor_ = -1; open_descriptors(); } void eventfd_select_interrupter::interrupt() { uint64_t counter(1UL); int result = ::write(write_descriptor_, &counter, sizeof(uint64_t)); (void)result; } bool eventfd_select_interrupter::reset() { if (write_descriptor_ == read_descriptor_) { for (;;) { // Only perform one read. The kernel maintains an atomic counter. uint64_t counter(0); errno = 0; int bytes_read = ::read(read_descriptor_, &counter, sizeof(uint64_t)); if (bytes_read < 0 && errno == EINTR) continue; bool was_interrupted = (bytes_read > 0); return was_interrupted; } } else { for (;;) { // Clear all data from the pipe. char data[1024]; int bytes_read = ::read(read_descriptor_, data, sizeof(data)); if (bytes_read < 0 && errno == EINTR) continue; bool was_interrupted = (bytes_read > 0); while (bytes_read == sizeof(data)) bytes_read = ::read(read_descriptor_, data, sizeof(data)); return was_interrupted; } } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_EVENTFD) #endif // ASIO_DETAIL_IMPL_EVENTFD_SELECT_INTERRUPTER_IPP galera-26.4.3/asio/asio/detail/impl/dev_poll_reactor.ipp0000664000177500017540000003037013540715002021507 0ustar dbartmy// // detail/impl/dev_poll_reactor.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_IPP #define ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_DEV_POLL) #include "asio/detail/dev_poll_reactor.hpp" #include "asio/detail/assert.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { dev_poll_reactor::dev_poll_reactor(asio::io_service& io_service) : asio::detail::service_base(io_service), io_service_(use_service(io_service)), mutex_(), dev_poll_fd_(do_dev_poll_create()), interrupter_(), shutdown_(false) { // Add the interrupter's descriptor to /dev/poll. ::pollfd ev = { 0, 0, 0 }; ev.fd = interrupter_.read_descriptor(); ev.events = POLLIN | POLLERR; ev.revents = 0; ::write(dev_poll_fd_, &ev, sizeof(ev)); } dev_poll_reactor::~dev_poll_reactor() { shutdown_service(); ::close(dev_poll_fd_); } void dev_poll_reactor::shutdown_service() { asio::detail::mutex::scoped_lock lock(mutex_); shutdown_ = true; lock.unlock(); op_queue ops; for (int i = 0; i < max_ops; ++i) op_queue_[i].get_all_operations(ops); timer_queues_.get_all_timers(ops); io_service_.abandon_operations(ops); } void dev_poll_reactor::fork_service(asio::io_service::fork_event fork_ev) { if (fork_ev == asio::io_service::fork_child) { detail::mutex::scoped_lock lock(mutex_); if (dev_poll_fd_ != -1) ::close(dev_poll_fd_); dev_poll_fd_ = -1; dev_poll_fd_ = do_dev_poll_create(); interrupter_.recreate(); // Add the interrupter's descriptor to /dev/poll. 
::pollfd ev = { 0, 0, 0 }; ev.fd = interrupter_.read_descriptor(); ev.events = POLLIN | POLLERR; ev.revents = 0; ::write(dev_poll_fd_, &ev, sizeof(ev)); // Re-register all descriptors with /dev/poll. The changes will be written // to the /dev/poll descriptor the next time the reactor is run. for (int i = 0; i < max_ops; ++i) { reactor_op_queue::iterator iter = op_queue_[i].begin(); reactor_op_queue::iterator end = op_queue_[i].end(); for (; iter != end; ++iter) { ::pollfd& pending_ev = add_pending_event_change(iter->first); pending_ev.events |= POLLERR | POLLHUP; switch (i) { case read_op: pending_ev.events |= POLLIN; break; case write_op: pending_ev.events |= POLLOUT; break; case except_op: pending_ev.events |= POLLPRI; break; default: break; } } } interrupter_.interrupt(); } } void dev_poll_reactor::init_task() { io_service_.init_task(); } int dev_poll_reactor::register_descriptor(socket_type, per_descriptor_data&) { return 0; } int dev_poll_reactor::register_internal_descriptor(int op_type, socket_type descriptor, per_descriptor_data&, reactor_op* op) { asio::detail::mutex::scoped_lock lock(mutex_); op_queue_[op_type].enqueue_operation(descriptor, op); ::pollfd& ev = add_pending_event_change(descriptor); ev.events = POLLERR | POLLHUP; switch (op_type) { case read_op: ev.events |= POLLIN; break; case write_op: ev.events |= POLLOUT; break; case except_op: ev.events |= POLLPRI; break; default: break; } interrupter_.interrupt(); return 0; } void dev_poll_reactor::move_descriptor(socket_type, dev_poll_reactor::per_descriptor_data&, dev_poll_reactor::per_descriptor_data&) { } void dev_poll_reactor::start_op(int op_type, socket_type descriptor, dev_poll_reactor::per_descriptor_data&, reactor_op* op, bool is_continuation, bool allow_speculative) { asio::detail::mutex::scoped_lock lock(mutex_); if (shutdown_) { post_immediate_completion(op, is_continuation); return; } if (allow_speculative) { if (op_type != read_op || !op_queue_[except_op].has_operation(descriptor)) { if 
(!op_queue_[op_type].has_operation(descriptor)) { if (op->perform()) { lock.unlock(); io_service_.post_immediate_completion(op, is_continuation); return; } } } } bool first = op_queue_[op_type].enqueue_operation(descriptor, op); io_service_.work_started(); if (first) { ::pollfd& ev = add_pending_event_change(descriptor); ev.events = POLLERR | POLLHUP; if (op_type == read_op || op_queue_[read_op].has_operation(descriptor)) ev.events |= POLLIN; if (op_type == write_op || op_queue_[write_op].has_operation(descriptor)) ev.events |= POLLOUT; if (op_type == except_op || op_queue_[except_op].has_operation(descriptor)) ev.events |= POLLPRI; interrupter_.interrupt(); } } void dev_poll_reactor::cancel_ops(socket_type descriptor, dev_poll_reactor::per_descriptor_data&) { asio::detail::mutex::scoped_lock lock(mutex_); cancel_ops_unlocked(descriptor, asio::error::operation_aborted); } void dev_poll_reactor::deregister_descriptor(socket_type descriptor, dev_poll_reactor::per_descriptor_data&, bool) { asio::detail::mutex::scoped_lock lock(mutex_); // Remove the descriptor from /dev/poll. ::pollfd& ev = add_pending_event_change(descriptor); ev.events = POLLREMOVE; interrupter_.interrupt(); // Cancel any outstanding operations associated with the descriptor. cancel_ops_unlocked(descriptor, asio::error::operation_aborted); } void dev_poll_reactor::deregister_internal_descriptor( socket_type descriptor, dev_poll_reactor::per_descriptor_data&) { asio::detail::mutex::scoped_lock lock(mutex_); // Remove the descriptor from /dev/poll. Since this function is only called // during a fork, we can apply the change immediately. ::pollfd ev = { 0, 0, 0 }; ev.fd = descriptor; ev.events = POLLREMOVE; ev.revents = 0; ::write(dev_poll_fd_, &ev, sizeof(ev)); // Destroy all operations associated with the descriptor. 
op_queue ops; asio::error_code ec; for (int i = 0; i < max_ops; ++i) op_queue_[i].cancel_operations(descriptor, ops, ec); } void dev_poll_reactor::run(bool block, op_queue& ops) { asio::detail::mutex::scoped_lock lock(mutex_); // We can return immediately if there's no work to do and the reactor is // not supposed to block. if (!block && op_queue_[read_op].empty() && op_queue_[write_op].empty() && op_queue_[except_op].empty() && timer_queues_.all_empty()) return; // Write the pending event registration changes to the /dev/poll descriptor. std::size_t events_size = sizeof(::pollfd) * pending_event_changes_.size(); if (events_size > 0) { errno = 0; int result = ::write(dev_poll_fd_, &pending_event_changes_[0], events_size); if (result != static_cast(events_size)) { asio::error_code ec = asio::error_code( errno, asio::error::get_system_category()); for (std::size_t i = 0; i < pending_event_changes_.size(); ++i) { int descriptor = pending_event_changes_[i].fd; for (int j = 0; j < max_ops; ++j) op_queue_[j].cancel_operations(descriptor, ops, ec); } } pending_event_changes_.clear(); pending_event_change_index_.clear(); } int timeout = block ? get_timeout() : 0; lock.unlock(); // Block on the /dev/poll descriptor. ::pollfd events[128] = { { 0, 0, 0 } }; ::dvpoll dp = { 0, 0, 0 }; dp.dp_fds = events; dp.dp_nfds = 128; dp.dp_timeout = timeout; int num_events = ::ioctl(dev_poll_fd_, DP_POLL, &dp); lock.lock(); // Dispatch the waiting events. for (int i = 0; i < num_events; ++i) { int descriptor = events[i].fd; if (descriptor == interrupter_.read_descriptor()) { interrupter_.reset(); } else { bool more_reads = false; bool more_writes = false; bool more_except = false; // Exception operations must be processed first to ensure that any // out-of-band data is read before normal data. 
if (events[i].events & (POLLPRI | POLLERR | POLLHUP)) more_except = op_queue_[except_op].perform_operations(descriptor, ops); else more_except = op_queue_[except_op].has_operation(descriptor); if (events[i].events & (POLLIN | POLLERR | POLLHUP)) more_reads = op_queue_[read_op].perform_operations(descriptor, ops); else more_reads = op_queue_[read_op].has_operation(descriptor); if (events[i].events & (POLLOUT | POLLERR | POLLHUP)) more_writes = op_queue_[write_op].perform_operations(descriptor, ops); else more_writes = op_queue_[write_op].has_operation(descriptor); if ((events[i].events & (POLLERR | POLLHUP)) != 0 && !more_except && !more_reads && !more_writes) { // If we have an event and no operations associated with the // descriptor then we need to delete the descriptor from /dev/poll. // The poll operation can produce POLLHUP or POLLERR events when there // is no operation pending, so if we do not remove the descriptor we // can end up in a tight polling loop. ::pollfd ev = { 0, 0, 0 }; ev.fd = descriptor; ev.events = POLLREMOVE; ev.revents = 0; ::write(dev_poll_fd_, &ev, sizeof(ev)); } else { ::pollfd ev = { 0, 0, 0 }; ev.fd = descriptor; ev.events = POLLERR | POLLHUP; if (more_reads) ev.events |= POLLIN; if (more_writes) ev.events |= POLLOUT; if (more_except) ev.events |= POLLPRI; ev.revents = 0; int result = ::write(dev_poll_fd_, &ev, sizeof(ev)); if (result != sizeof(ev)) { asio::error_code ec(errno, asio::error::get_system_category()); for (int j = 0; j < max_ops; ++j) op_queue_[j].cancel_operations(descriptor, ops, ec); } } } } timer_queues_.get_ready_timers(ops); } void dev_poll_reactor::interrupt() { interrupter_.interrupt(); } int dev_poll_reactor::do_dev_poll_create() { int fd = ::open("/dev/poll", O_RDWR); if (fd == -1) { asio::error_code ec(errno, asio::error::get_system_category()); asio::detail::throw_error(ec, "/dev/poll"); } return fd; } void dev_poll_reactor::do_add_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(mutex_); 
timer_queues_.insert(&queue); } void dev_poll_reactor::do_remove_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(mutex_); timer_queues_.erase(&queue); } int dev_poll_reactor::get_timeout() { // By default we will wait no longer than 5 minutes. This will ensure that // any changes to the system clock are detected after no longer than this. return timer_queues_.wait_duration_msec(5 * 60 * 1000); } void dev_poll_reactor::cancel_ops_unlocked(socket_type descriptor, const asio::error_code& ec) { bool need_interrupt = false; op_queue ops; for (int i = 0; i < max_ops; ++i) need_interrupt = op_queue_[i].cancel_operations( descriptor, ops, ec) || need_interrupt; io_service_.post_deferred_completions(ops); if (need_interrupt) interrupter_.interrupt(); } ::pollfd& dev_poll_reactor::add_pending_event_change(int descriptor) { hash_map::iterator iter = pending_event_change_index_.find(descriptor); if (iter == pending_event_change_index_.end()) { std::size_t index = pending_event_changes_.size(); pending_event_changes_.reserve(pending_event_changes_.size() + 1); pending_event_change_index_.insert(std::make_pair(descriptor, index)); pending_event_changes_.push_back(::pollfd()); pending_event_changes_[index].fd = descriptor; pending_event_changes_[index].revents = 0; return pending_event_changes_[index]; } else { return pending_event_changes_[iter->second]; } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_DEV_POLL) #endif // ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_IPP galera-26.4.3/asio/asio/detail/impl/win_mutex.ipp0000664000177500017540000000377513540715002020214 0ustar dbartmy// // detail/impl/win_mutex.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_MUTEX_IPP #define ASIO_DETAIL_IMPL_WIN_MUTEX_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) #include "asio/detail/throw_error.hpp" #include "asio/detail/win_mutex.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { win_mutex::win_mutex() { int error = do_init(); asio::error_code ec(error, asio::error::get_system_category()); asio::detail::throw_error(ec, "mutex"); } int win_mutex::do_init() { #if defined(__MINGW32__) // Not sure if MinGW supports structured exception handling, so for now // we'll just call the Windows API and hope. # if defined(UNDER_CE) ::InitializeCriticalSection(&crit_section_); # elif defined(ASIO_WINDOWS_APP) ::InitializeCriticalSectionEx(&crit_section_, 0x80000000, 0); # else if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000)) return ::GetLastError(); # endif return 0; #else __try { # if defined(UNDER_CE) ::InitializeCriticalSection(&crit_section_); # elif defined(ASIO_WINDOWS_APP) if (!::InitializeCriticalSectionEx(&crit_section_, 0, 0)) return ::GetLastError(); # else if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000)) return ::GetLastError(); # endif } __except(GetExceptionCode() == STATUS_NO_MEMORY ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) { return ERROR_OUTOFMEMORY; } return 0; #endif } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS) #endif // ASIO_DETAIL_IMPL_WIN_MUTEX_IPP galera-26.4.3/asio/asio/detail/impl/win_tss_ptr.ipp0000664000177500017540000000244213540715002020536 0ustar dbartmy// // detail/impl/win_tss_ptr.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_TSS_PTR_IPP #define ASIO_DETAIL_IMPL_WIN_TSS_PTR_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) #include "asio/detail/throw_error.hpp" #include "asio/detail/win_tss_ptr.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { DWORD win_tss_ptr_create() { #if defined(UNDER_CE) enum { out_of_indexes = 0xFFFFFFFF }; #else enum { out_of_indexes = TLS_OUT_OF_INDEXES }; #endif DWORD tss_key = ::TlsAlloc(); if (tss_key == out_of_indexes) { DWORD last_error = ::GetLastError(); asio::error_code ec(last_error, asio::error::get_system_category()); asio::detail::throw_error(ec, "tss"); } return tss_key; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS) #endif // ASIO_DETAIL_IMPL_WIN_TSS_PTR_IPP galera-26.4.3/asio/asio/detail/impl/winrt_timer_scheduler.hpp0000664000177500017540000000412313540715002022561 0ustar dbartmy// // detail/impl/winrt_timer_scheduler.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_HPP #define ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template void winrt_timer_scheduler::add_timer_queue(timer_queue& queue) { do_add_timer_queue(queue); } // Remove a timer queue from the reactor. template void winrt_timer_scheduler::remove_timer_queue(timer_queue& queue) { do_remove_timer_queue(queue); } template void winrt_timer_scheduler::schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op) { asio::detail::mutex::scoped_lock lock(mutex_); if (shutdown_) { io_service_.post_immediate_completion(op, false); return; } bool earliest = queue.enqueue_timer(time, timer, op); io_service_.work_started(); if (earliest) event_.signal(lock); } template std::size_t winrt_timer_scheduler::cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled) { asio::detail::mutex::scoped_lock lock(mutex_); op_queue ops; std::size_t n = queue.cancel_timer(timer, ops, max_cancelled); lock.unlock(); io_service_.post_deferred_completions(ops); return n; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_HPP galera-26.4.3/asio/asio/detail/impl/reactive_socket_service_base.ipp0000664000177500017540000001662213540715002024054 0ustar dbartmy// // detail/reactive_socket_service_base.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_REACTIVE_SOCKET_SERVICE_BASE_IPP #define ASIO_DETAIL_IMPL_REACTIVE_SOCKET_SERVICE_BASE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_IOCP) \ && !defined(ASIO_WINDOWS_RUNTIME) #include "asio/detail/reactive_socket_service_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { reactive_socket_service_base::reactive_socket_service_base( asio::io_service& io_service) : reactor_(use_service(io_service)) { reactor_.init_task(); } void reactive_socket_service_base::shutdown_service() { } void reactive_socket_service_base::construct( reactive_socket_service_base::base_implementation_type& impl) { impl.socket_ = invalid_socket; impl.state_ = 0; } void reactive_socket_service_base::base_move_construct( reactive_socket_service_base::base_implementation_type& impl, reactive_socket_service_base::base_implementation_type& other_impl) { impl.socket_ = other_impl.socket_; other_impl.socket_ = invalid_socket; impl.state_ = other_impl.state_; other_impl.state_ = 0; reactor_.move_descriptor(impl.socket_, impl.reactor_data_, other_impl.reactor_data_); } void reactive_socket_service_base::base_move_assign( reactive_socket_service_base::base_implementation_type& impl, reactive_socket_service_base& other_service, reactive_socket_service_base::base_implementation_type& other_impl) { destroy(impl); impl.socket_ = other_impl.socket_; other_impl.socket_ = invalid_socket; impl.state_ = other_impl.state_; other_impl.state_ = 0; other_service.reactor_.move_descriptor(impl.socket_, impl.reactor_data_, other_impl.reactor_data_); } void reactive_socket_service_base::destroy( reactive_socket_service_base::base_implementation_type& 
impl) { if (impl.socket_ != invalid_socket) { ASIO_HANDLER_OPERATION(("socket", &impl, "close")); reactor_.deregister_descriptor(impl.socket_, impl.reactor_data_, (impl.state_ & socket_ops::possible_dup) == 0); asio::error_code ignored_ec; socket_ops::close(impl.socket_, impl.state_, true, ignored_ec); } } asio::error_code reactive_socket_service_base::close( reactive_socket_service_base::base_implementation_type& impl, asio::error_code& ec) { if (is_open(impl)) { ASIO_HANDLER_OPERATION(("socket", &impl, "close")); reactor_.deregister_descriptor(impl.socket_, impl.reactor_data_, (impl.state_ & socket_ops::possible_dup) == 0); } socket_ops::close(impl.socket_, impl.state_, false, ec); // The descriptor is closed by the OS even if close() returns an error. // // (Actually, POSIX says the state of the descriptor is unspecified. On // Linux the descriptor is apparently closed anyway; e.g. see // http://lkml.org/lkml/2005/9/10/129 // We'll just have to assume that other OSes follow the same behaviour. The // known exception is when Windows's closesocket() function fails with // WSAEWOULDBLOCK, but this case is handled inside socket_ops::close(). 
construct(impl); return ec; } asio::error_code reactive_socket_service_base::cancel( reactive_socket_service_base::base_implementation_type& impl, asio::error_code& ec) { if (!is_open(impl)) { ec = asio::error::bad_descriptor; return ec; } ASIO_HANDLER_OPERATION(("socket", &impl, "cancel")); reactor_.cancel_ops(impl.socket_, impl.reactor_data_); ec = asio::error_code(); return ec; } asio::error_code reactive_socket_service_base::do_open( reactive_socket_service_base::base_implementation_type& impl, int af, int type, int protocol, asio::error_code& ec) { if (is_open(impl)) { ec = asio::error::already_open; return ec; } socket_holder sock(socket_ops::socket(af, type, protocol, ec)); if (sock.get() == invalid_socket) return ec; if (int err = reactor_.register_descriptor(sock.get(), impl.reactor_data_)) { ec = asio::error_code(err, asio::error::get_system_category()); return ec; } impl.socket_ = sock.release(); switch (type) { case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break; case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break; default: impl.state_ = 0; break; } ec = asio::error_code(); return ec; } asio::error_code reactive_socket_service_base::do_assign( reactive_socket_service_base::base_implementation_type& impl, int type, const reactive_socket_service_base::native_handle_type& native_socket, asio::error_code& ec) { if (is_open(impl)) { ec = asio::error::already_open; return ec; } if (int err = reactor_.register_descriptor( native_socket, impl.reactor_data_)) { ec = asio::error_code(err, asio::error::get_system_category()); return ec; } impl.socket_ = native_socket; switch (type) { case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break; case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break; default: impl.state_ = 0; break; } impl.state_ |= socket_ops::possible_dup; ec = asio::error_code(); return ec; } void reactive_socket_service_base::start_op( reactive_socket_service_base::base_implementation_type& 
impl, int op_type, reactor_op* op, bool is_continuation, bool is_non_blocking, bool noop) { if (!noop) { if ((impl.state_ & socket_ops::non_blocking) || socket_ops::set_internal_non_blocking( impl.socket_, impl.state_, true, op->ec_)) { reactor_.start_op(op_type, impl.socket_, impl.reactor_data_, op, is_continuation, is_non_blocking); return; } } reactor_.post_immediate_completion(op, is_continuation); } void reactive_socket_service_base::start_accept_op( reactive_socket_service_base::base_implementation_type& impl, reactor_op* op, bool is_continuation, bool peer_is_open) { if (!peer_is_open) start_op(impl, reactor::read_op, op, true, is_continuation, false); else { op->ec_ = asio::error::already_open; reactor_.post_immediate_completion(op, is_continuation); } } void reactive_socket_service_base::start_connect_op( reactive_socket_service_base::base_implementation_type& impl, reactor_op* op, bool is_continuation, const socket_addr_type* addr, size_t addrlen) { if ((impl.state_ & socket_ops::non_blocking) || socket_ops::set_internal_non_blocking( impl.socket_, impl.state_, true, op->ec_)) { if (socket_ops::connect(impl.socket_, addr, addrlen, op->ec_) != 0) { if (op->ec_ == asio::error::in_progress || op->ec_ == asio::error::would_block) { op->ec_ = asio::error_code(); reactor_.start_op(reactor::connect_op, impl.socket_, impl.reactor_data_, op, is_continuation, false); return; } } } reactor_.post_immediate_completion(op, is_continuation); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_HAS_IOCP) // && !defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_IMPL_REACTIVE_SOCKET_SERVICE_BASE_IPP galera-26.4.3/asio/asio/detail/impl/timer_queue_ptime.ipp0000664000177500017540000000415113540715002021704 0ustar dbartmy// // detail/impl/timer_queue_ptime.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_TIMER_QUEUE_PTIME_IPP #define ASIO_DETAIL_IMPL_TIMER_QUEUE_PTIME_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/timer_queue_ptime.hpp" #include "asio/detail/push_options.hpp" #if defined(ASIO_HAS_BOOST_DATE_TIME) namespace asio { namespace detail { timer_queue >::timer_queue() { } timer_queue >::~timer_queue() { } bool timer_queue >::enqueue_timer( const time_type& time, per_timer_data& timer, wait_op* op) { return impl_.enqueue_timer(time, timer, op); } bool timer_queue >::empty() const { return impl_.empty(); } long timer_queue >::wait_duration_msec( long max_duration) const { return impl_.wait_duration_msec(max_duration); } long timer_queue >::wait_duration_usec( long max_duration) const { return impl_.wait_duration_usec(max_duration); } void timer_queue >::get_ready_timers( op_queue& ops) { impl_.get_ready_timers(ops); } void timer_queue >::get_all_timers( op_queue& ops) { impl_.get_all_timers(ops); } std::size_t timer_queue >::cancel_timer( per_timer_data& timer, op_queue& ops, std::size_t max_cancelled) { return impl_.cancel_timer(timer, ops, max_cancelled); } } // namespace detail } // namespace asio #endif // defined(ASIO_HAS_BOOST_DATE_TIME) #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IMPL_TIMER_QUEUE_PTIME_IPP galera-26.4.3/asio/asio/detail/impl/win_iocp_io_service.hpp0000664000177500017540000000703413540715002022202 0ustar dbartmy// // detail/impl/win_iocp_io_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_IOCP_IO_SERVICE_HPP #define ASIO_DETAIL_IMPL_WIN_IOCP_IO_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/addressof.hpp" #include "asio/detail/completion_handler.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template void win_iocp_io_service::dispatch(Handler& handler) { if (thread_call_stack::contains(this)) { fenced_block b(fenced_block::full); asio_handler_invoke_helpers::invoke(handler, handler); } else { // Allocate and construct an operation to wrap the handler. typedef completion_handler op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "io_service", this, "dispatch")); post_immediate_completion(p.p, false); p.v = p.p = 0; } } template void win_iocp_io_service::post(Handler& handler) { // Allocate and construct an operation to wrap the handler. 
typedef completion_handler op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "io_service", this, "post")); post_immediate_completion(p.p, false); p.v = p.p = 0; } template void win_iocp_io_service::add_timer_queue( timer_queue& queue) { do_add_timer_queue(queue); } template void win_iocp_io_service::remove_timer_queue( timer_queue& queue) { do_remove_timer_queue(queue); } template void win_iocp_io_service::schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op) { // If the service has been shut down we silently discard the timer. if (::InterlockedExchangeAdd(&shutdown_, 0) != 0) { post_immediate_completion(op, false); return; } mutex::scoped_lock lock(dispatch_mutex_); bool earliest = queue.enqueue_timer(time, timer, op); work_started(); if (earliest) update_timeout(); } template std::size_t win_iocp_io_service::cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled) { // If the service has been shut down we silently ignore the cancellation. if (::InterlockedExchangeAdd(&shutdown_, 0) != 0) return 0; mutex::scoped_lock lock(dispatch_mutex_); op_queue ops; std::size_t n = queue.cancel_timer(timer, ops, max_cancelled); post_deferred_completions(ops); return n; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_IMPL_WIN_IOCP_IO_SERVICE_HPP galera-26.4.3/asio/asio/detail/impl/buffer_sequence_adapter.ipp0000664000177500017540000000573213540715002023031 0ustar dbartmy// // detail/impl/buffer_sequence_adapter.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_BUFFER_SEQUENCE_ADAPTER_IPP #define ASIO_DETAIL_IMPL_BUFFER_SEQUENCE_ADAPTER_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include #include #include #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class winrt_buffer_impl : public Microsoft::WRL::RuntimeClass< Microsoft::WRL::RuntimeClassFlags< Microsoft::WRL::RuntimeClassType::WinRtClassicComMix>, ABI::Windows::Storage::Streams::IBuffer, Windows::Storage::Streams::IBufferByteAccess> { public: explicit winrt_buffer_impl(const asio::const_buffer& b) { bytes_ = const_cast(asio::buffer_cast(b)); length_ = asio::buffer_size(b); capacity_ = asio::buffer_size(b); } explicit winrt_buffer_impl(const asio::mutable_buffer& b) { bytes_ = const_cast(asio::buffer_cast(b)); length_ = 0; capacity_ = asio::buffer_size(b); } ~winrt_buffer_impl() { } STDMETHODIMP Buffer(byte** value) { *value = bytes_; return S_OK; } STDMETHODIMP get_Capacity(UINT32* value) { *value = capacity_; return S_OK; } STDMETHODIMP get_Length(UINT32 *value) { *value = length_; return S_OK; } STDMETHODIMP put_Length(UINT32 value) { if (value > capacity_) return E_INVALIDARG; length_ = value; return S_OK; } private: byte* bytes_; UINT32 length_; UINT32 capacity_; }; void buffer_sequence_adapter_base::init_native_buffer( buffer_sequence_adapter_base::native_buffer_type& buf, const asio::mutable_buffer& buffer) { std::memset(&buf, 0, sizeof(native_buffer_type)); Microsoft::WRL::ComPtr insp = Microsoft::WRL::Make(buffer); buf = reinterpret_cast(insp.Get()); } void buffer_sequence_adapter_base::init_native_buffer( buffer_sequence_adapter_base::native_buffer_type& buf, const asio::const_buffer& buffer) { std::memset(&buf, 0, 
sizeof(native_buffer_type)); Microsoft::WRL::ComPtr insp = Microsoft::WRL::Make(buffer); Platform::Object^ buf_obj = reinterpret_cast(insp.Get()); buf = reinterpret_cast(insp.Get()); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_IMPL_BUFFER_SEQUENCE_ADAPTER_IPP galera-26.4.3/asio/asio/detail/impl/winrt_ssocket_service_base.ipp0000664000177500017540000003763313540715002023605 0ustar dbartmy// // detail/impl/winrt_ssocket_service_base.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WINRT_SSOCKET_SERVICE_BASE_IPP #define ASIO_DETAIL_IMPL_WINRT_SSOCKET_SERVICE_BASE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include #include "asio/detail/winrt_ssocket_service_base.hpp" #include "asio/detail/winrt_async_op.hpp" #include "asio/detail/winrt_utils.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { winrt_ssocket_service_base::winrt_ssocket_service_base( asio::io_service& io_service) : io_service_(use_service(io_service)), async_manager_(use_service(io_service)), mutex_(), impl_list_(0) { } void winrt_ssocket_service_base::shutdown_service() { // Close all implementations, causing all operations to complete. 
asio::detail::mutex::scoped_lock lock(mutex_); base_implementation_type* impl = impl_list_; while (impl) { asio::error_code ignored_ec; close(*impl, ignored_ec); impl = impl->next_; } } void winrt_ssocket_service_base::construct( winrt_ssocket_service_base::base_implementation_type& impl) { // Insert implementation into linked list of all implementations. asio::detail::mutex::scoped_lock lock(mutex_); impl.next_ = impl_list_; impl.prev_ = 0; if (impl_list_) impl_list_->prev_ = &impl; impl_list_ = &impl; } void winrt_ssocket_service_base::base_move_construct( winrt_ssocket_service_base::base_implementation_type& impl, winrt_ssocket_service_base::base_implementation_type& other_impl) { impl.socket_ = other_impl.socket_; other_impl.socket_ = nullptr; // Insert implementation into linked list of all implementations. asio::detail::mutex::scoped_lock lock(mutex_); impl.next_ = impl_list_; impl.prev_ = 0; if (impl_list_) impl_list_->prev_ = &impl; impl_list_ = &impl; } void winrt_ssocket_service_base::base_move_assign( winrt_ssocket_service_base::base_implementation_type& impl, winrt_ssocket_service_base& other_service, winrt_ssocket_service_base::base_implementation_type& other_impl) { asio::error_code ignored_ec; close(impl, ignored_ec); if (this != &other_service) { // Remove implementation from linked list of all implementations. asio::detail::mutex::scoped_lock lock(mutex_); if (impl_list_ == &impl) impl_list_ = impl.next_; if (impl.prev_) impl.prev_->next_ = impl.next_; if (impl.next_) impl.next_->prev_= impl.prev_; impl.next_ = 0; impl.prev_ = 0; } impl.socket_ = other_impl.socket_; other_impl.socket_ = nullptr; if (this != &other_service) { // Insert implementation into linked list of all implementations. 
asio::detail::mutex::scoped_lock lock(other_service.mutex_); impl.next_ = other_service.impl_list_; impl.prev_ = 0; if (other_service.impl_list_) other_service.impl_list_->prev_ = &impl; other_service.impl_list_ = &impl; } } void winrt_ssocket_service_base::destroy( winrt_ssocket_service_base::base_implementation_type& impl) { asio::error_code ignored_ec; close(impl, ignored_ec); // Remove implementation from linked list of all implementations. asio::detail::mutex::scoped_lock lock(mutex_); if (impl_list_ == &impl) impl_list_ = impl.next_; if (impl.prev_) impl.prev_->next_ = impl.next_; if (impl.next_) impl.next_->prev_= impl.prev_; impl.next_ = 0; impl.prev_ = 0; } asio::error_code winrt_ssocket_service_base::close( winrt_ssocket_service_base::base_implementation_type& impl, asio::error_code& ec) { if (impl.socket_) { delete impl.socket_; impl.socket_ = nullptr; } ec = asio::error_code(); return ec; } std::size_t winrt_ssocket_service_base::do_get_endpoint( const base_implementation_type& impl, bool local, void* addr, std::size_t addr_len, asio::error_code& ec) const { if (!is_open(impl)) { ec = asio::error::bad_descriptor; return addr_len; } try { std::string addr_string = winrt_utils::string(local ? impl.socket_->Information->LocalAddress->CanonicalName : impl.socket_->Information->RemoteAddress->CanonicalName); unsigned short port = winrt_utils::integer(local ? 
impl.socket_->Information->LocalPort : impl.socket_->Information->RemotePort); unsigned long scope = 0; switch (reinterpret_cast(addr)->sa_family) { case ASIO_OS_DEF(AF_INET): if (addr_len < sizeof(sockaddr_in4_type)) { ec = asio::error::invalid_argument; return addr_len; } else { socket_ops::inet_pton(ASIO_OS_DEF(AF_INET), addr_string.c_str(), &reinterpret_cast(addr)->sin_addr, &scope, ec); reinterpret_cast(addr)->sin_port = socket_ops::host_to_network_short(port); ec = asio::error_code(); return sizeof(sockaddr_in4_type); } case ASIO_OS_DEF(AF_INET6): if (addr_len < sizeof(sockaddr_in6_type)) { ec = asio::error::invalid_argument; return addr_len; } else { socket_ops::inet_pton(ASIO_OS_DEF(AF_INET6), addr_string.c_str(), &reinterpret_cast(addr)->sin6_addr, &scope, ec); reinterpret_cast(addr)->sin6_port = socket_ops::host_to_network_short(port); ec = asio::error_code(); return sizeof(sockaddr_in6_type); } default: ec = asio::error::address_family_not_supported; return addr_len; } } catch (Platform::Exception^ e) { ec = asio::error_code(e->HResult, asio::system_category()); return addr_len; } } asio::error_code winrt_ssocket_service_base::do_set_option( winrt_ssocket_service_base::base_implementation_type& impl, int level, int optname, const void* optval, std::size_t optlen, asio::error_code& ec) { if (!is_open(impl)) { ec = asio::error::bad_descriptor; return ec; } try { if (level == ASIO_OS_DEF(SOL_SOCKET) && optname == ASIO_OS_DEF(SO_KEEPALIVE)) { if (optlen == sizeof(int)) { int value = 0; std::memcpy(&value, optval, optlen); impl.socket_->Control->KeepAlive = !!value; ec = asio::error_code(); } else { ec = asio::error::invalid_argument; } } else if (level == ASIO_OS_DEF(IPPROTO_TCP) && optname == ASIO_OS_DEF(TCP_NODELAY)) { if (optlen == sizeof(int)) { int value = 0; std::memcpy(&value, optval, optlen); impl.socket_->Control->NoDelay = !!value; ec = asio::error_code(); } else { ec = asio::error::invalid_argument; } } else { ec = asio::error::invalid_argument; } 
} catch (Platform::Exception^ e) { ec = asio::error_code(e->HResult, asio::system_category()); } return ec; } void winrt_ssocket_service_base::do_get_option( const winrt_ssocket_service_base::base_implementation_type& impl, int level, int optname, void* optval, std::size_t* optlen, asio::error_code& ec) const { if (!is_open(impl)) { ec = asio::error::bad_descriptor; return; } try { if (level == ASIO_OS_DEF(SOL_SOCKET) && optname == ASIO_OS_DEF(SO_KEEPALIVE)) { if (*optlen >= sizeof(int)) { int value = impl.socket_->Control->KeepAlive ? 1 : 0; std::memcpy(optval, &value, sizeof(int)); *optlen = sizeof(int); ec = asio::error_code(); } else { ec = asio::error::invalid_argument; } } else if (level == ASIO_OS_DEF(IPPROTO_TCP) && optname == ASIO_OS_DEF(TCP_NODELAY)) { if (*optlen >= sizeof(int)) { int value = impl.socket_->Control->NoDelay ? 1 : 0; std::memcpy(optval, &value, sizeof(int)); *optlen = sizeof(int); ec = asio::error_code(); } else { ec = asio::error::invalid_argument; } } else { ec = asio::error::invalid_argument; } } catch (Platform::Exception^ e) { ec = asio::error_code(e->HResult, asio::system_category()); } } asio::error_code winrt_ssocket_service_base::do_connect( winrt_ssocket_service_base::base_implementation_type& impl, const void* addr, asio::error_code& ec) { if (!is_open(impl)) { ec = asio::error::bad_descriptor; return ec; } char addr_string[max_addr_v6_str_len]; unsigned short port; switch (reinterpret_cast(addr)->sa_family) { case ASIO_OS_DEF(AF_INET): socket_ops::inet_ntop(ASIO_OS_DEF(AF_INET), &reinterpret_cast(addr)->sin_addr, addr_string, sizeof(addr_string), 0, ec); port = socket_ops::network_to_host_short( reinterpret_cast(addr)->sin_port); break; case ASIO_OS_DEF(AF_INET6): socket_ops::inet_ntop(ASIO_OS_DEF(AF_INET6), &reinterpret_cast(addr)->sin6_addr, addr_string, sizeof(addr_string), 0, ec); port = socket_ops::network_to_host_short( reinterpret_cast(addr)->sin6_port); break; default: ec = asio::error::address_family_not_supported; 
return ec; } if (!ec) try { async_manager_.sync(impl.socket_->ConnectAsync( ref new Windows::Networking::HostName( winrt_utils::string(addr_string)), winrt_utils::string(port)), ec); } catch (Platform::Exception^ e) { ec = asio::error_code(e->HResult, asio::system_category()); } return ec; } void winrt_ssocket_service_base::start_connect_op( winrt_ssocket_service_base::base_implementation_type& impl, const void* addr, winrt_async_op* op, bool is_continuation) { if (!is_open(impl)) { op->ec_ = asio::error::bad_descriptor; io_service_.post_immediate_completion(op, is_continuation); return; } char addr_string[max_addr_v6_str_len]; unsigned short port = 0; switch (reinterpret_cast(addr)->sa_family) { case ASIO_OS_DEF(AF_INET): socket_ops::inet_ntop(ASIO_OS_DEF(AF_INET), &reinterpret_cast(addr)->sin_addr, addr_string, sizeof(addr_string), 0, op->ec_); port = socket_ops::network_to_host_short( reinterpret_cast(addr)->sin_port); break; case ASIO_OS_DEF(AF_INET6): socket_ops::inet_ntop(ASIO_OS_DEF(AF_INET6), &reinterpret_cast(addr)->sin6_addr, addr_string, sizeof(addr_string), 0, op->ec_); port = socket_ops::network_to_host_short( reinterpret_cast(addr)->sin6_port); break; default: op->ec_ = asio::error::address_family_not_supported; break; } if (op->ec_) { io_service_.post_immediate_completion(op, is_continuation); return; } try { async_manager_.async(impl.socket_->ConnectAsync( ref new Windows::Networking::HostName( winrt_utils::string(addr_string)), winrt_utils::string(port)), op); } catch (Platform::Exception^ e) { op->ec_ = asio::error_code( e->HResult, asio::system_category()); io_service_.post_immediate_completion(op, is_continuation); } } std::size_t winrt_ssocket_service_base::do_send( winrt_ssocket_service_base::base_implementation_type& impl, const asio::const_buffer& data, socket_base::message_flags flags, asio::error_code& ec) { if (flags) { ec = asio::error::operation_not_supported; return 0; } if (!is_open(impl)) { ec = asio::error::bad_descriptor; return 0; 
} try { buffer_sequence_adapter bufs(asio::buffer(data)); if (bufs.all_empty()) { ec = asio::error_code(); return 0; } return async_manager_.sync( impl.socket_->OutputStream->WriteAsync(bufs.buffers()[0]), ec); } catch (Platform::Exception^ e) { ec = asio::error_code(e->HResult, asio::system_category()); return 0; } } void winrt_ssocket_service_base::start_send_op( winrt_ssocket_service_base::base_implementation_type& impl, const asio::const_buffer& data, socket_base::message_flags flags, winrt_async_op* op, bool is_continuation) { if (flags) { op->ec_ = asio::error::operation_not_supported; io_service_.post_immediate_completion(op, is_continuation); return; } if (!is_open(impl)) { op->ec_ = asio::error::bad_descriptor; io_service_.post_immediate_completion(op, is_continuation); return; } try { buffer_sequence_adapter bufs(asio::buffer(data)); if (bufs.all_empty()) { io_service_.post_immediate_completion(op, is_continuation); return; } async_manager_.async( impl.socket_->OutputStream->WriteAsync(bufs.buffers()[0]), op); } catch (Platform::Exception^ e) { op->ec_ = asio::error_code(e->HResult, asio::system_category()); io_service_.post_immediate_completion(op, is_continuation); } } std::size_t winrt_ssocket_service_base::do_receive( winrt_ssocket_service_base::base_implementation_type& impl, const asio::mutable_buffer& data, socket_base::message_flags flags, asio::error_code& ec) { if (flags) { ec = asio::error::operation_not_supported; return 0; } if (!is_open(impl)) { ec = asio::error::bad_descriptor; return 0; } try { buffer_sequence_adapter bufs(asio::buffer(data)); if (bufs.all_empty()) { ec = asio::error_code(); return 0; } async_manager_.sync( impl.socket_->InputStream->ReadAsync( bufs.buffers()[0], bufs.buffers()[0]->Capacity, Windows::Storage::Streams::InputStreamOptions::Partial), ec); std::size_t bytes_transferred = bufs.buffers()[0]->Length; if (bytes_transferred == 0 && !ec) { ec = asio::error::eof; } return bytes_transferred; } catch 
(Platform::Exception^ e) { ec = asio::error_code(e->HResult, asio::system_category()); return 0; } } void winrt_ssocket_service_base::start_receive_op( winrt_ssocket_service_base::base_implementation_type& impl, const asio::mutable_buffer& data, socket_base::message_flags flags, winrt_async_op* op, bool is_continuation) { if (flags) { op->ec_ = asio::error::operation_not_supported; io_service_.post_immediate_completion(op, is_continuation); return; } if (!is_open(impl)) { op->ec_ = asio::error::bad_descriptor; io_service_.post_immediate_completion(op, is_continuation); return; } try { buffer_sequence_adapter bufs(asio::buffer(data)); if (bufs.all_empty()) { io_service_.post_immediate_completion(op, is_continuation); return; } async_manager_.async( impl.socket_->InputStream->ReadAsync( bufs.buffers()[0], bufs.buffers()[0]->Capacity, Windows::Storage::Streams::InputStreamOptions::Partial), op); } catch (Platform::Exception^ e) { op->ec_ = asio::error_code(e->HResult, asio::system_category()); io_service_.post_immediate_completion(op, is_continuation); } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_IMPL_WINRT_SSOCKET_SERVICE_BASE_IPP galera-26.4.3/asio/asio/detail/impl/dev_poll_reactor.hpp0000664000177500017540000000377313540715002021515 0ustar dbartmy// // detail/impl/dev_poll_reactor.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_HPP #define ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_DEV_POLL) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template void dev_poll_reactor::add_timer_queue(timer_queue& queue) { do_add_timer_queue(queue); } template void dev_poll_reactor::remove_timer_queue(timer_queue& queue) { do_remove_timer_queue(queue); } template void dev_poll_reactor::schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op) { asio::detail::mutex::scoped_lock lock(mutex_); if (shutdown_) { io_service_.post_immediate_completion(op, false); return; } bool earliest = queue.enqueue_timer(time, timer, op); io_service_.work_started(); if (earliest) interrupter_.interrupt(); } template std::size_t dev_poll_reactor::cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled) { asio::detail::mutex::scoped_lock lock(mutex_); op_queue ops; std::size_t n = queue.cancel_timer(timer, ops, max_cancelled); lock.unlock(); io_service_.post_deferred_completions(ops); return n; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_DEV_POLL) #endif // ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_HPP galera-26.4.3/asio/asio/detail/impl/posix_mutex.ipp0000664000177500017540000000212313540715002020543 0ustar dbartmy// // detail/impl/posix_mutex.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_POSIX_MUTEX_IPP #define ASIO_DETAIL_IMPL_POSIX_MUTEX_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_PTHREADS) #include "asio/detail/posix_mutex.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { posix_mutex::posix_mutex() { int error = ::pthread_mutex_init(&mutex_, 0); asio::error_code ec(error, asio::error::get_system_category()); asio::detail::throw_error(ec, "mutex"); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_PTHREADS) #endif // ASIO_DETAIL_IMPL_POSIX_MUTEX_IPP galera-26.4.3/asio/asio/detail/impl/strand_service.ipp0000664000177500017540000001131313540715002021173 0ustar dbartmy// // detail/impl/strand_service.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_STRAND_SERVICE_IPP #define ASIO_DETAIL_IMPL_STRAND_SERVICE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/call_stack.hpp" #include "asio/detail/strand_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct strand_service::on_do_complete_exit { io_service_impl* owner_; strand_impl* impl_; ~on_do_complete_exit() { impl_->mutex_.lock(); impl_->ready_queue_.push(impl_->waiting_queue_); bool more_handlers = impl_->locked_ = !impl_->ready_queue_.empty(); impl_->mutex_.unlock(); if (more_handlers) owner_->post_immediate_completion(impl_, true); } }; strand_service::strand_service(asio::io_service& io_service) : asio::detail::service_base(io_service), io_service_(asio::use_service(io_service)), mutex_(), salt_(0) { } void strand_service::shutdown_service() { op_queue ops; asio::detail::mutex::scoped_lock lock(mutex_); for (std::size_t i = 0; i < num_implementations; ++i) { if (strand_impl* impl = implementations_[i].get()) { ops.push(impl->waiting_queue_); ops.push(impl->ready_queue_); } } } void strand_service::construct(strand_service::implementation_type& impl) { asio::detail::mutex::scoped_lock lock(mutex_); std::size_t salt = salt_++; #if defined(ASIO_ENABLE_SEQUENTIAL_STRAND_ALLOCATION) std::size_t index = salt; #else // defined(ASIO_ENABLE_SEQUENTIAL_STRAND_ALLOCATION) std::size_t index = reinterpret_cast(&impl); index += (reinterpret_cast(&impl) >> 3); index ^= salt + 0x9e3779b9 + (index << 6) + (index >> 2); #endif // defined(ASIO_ENABLE_SEQUENTIAL_STRAND_ALLOCATION) index = index % num_implementations; if (!implementations_[index].get()) implementations_[index].reset(new strand_impl); impl = implementations_[index].get(); } bool strand_service::running_in_this_thread( const 
implementation_type& impl) const { return call_stack::contains(impl) != 0; } bool strand_service::do_dispatch(implementation_type& impl, operation* op) { // If we are running inside the io_service, and no other handler already // holds the strand lock, then the handler can run immediately. bool can_dispatch = io_service_.can_dispatch(); impl->mutex_.lock(); if (can_dispatch && !impl->locked_) { // Immediate invocation is allowed. impl->locked_ = true; impl->mutex_.unlock(); return true; } if (impl->locked_) { // Some other handler already holds the strand lock. Enqueue for later. impl->waiting_queue_.push(op); impl->mutex_.unlock(); } else { // The handler is acquiring the strand lock and so is responsible for // scheduling the strand. impl->locked_ = true; impl->mutex_.unlock(); impl->ready_queue_.push(op); io_service_.post_immediate_completion(impl, false); } return false; } void strand_service::do_post(implementation_type& impl, operation* op, bool is_continuation) { impl->mutex_.lock(); if (impl->locked_) { // Some other handler already holds the strand lock. Enqueue for later. impl->waiting_queue_.push(op); impl->mutex_.unlock(); } else { // The handler is acquiring the strand lock and so is responsible for // scheduling the strand. impl->locked_ = true; impl->mutex_.unlock(); impl->ready_queue_.push(op); io_service_.post_immediate_completion(impl, is_continuation); } } void strand_service::do_complete(io_service_impl* owner, operation* base, const asio::error_code& ec, std::size_t /*bytes_transferred*/) { if (owner) { strand_impl* impl = static_cast(base); // Indicate that this strand is executing on the current thread. call_stack::context ctx(impl); // Ensure the next handler, if any, is scheduled on block exit. on_do_complete_exit on_exit = { owner, impl }; (void)on_exit; // Run all ready handlers. No lock is required since the ready queue is // accessed only within the strand. 
while (operation* o = impl->ready_queue_.front()) { impl->ready_queue_.pop(); o->complete(*owner, ec, 0); } } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IMPL_STRAND_SERVICE_IPP galera-26.4.3/asio/asio/detail/impl/descriptor_ops.ipp0000664000177500017540000002513513540715002021226 0ustar dbartmy// // detail/impl/descriptor_ops.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_DESCRIPTOR_OPS_IPP #define ASIO_DETAIL_IMPL_DESCRIPTOR_OPS_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/descriptor_ops.hpp" #include "asio/error.hpp" #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { namespace descriptor_ops { int open(const char* path, int flags, asio::error_code& ec) { errno = 0; int result = error_wrapper(::open(path, flags), ec); if (result >= 0) ec = asio::error_code(); return result; } int close(int d, state_type& state, asio::error_code& ec) { int result = 0; if (d != -1) { errno = 0; result = error_wrapper(::close(d), ec); if (result != 0 && (ec == asio::error::would_block || ec == asio::error::try_again)) { // According to UNIX Network Programming Vol. 1, it is possible for // close() to fail with EWOULDBLOCK under certain circumstances. What // isn't clear is the state of the descriptor after this error. The one // current OS where this behaviour is seen, Windows, says that the socket // remains open. Therefore we'll put the descriptor back into blocking // mode and have another attempt at closing it. 
#if defined(__SYMBIAN32__) int flags = ::fcntl(d, F_GETFL, 0); if (flags >= 0) ::fcntl(d, F_SETFL, flags & ~O_NONBLOCK); #else // defined(__SYMBIAN32__) ioctl_arg_type arg = 0; ::ioctl(d, FIONBIO, &arg); #endif // defined(__SYMBIAN32__) state &= ~non_blocking; errno = 0; result = error_wrapper(::close(d), ec); } } if (result == 0) ec = asio::error_code(); return result; } bool set_user_non_blocking(int d, state_type& state, bool value, asio::error_code& ec) { if (d == -1) { ec = asio::error::bad_descriptor; return false; } errno = 0; #if defined(__SYMBIAN32__) int result = error_wrapper(::fcntl(d, F_GETFL, 0), ec); if (result >= 0) { errno = 0; int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK)); result = error_wrapper(::fcntl(d, F_SETFL, flag), ec); } #else // defined(__SYMBIAN32__) ioctl_arg_type arg = (value ? 1 : 0); int result = error_wrapper(::ioctl(d, FIONBIO, &arg), ec); #endif // defined(__SYMBIAN32__) if (result >= 0) { ec = asio::error_code(); if (value) state |= user_set_non_blocking; else { // Clearing the user-set non-blocking mode always overrides any // internally-set non-blocking flag. Any subsequent asynchronous // operations will need to re-enable non-blocking I/O. state &= ~(user_set_non_blocking | internal_non_blocking); } return true; } return false; } bool set_internal_non_blocking(int d, state_type& state, bool value, asio::error_code& ec) { if (d == -1) { ec = asio::error::bad_descriptor; return false; } if (!value && (state & user_set_non_blocking)) { // It does not make sense to clear the internal non-blocking flag if the // user still wants non-blocking behaviour. Return an error and let the // caller figure out whether to update the user-set non-blocking flag. ec = asio::error::invalid_argument; return false; } errno = 0; #if defined(__SYMBIAN32__) int result = error_wrapper(::fcntl(d, F_GETFL, 0), ec); if (result >= 0) { errno = 0; int flag = (value ? 
(result | O_NONBLOCK) : (result & ~O_NONBLOCK)); result = error_wrapper(::fcntl(d, F_SETFL, flag), ec); } #else // defined(__SYMBIAN32__) ioctl_arg_type arg = (value ? 1 : 0); int result = error_wrapper(::ioctl(d, FIONBIO, &arg), ec); #endif // defined(__SYMBIAN32__) if (result >= 0) { ec = asio::error_code(); if (value) state |= internal_non_blocking; else state &= ~internal_non_blocking; return true; } return false; } std::size_t sync_read(int d, state_type state, buf* bufs, std::size_t count, bool all_empty, asio::error_code& ec) { if (d == -1) { ec = asio::error::bad_descriptor; return 0; } // A request to read 0 bytes on a stream is a no-op. if (all_empty) { ec = asio::error_code(); return 0; } // Read some data. for (;;) { // Try to complete the operation without blocking. errno = 0; signed_size_type bytes = error_wrapper(::readv( d, bufs, static_cast(count)), ec); // Check if operation succeeded. if (bytes > 0) return bytes; // Check for EOF. if (bytes == 0) { ec = asio::error::eof; return 0; } // Operation failed. if ((state & user_set_non_blocking) || (ec != asio::error::would_block && ec != asio::error::try_again)) return 0; // Wait for descriptor to become ready. if (descriptor_ops::poll_read(d, 0, ec) < 0) return 0; } } bool non_blocking_read(int d, buf* bufs, std::size_t count, asio::error_code& ec, std::size_t& bytes_transferred) { for (;;) { // Read some data. errno = 0; signed_size_type bytes = error_wrapper(::readv( d, bufs, static_cast(count)), ec); // Check for end of stream. if (bytes == 0) { ec = asio::error::eof; return true; } // Retry operation if interrupted by signal. if (ec == asio::error::interrupted) continue; // Check if we need to run the operation again. if (ec == asio::error::would_block || ec == asio::error::try_again) return false; // Operation is complete. 
if (bytes > 0) { ec = asio::error_code(); bytes_transferred = bytes; } else bytes_transferred = 0; return true; } } std::size_t sync_write(int d, state_type state, const buf* bufs, std::size_t count, bool all_empty, asio::error_code& ec) { if (d == -1) { ec = asio::error::bad_descriptor; return 0; } // A request to write 0 bytes on a stream is a no-op. if (all_empty) { ec = asio::error_code(); return 0; } // Write some data. for (;;) { // Try to complete the operation without blocking. errno = 0; signed_size_type bytes = error_wrapper(::writev( d, bufs, static_cast(count)), ec); // Check if operation succeeded. if (bytes > 0) return bytes; // Operation failed. if ((state & user_set_non_blocking) || (ec != asio::error::would_block && ec != asio::error::try_again)) return 0; // Wait for descriptor to become ready. if (descriptor_ops::poll_write(d, 0, ec) < 0) return 0; } } bool non_blocking_write(int d, const buf* bufs, std::size_t count, asio::error_code& ec, std::size_t& bytes_transferred) { for (;;) { // Write some data. errno = 0; signed_size_type bytes = error_wrapper(::writev( d, bufs, static_cast(count)), ec); // Retry operation if interrupted by signal. if (ec == asio::error::interrupted) continue; // Check if we need to run the operation again. if (ec == asio::error::would_block || ec == asio::error::try_again) return false; // Operation is complete. if (bytes >= 0) { ec = asio::error_code(); bytes_transferred = bytes; } else bytes_transferred = 0; return true; } } int ioctl(int d, state_type& state, long cmd, ioctl_arg_type* arg, asio::error_code& ec) { if (d == -1) { ec = asio::error::bad_descriptor; return -1; } errno = 0; int result = error_wrapper(::ioctl(d, cmd, arg), ec); if (result >= 0) { ec = asio::error_code(); // When updating the non-blocking mode we always perform the ioctl syscall, // even if the flags would otherwise indicate that the descriptor is // already in the correct state. 
This ensures that the underlying // descriptor is put into the state that has been requested by the user. If // the ioctl syscall was successful then we need to update the flags to // match. if (cmd == static_cast(FIONBIO)) { if (*arg) { state |= user_set_non_blocking; } else { // Clearing the non-blocking mode always overrides any internally-set // non-blocking flag. Any subsequent asynchronous operations will need // to re-enable non-blocking I/O. state &= ~(user_set_non_blocking | internal_non_blocking); } } } return result; } int fcntl(int d, int cmd, asio::error_code& ec) { if (d == -1) { ec = asio::error::bad_descriptor; return -1; } errno = 0; int result = error_wrapper(::fcntl(d, cmd), ec); if (result != -1) ec = asio::error_code(); return result; } int fcntl(int d, int cmd, long arg, asio::error_code& ec) { if (d == -1) { ec = asio::error::bad_descriptor; return -1; } errno = 0; int result = error_wrapper(::fcntl(d, cmd, arg), ec); if (result != -1) ec = asio::error_code(); return result; } int poll_read(int d, state_type state, asio::error_code& ec) { if (d == -1) { ec = asio::error::bad_descriptor; return -1; } pollfd fds; fds.fd = d; fds.events = POLLIN; fds.revents = 0; int timeout = (state & user_set_non_blocking) ? 0 : -1; errno = 0; int result = error_wrapper(::poll(&fds, 1, timeout), ec); if (result == 0) ec = (state & user_set_non_blocking) ? asio::error::would_block : asio::error_code(); else if (result > 0) ec = asio::error_code(); return result; } int poll_write(int d, state_type state, asio::error_code& ec) { if (d == -1) { ec = asio::error::bad_descriptor; return -1; } pollfd fds; fds.fd = d; fds.events = POLLOUT; fds.revents = 0; int timeout = (state & user_set_non_blocking) ? 0 : -1; errno = 0; int result = error_wrapper(::poll(&fds, 1, timeout), ec); if (result == 0) ec = (state & user_set_non_blocking) ? 
asio::error::would_block : asio::error_code(); else if (result > 0) ec = asio::error_code(); return result; } } // namespace descriptor_ops } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) #endif // ASIO_DETAIL_IMPL_DESCRIPTOR_OPS_IPP galera-26.4.3/asio/asio/detail/impl/service_registry.hpp0000664000177500017540000000427213540715002021555 0ustar dbartmy// // detail/impl/service_registry.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_SERVICE_REGISTRY_HPP #define ASIO_DETAIL_IMPL_SERVICE_REGISTRY_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template service_registry::service_registry( asio::io_service& o, Service*, Arg arg) : owner_(o), first_service_(new Service(o, arg)) { asio::io_service::service::key key; init_key(key, Service::id); first_service_->key_ = key; first_service_->next_ = 0; } template Service& service_registry::first_service() { return *static_cast(first_service_); } template Service& service_registry::use_service() { asio::io_service::service::key key; init_key(key, Service::id); factory_type factory = &service_registry::create; return *static_cast(do_use_service(key, factory)); } template void service_registry::add_service(Service* new_service) { asio::io_service::service::key key; init_key(key, Service::id); return do_add_service(key, new_service); } template bool service_registry::has_service() const { asio::io_service::service::key key; init_key(key, Service::id); return do_has_service(key); } #if !defined(ASIO_NO_TYPEID) 
template void service_registry::init_key(asio::io_service::service::key& key, const asio::detail::service_id& /*id*/) { key.type_info_ = &typeid(typeid_wrapper); key.id_ = 0; } #endif // !defined(ASIO_NO_TYPEID) template asio::io_service::service* service_registry::create( asio::io_service& owner) { return new Service(owner); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IMPL_SERVICE_REGISTRY_HPP galera-26.4.3/asio/asio/detail/impl/socket_select_interrupter.ipp0000664000177500017540000001260713540715002023461 0ustar dbartmy// // detail/impl/socket_select_interrupter.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_SOCKET_SELECT_INTERRUPTER_IPP #define ASIO_DETAIL_IMPL_SOCKET_SELECT_INTERRUPTER_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS_RUNTIME) #if defined(ASIO_WINDOWS) \ || defined(__CYGWIN__) \ || defined(__SYMBIAN32__) #include #include "asio/detail/socket_holder.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/socket_select_interrupter.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { socket_select_interrupter::socket_select_interrupter() { open_descriptors(); } void socket_select_interrupter::open_descriptors() { asio::error_code ec; socket_holder acceptor(socket_ops::socket( AF_INET, SOCK_STREAM, IPPROTO_TCP, ec)); if (acceptor.get() == invalid_socket) asio::detail::throw_error(ec, "socket_select_interrupter"); int opt = 1; socket_ops::state_type acceptor_state = 0; socket_ops::setsockopt(acceptor.get(), 
acceptor_state, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt), ec); using namespace std; // For memset. sockaddr_in4_type addr; std::size_t addr_len = sizeof(addr); memset(&addr, 0, sizeof(addr)); addr.sin_family = AF_INET; addr.sin_addr.s_addr = socket_ops::host_to_network_long(INADDR_LOOPBACK); addr.sin_port = 0; if (socket_ops::bind(acceptor.get(), (const socket_addr_type*)&addr, addr_len, ec) == socket_error_retval) asio::detail::throw_error(ec, "socket_select_interrupter"); if (socket_ops::getsockname(acceptor.get(), (socket_addr_type*)&addr, &addr_len, ec) == socket_error_retval) asio::detail::throw_error(ec, "socket_select_interrupter"); // Some broken firewalls on Windows will intermittently cause getsockname to // return 0.0.0.0 when the socket is actually bound to 127.0.0.1. We // explicitly specify the target address here to work around this problem. if (addr.sin_addr.s_addr == socket_ops::host_to_network_long(INADDR_ANY)) addr.sin_addr.s_addr = socket_ops::host_to_network_long(INADDR_LOOPBACK); if (socket_ops::listen(acceptor.get(), SOMAXCONN, ec) == socket_error_retval) asio::detail::throw_error(ec, "socket_select_interrupter"); socket_holder client(socket_ops::socket( AF_INET, SOCK_STREAM, IPPROTO_TCP, ec)); if (client.get() == invalid_socket) asio::detail::throw_error(ec, "socket_select_interrupter"); if (socket_ops::connect(client.get(), (const socket_addr_type*)&addr, addr_len, ec) == socket_error_retval) asio::detail::throw_error(ec, "socket_select_interrupter"); socket_holder server(socket_ops::accept(acceptor.get(), 0, 0, ec)); if (server.get() == invalid_socket) asio::detail::throw_error(ec, "socket_select_interrupter"); ioctl_arg_type non_blocking = 1; socket_ops::state_type client_state = 0; if (socket_ops::ioctl(client.get(), client_state, FIONBIO, &non_blocking, ec)) asio::detail::throw_error(ec, "socket_select_interrupter"); opt = 1; socket_ops::setsockopt(client.get(), client_state, IPPROTO_TCP, TCP_NODELAY, &opt, sizeof(opt), ec); 
non_blocking = 1; socket_ops::state_type server_state = 0; if (socket_ops::ioctl(server.get(), server_state, FIONBIO, &non_blocking, ec)) asio::detail::throw_error(ec, "socket_select_interrupter"); opt = 1; socket_ops::setsockopt(server.get(), server_state, IPPROTO_TCP, TCP_NODELAY, &opt, sizeof(opt), ec); read_descriptor_ = server.release(); write_descriptor_ = client.release(); } socket_select_interrupter::~socket_select_interrupter() { close_descriptors(); } void socket_select_interrupter::close_descriptors() { asio::error_code ec; socket_ops::state_type state = socket_ops::internal_non_blocking; if (read_descriptor_ != invalid_socket) socket_ops::close(read_descriptor_, state, true, ec); if (write_descriptor_ != invalid_socket) socket_ops::close(write_descriptor_, state, true, ec); } void socket_select_interrupter::recreate() { close_descriptors(); write_descriptor_ = invalid_socket; read_descriptor_ = invalid_socket; open_descriptors(); } void socket_select_interrupter::interrupt() { char byte = 0; socket_ops::buf b; socket_ops::init_buf(b, &byte, 1); asio::error_code ec; socket_ops::send(write_descriptor_, &b, 1, 0, ec); } bool socket_select_interrupter::reset() { char data[1024]; socket_ops::buf b; socket_ops::init_buf(b, data, sizeof(data)); asio::error_code ec; int bytes_read = socket_ops::recv(read_descriptor_, &b, 1, 0, ec); bool was_interrupted = (bytes_read > 0); while (bytes_read == sizeof(data)) bytes_read = socket_ops::recv(read_descriptor_, &b, 1, 0, ec); return was_interrupted; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) #endif // !defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_IMPL_SOCKET_SELECT_INTERRUPTER_IPP galera-26.4.3/asio/asio/detail/impl/kqueue_reactor.hpp0000664000177500017540000000407713540715002021206 0ustar dbartmy// // detail/impl/kqueue_reactor.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 
2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2005 Stefan Arentz (stefan at soze dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_KQUEUE_REACTOR_HPP #define ASIO_DETAIL_IMPL_KQUEUE_REACTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_KQUEUE) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template void kqueue_reactor::add_timer_queue(timer_queue& queue) { do_add_timer_queue(queue); } // Remove a timer queue from the reactor. template void kqueue_reactor::remove_timer_queue(timer_queue& queue) { do_remove_timer_queue(queue); } template void kqueue_reactor::schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op) { asio::detail::mutex::scoped_lock lock(mutex_); if (shutdown_) { io_service_.post_immediate_completion(op, false); return; } bool earliest = queue.enqueue_timer(time, timer, op); io_service_.work_started(); if (earliest) interrupt(); } template std::size_t kqueue_reactor::cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled) { asio::detail::mutex::scoped_lock lock(mutex_); op_queue ops; std::size_t n = queue.cancel_timer(timer, ops, max_cancelled); lock.unlock(); io_service_.post_deferred_completions(ops); return n; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_KQUEUE) #endif // ASIO_DETAIL_IMPL_KQUEUE_REACTOR_HPP galera-26.4.3/asio/asio/detail/impl/win_event.ipp0000664000177500017540000000346513540715002020167 0ustar dbartmy// // detail/win_event.ipp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_EVENT_IPP #define ASIO_DETAIL_IMPL_WIN_EVENT_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) #include "asio/detail/throw_error.hpp" #include "asio/detail/win_event.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { win_event::win_event() : state_(0) { #if defined(ASIO_WINDOWS_APP) events_[0] = ::CreateEventExW(0, 0, CREATE_EVENT_MANUAL_RESET, 0); #else // defined(ASIO_WINDOWS_APP) events_[0] = ::CreateEventW(0, true, false, 0); #endif // defined(ASIO_WINDOWS_APP) if (!events_[0]) { DWORD last_error = ::GetLastError(); asio::error_code ec(last_error, asio::error::get_system_category()); asio::detail::throw_error(ec, "event"); } #if defined(ASIO_WINDOWS_APP) events_[1] = ::CreateEventExW(0, 0, 0, 0); #else // defined(ASIO_WINDOWS_APP) events_[1] = ::CreateEventW(0, false, false, 0); #endif // defined(ASIO_WINDOWS_APP) if (!events_[1]) { DWORD last_error = ::GetLastError(); ::CloseHandle(events_[0]); asio::error_code ec(last_error, asio::error::get_system_category()); asio::detail::throw_error(ec, "event"); } } win_event::~win_event() { ::CloseHandle(events_[0]); ::CloseHandle(events_[1]); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS) #endif // ASIO_DETAIL_IMPL_WIN_EVENT_IPP galera-26.4.3/asio/asio/detail/impl/signal_set_service.ipp0000664000177500017540000004427413540715002022044 0ustar dbartmy// // detail/impl/signal_set_service.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_SIGNAL_SET_SERVICE_IPP #define ASIO_DETAIL_IMPL_SIGNAL_SET_SERVICE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/reactor.hpp" #include "asio/detail/signal_blocker.hpp" #include "asio/detail/signal_set_service.hpp" #include "asio/detail/static_mutex.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct signal_state { // Mutex used for protecting global state. static_mutex mutex_; // The read end of the pipe used for signal notifications. int read_descriptor_; // The write end of the pipe used for signal notifications. int write_descriptor_; // Whether the signal state has been prepared for a fork. bool fork_prepared_; // The head of a linked list of all signal_set_service instances. class signal_set_service* service_list_; // A count of the number of objects that are registered for each signal. 
std::size_t registration_count_[max_signal_number]; }; signal_state* get_signal_state() { static signal_state state = { ASIO_STATIC_MUTEX_INIT, -1, -1, false, 0, { 0 } }; return &state; } void asio_signal_handler(int signal_number) { #if defined(ASIO_WINDOWS) \ || defined(ASIO_WINDOWS_RUNTIME) \ || defined(__CYGWIN__) signal_set_service::deliver_signal(signal_number); #else // defined(ASIO_WINDOWS) // || defined(ASIO_WINDOWS_RUNTIME) // || defined(__CYGWIN__) int saved_errno = errno; signal_state* state = get_signal_state(); signed_size_type result = ::write(state->write_descriptor_, &signal_number, sizeof(signal_number)); (void)result; errno = saved_errno; #endif // defined(ASIO_WINDOWS) // || defined(ASIO_WINDOWS_RUNTIME) // || defined(__CYGWIN__) #if defined(ASIO_HAS_SIGNAL) && !defined(ASIO_HAS_SIGACTION) ::signal(signal_number, asio_signal_handler); #endif // defined(ASIO_HAS_SIGNAL) && !defined(ASIO_HAS_SIGACTION) } #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) class signal_set_service::pipe_read_op : public reactor_op { public: pipe_read_op() : reactor_op(&pipe_read_op::do_perform, pipe_read_op::do_complete) { } static bool do_perform(reactor_op*) { signal_state* state = get_signal_state(); int fd = state->read_descriptor_; int signal_number = 0; while (::read(fd, &signal_number, sizeof(int)) == sizeof(int)) if (signal_number >= 0 && signal_number < max_signal_number) signal_set_service::deliver_signal(signal_number); return false; } static void do_complete(io_service_impl* /*owner*/, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { pipe_read_op* o(static_cast(base)); delete o; } }; #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) signal_set_service::signal_set_service( asio::io_service& io_service) : io_service_(asio::use_service(io_service)), #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) 
reactor_(asio::use_service(io_service)), #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) next_(0), prev_(0) { get_signal_state()->mutex_.init(); #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) reactor_.init_task(); #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) for (int i = 0; i < max_signal_number; ++i) registrations_[i] = 0; add_service(this); } signal_set_service::~signal_set_service() { remove_service(this); } void signal_set_service::shutdown_service() { remove_service(this); op_queue ops; for (int i = 0; i < max_signal_number; ++i) { registration* reg = registrations_[i]; while (reg) { ops.push(*reg->queue_); reg = reg->next_in_table_; } } io_service_.abandon_operations(ops); } void signal_set_service::fork_service( asio::io_service::fork_event fork_ev) { #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) signal_state* state = get_signal_state(); static_mutex::scoped_lock lock(state->mutex_); switch (fork_ev) { case asio::io_service::fork_prepare: { int read_descriptor = state->read_descriptor_; state->fork_prepared_ = true; lock.unlock(); reactor_.deregister_internal_descriptor(read_descriptor, reactor_data_); } break; case asio::io_service::fork_parent: if (state->fork_prepared_) { int read_descriptor = state->read_descriptor_; state->fork_prepared_ = false; lock.unlock(); reactor_.register_internal_descriptor(reactor::read_op, read_descriptor, reactor_data_, new pipe_read_op); } break; case asio::io_service::fork_child: if (state->fork_prepared_) { asio::detail::signal_blocker blocker; close_descriptors(); open_descriptors(); int read_descriptor = state->read_descriptor_; state->fork_prepared_ = false; lock.unlock(); reactor_.register_internal_descriptor(reactor::read_op, read_descriptor, reactor_data_, new pipe_read_op); } break; default: break; } #else // !defined(ASIO_WINDOWS) 
// && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) (void)fork_ev; #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) } void signal_set_service::construct( signal_set_service::implementation_type& impl) { impl.signals_ = 0; } void signal_set_service::destroy( signal_set_service::implementation_type& impl) { asio::error_code ignored_ec; clear(impl, ignored_ec); cancel(impl, ignored_ec); } asio::error_code signal_set_service::add( signal_set_service::implementation_type& impl, int signal_number, asio::error_code& ec) { // Check that the signal number is valid. if (signal_number < 0 || signal_number >= max_signal_number) { ec = asio::error::invalid_argument; return ec; } signal_state* state = get_signal_state(); static_mutex::scoped_lock lock(state->mutex_); // Find the appropriate place to insert the registration. registration** insertion_point = &impl.signals_; registration* next = impl.signals_; while (next && next->signal_number_ < signal_number) { insertion_point = &next->next_in_set_; next = next->next_in_set_; } // Only do something if the signal is not already registered. if (next == 0 || next->signal_number_ != signal_number) { registration* new_registration = new registration; #if defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION) // Register for the signal if we're the first. if (state->registration_count_[signal_number] == 0) { # if defined(ASIO_HAS_SIGACTION) using namespace std; // For memset. 
struct sigaction sa; memset(&sa, 0, sizeof(sa)); sa.sa_handler = asio_signal_handler; sigfillset(&sa.sa_mask); if (::sigaction(signal_number, &sa, 0) == -1) # else // defined(ASIO_HAS_SIGACTION) if (::signal(signal_number, asio_signal_handler) == SIG_ERR) # endif // defined(ASIO_HAS_SIGACTION) { # if defined(ASIO_WINDOWS) || defined(__CYGWIN__) ec = asio::error::invalid_argument; # else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) ec = asio::error_code(errno, asio::error::get_system_category()); # endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) delete new_registration; return ec; } } #endif // defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION) // Record the new registration in the set. new_registration->signal_number_ = signal_number; new_registration->queue_ = &impl.queue_; new_registration->next_in_set_ = next; *insertion_point = new_registration; // Insert registration into the registration table. new_registration->next_in_table_ = registrations_[signal_number]; if (registrations_[signal_number]) registrations_[signal_number]->prev_in_table_ = new_registration; registrations_[signal_number] = new_registration; ++state->registration_count_[signal_number]; } ec = asio::error_code(); return ec; } asio::error_code signal_set_service::remove( signal_set_service::implementation_type& impl, int signal_number, asio::error_code& ec) { // Check that the signal number is valid. if (signal_number < 0 || signal_number >= max_signal_number) { ec = asio::error::invalid_argument; return ec; } signal_state* state = get_signal_state(); static_mutex::scoped_lock lock(state->mutex_); // Find the signal number in the list of registrations. 
registration** deletion_point = &impl.signals_; registration* reg = impl.signals_; while (reg && reg->signal_number_ < signal_number) { deletion_point = ®->next_in_set_; reg = reg->next_in_set_; } if (reg != 0 && reg->signal_number_ == signal_number) { #if defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION) // Set signal handler back to the default if we're the last. if (state->registration_count_[signal_number] == 1) { # if defined(ASIO_HAS_SIGACTION) using namespace std; // For memset. struct sigaction sa; memset(&sa, 0, sizeof(sa)); sa.sa_handler = SIG_DFL; if (::sigaction(signal_number, &sa, 0) == -1) # else // defined(ASIO_HAS_SIGACTION) if (::signal(signal_number, SIG_DFL) == SIG_ERR) # endif // defined(ASIO_HAS_SIGACTION) { # if defined(ASIO_WINDOWS) || defined(__CYGWIN__) ec = asio::error::invalid_argument; # else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) ec = asio::error_code(errno, asio::error::get_system_category()); # endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) return ec; } } #endif // defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION) // Remove the registration from the set. *deletion_point = reg->next_in_set_; // Remove the registration from the registration table. if (registrations_[signal_number] == reg) registrations_[signal_number] = reg->next_in_table_; if (reg->prev_in_table_) reg->prev_in_table_->next_in_table_ = reg->next_in_table_; if (reg->next_in_table_) reg->next_in_table_->prev_in_table_ = reg->prev_in_table_; --state->registration_count_[signal_number]; delete reg; } ec = asio::error_code(); return ec; } asio::error_code signal_set_service::clear( signal_set_service::implementation_type& impl, asio::error_code& ec) { signal_state* state = get_signal_state(); static_mutex::scoped_lock lock(state->mutex_); while (registration* reg = impl.signals_) { #if defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION) // Set signal handler back to the default if we're the last. 
if (state->registration_count_[reg->signal_number_] == 1) { # if defined(ASIO_HAS_SIGACTION) using namespace std; // For memset. struct sigaction sa; memset(&sa, 0, sizeof(sa)); sa.sa_handler = SIG_DFL; if (::sigaction(reg->signal_number_, &sa, 0) == -1) # else // defined(ASIO_HAS_SIGACTION) if (::signal(reg->signal_number_, SIG_DFL) == SIG_ERR) # endif // defined(ASIO_HAS_SIGACTION) { # if defined(ASIO_WINDOWS) || defined(__CYGWIN__) ec = asio::error::invalid_argument; # else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) ec = asio::error_code(errno, asio::error::get_system_category()); # endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) return ec; } } #endif // defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION) // Remove the registration from the registration table. if (registrations_[reg->signal_number_] == reg) registrations_[reg->signal_number_] = reg->next_in_table_; if (reg->prev_in_table_) reg->prev_in_table_->next_in_table_ = reg->next_in_table_; if (reg->next_in_table_) reg->next_in_table_->prev_in_table_ = reg->prev_in_table_; --state->registration_count_[reg->signal_number_]; impl.signals_ = reg->next_in_set_; delete reg; } ec = asio::error_code(); return ec; } asio::error_code signal_set_service::cancel( signal_set_service::implementation_type& impl, asio::error_code& ec) { ASIO_HANDLER_OPERATION(("signal_set", &impl, "cancel")); op_queue ops; { signal_state* state = get_signal_state(); static_mutex::scoped_lock lock(state->mutex_); while (signal_op* op = impl.queue_.front()) { op->ec_ = asio::error::operation_aborted; impl.queue_.pop(); ops.push(op); } } io_service_.post_deferred_completions(ops); ec = asio::error_code(); return ec; } void signal_set_service::deliver_signal(int signal_number) { signal_state* state = get_signal_state(); static_mutex::scoped_lock lock(state->mutex_); signal_set_service* service = state->service_list_; while (service) { op_queue ops; registration* reg = service->registrations_[signal_number]; while (reg) { if 
(reg->queue_->empty()) { ++reg->undelivered_; } else { while (signal_op* op = reg->queue_->front()) { op->signal_number_ = signal_number; reg->queue_->pop(); ops.push(op); } } reg = reg->next_in_table_; } service->io_service_.post_deferred_completions(ops); service = service->next_; } } void signal_set_service::add_service(signal_set_service* service) { signal_state* state = get_signal_state(); static_mutex::scoped_lock lock(state->mutex_); #if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) // If this is the first service to be created, open a new pipe. if (state->service_list_ == 0) open_descriptors(); #endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) // Insert service into linked list of all services. service->next_ = state->service_list_; service->prev_ = 0; if (state->service_list_) state->service_list_->prev_ = service; state->service_list_ = service; #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) // Register for pipe readiness notifications. int read_descriptor = state->read_descriptor_; lock.unlock(); service->reactor_.register_internal_descriptor(reactor::read_op, read_descriptor, service->reactor_data_, new pipe_read_op); #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) } void signal_set_service::remove_service(signal_set_service* service) { signal_state* state = get_signal_state(); static_mutex::scoped_lock lock(state->mutex_); if (service->next_ || service->prev_ || state->service_list_ == service) { #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) // Disable the pipe readiness notifications. int read_descriptor = state->read_descriptor_; lock.unlock(); service->reactor_.deregister_descriptor( read_descriptor, service->reactor_data_, false); lock.lock(); #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) // Remove service from linked list of all services. 
if (state->service_list_ == service) state->service_list_ = service->next_; if (service->prev_) service->prev_->next_ = service->next_; if (service->next_) service->next_->prev_= service->prev_; service->next_ = 0; service->prev_ = 0; #if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) // If this is the last service to be removed, close the pipe. if (state->service_list_ == 0) close_descriptors(); #endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) } } void signal_set_service::open_descriptors() { #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) signal_state* state = get_signal_state(); int pipe_fds[2]; if (::pipe(pipe_fds) == 0) { state->read_descriptor_ = pipe_fds[0]; ::fcntl(state->read_descriptor_, F_SETFL, O_NONBLOCK); state->write_descriptor_ = pipe_fds[1]; ::fcntl(state->write_descriptor_, F_SETFL, O_NONBLOCK); #if defined(FD_CLOEXEC) ::fcntl(state->read_descriptor_, F_SETFD, FD_CLOEXEC); ::fcntl(state->write_descriptor_, F_SETFD, FD_CLOEXEC); #endif // defined(FD_CLOEXEC) } else { asio::error_code ec(errno, asio::error::get_system_category()); asio::detail::throw_error(ec, "signal_set_service pipe"); } #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) } void signal_set_service::close_descriptors() { #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) signal_state* state = get_signal_state(); if (state->read_descriptor_ != -1) ::close(state->read_descriptor_); state->read_descriptor_ = -1; if (state->write_descriptor_ != -1) ::close(state->write_descriptor_); state->write_descriptor_ = -1; #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) } void signal_set_service::start_wait_op( signal_set_service::implementation_type& impl, signal_op* op) { io_service_.work_started(); signal_state* state = get_signal_state(); static_mutex::scoped_lock lock(state->mutex_); registration* reg = 
impl.signals_; while (reg) { if (reg->undelivered_ > 0) { --reg->undelivered_; op->signal_number_ = reg->signal_number_; io_service_.post_deferred_completion(op); return; } reg = reg->next_in_set_; } impl.queue_.push(op); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IMPL_SIGNAL_SET_SERVICE_IPP galera-26.4.3/asio/asio/detail/impl/task_io_service.ipp0000664000177500017540000002603113540715002021334 0ustar dbartmy// // detail/impl/task_io_service.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_TASK_IO_SERVICE_IPP #define ASIO_DETAIL_IMPL_TASK_IO_SERVICE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_IOCP) #include "asio/detail/event.hpp" #include "asio/detail/limits.hpp" #include "asio/detail/reactor.hpp" #include "asio/detail/task_io_service.hpp" #include "asio/detail/task_io_service_thread_info.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct task_io_service::task_cleanup { ~task_cleanup() { if (this_thread_->private_outstanding_work > 0) { asio::detail::increment( task_io_service_->outstanding_work_, this_thread_->private_outstanding_work); } this_thread_->private_outstanding_work = 0; // Enqueue the completed operations and reinsert the task at the end of // the operation queue. 
lock_->lock(); task_io_service_->task_interrupted_ = true; task_io_service_->op_queue_.push(this_thread_->private_op_queue); task_io_service_->op_queue_.push(&task_io_service_->task_operation_); } task_io_service* task_io_service_; mutex::scoped_lock* lock_; thread_info* this_thread_; }; struct task_io_service::work_cleanup { ~work_cleanup() { if (this_thread_->private_outstanding_work > 1) { asio::detail::increment( task_io_service_->outstanding_work_, this_thread_->private_outstanding_work - 1); } else if (this_thread_->private_outstanding_work < 1) { task_io_service_->work_finished(); } this_thread_->private_outstanding_work = 0; #if defined(ASIO_HAS_THREADS) if (!this_thread_->private_op_queue.empty()) { lock_->lock(); task_io_service_->op_queue_.push(this_thread_->private_op_queue); } #endif // defined(ASIO_HAS_THREADS) } task_io_service* task_io_service_; mutex::scoped_lock* lock_; thread_info* this_thread_; }; task_io_service::task_io_service( asio::io_service& io_service, std::size_t concurrency_hint) : asio::detail::service_base(io_service), one_thread_(concurrency_hint == 1), mutex_(), task_(0), task_interrupted_(true), outstanding_work_(0), stopped_(false), shutdown_(false) { ASIO_HANDLER_TRACKING_INIT; } void task_io_service::shutdown_service() { mutex::scoped_lock lock(mutex_); shutdown_ = true; lock.unlock(); // Destroy handler objects. while (!op_queue_.empty()) { operation* o = op_queue_.front(); op_queue_.pop(); if (o != &task_operation_) o->destroy(); } // Reset to initial state. 
task_ = 0; } void task_io_service::init_task() { mutex::scoped_lock lock(mutex_); if (!shutdown_ && !task_) { task_ = &use_service(this->get_io_service()); op_queue_.push(&task_operation_); wake_one_thread_and_unlock(lock); } } std::size_t task_io_service::run(asio::error_code& ec) { ec = asio::error_code(); if (outstanding_work_ == 0) { stop(); return 0; } thread_info this_thread; this_thread.private_outstanding_work = 0; thread_call_stack::context ctx(this, this_thread); mutex::scoped_lock lock(mutex_); std::size_t n = 0; for (; do_run_one(lock, this_thread, ec); lock.lock()) if (n != (std::numeric_limits::max)()) ++n; return n; } std::size_t task_io_service::run_one(asio::error_code& ec) { ec = asio::error_code(); if (outstanding_work_ == 0) { stop(); return 0; } thread_info this_thread; this_thread.private_outstanding_work = 0; thread_call_stack::context ctx(this, this_thread); mutex::scoped_lock lock(mutex_); return do_run_one(lock, this_thread, ec); } std::size_t task_io_service::poll(asio::error_code& ec) { ec = asio::error_code(); if (outstanding_work_ == 0) { stop(); return 0; } thread_info this_thread; this_thread.private_outstanding_work = 0; thread_call_stack::context ctx(this, this_thread); mutex::scoped_lock lock(mutex_); #if defined(ASIO_HAS_THREADS) // We want to support nested calls to poll() and poll_one(), so any handlers // that are already on a thread-private queue need to be put on to the main // queue now. 
if (one_thread_) if (thread_info* outer_thread_info = ctx.next_by_key()) op_queue_.push(outer_thread_info->private_op_queue); #endif // defined(ASIO_HAS_THREADS) std::size_t n = 0; for (; do_poll_one(lock, this_thread, ec); lock.lock()) if (n != (std::numeric_limits::max)()) ++n; return n; } std::size_t task_io_service::poll_one(asio::error_code& ec) { ec = asio::error_code(); if (outstanding_work_ == 0) { stop(); return 0; } thread_info this_thread; this_thread.private_outstanding_work = 0; thread_call_stack::context ctx(this, this_thread); mutex::scoped_lock lock(mutex_); #if defined(ASIO_HAS_THREADS) // We want to support nested calls to poll() and poll_one(), so any handlers // that are already on a thread-private queue need to be put on to the main // queue now. if (one_thread_) if (thread_info* outer_thread_info = ctx.next_by_key()) op_queue_.push(outer_thread_info->private_op_queue); #endif // defined(ASIO_HAS_THREADS) return do_poll_one(lock, this_thread, ec); } void task_io_service::stop() { mutex::scoped_lock lock(mutex_); stop_all_threads(lock); } bool task_io_service::stopped() const { mutex::scoped_lock lock(mutex_); return stopped_; } void task_io_service::reset() { mutex::scoped_lock lock(mutex_); stopped_ = false; } void task_io_service::post_immediate_completion( task_io_service::operation* op, bool is_continuation) { #if defined(ASIO_HAS_THREADS) if (one_thread_ || is_continuation) { if (thread_info* this_thread = thread_call_stack::contains(this)) { ++this_thread->private_outstanding_work; this_thread->private_op_queue.push(op); return; } } #else // defined(ASIO_HAS_THREADS) (void)is_continuation; #endif // defined(ASIO_HAS_THREADS) work_started(); mutex::scoped_lock lock(mutex_); op_queue_.push(op); wake_one_thread_and_unlock(lock); } void task_io_service::post_deferred_completion(task_io_service::operation* op) { #if defined(ASIO_HAS_THREADS) if (one_thread_) { if (thread_info* this_thread = thread_call_stack::contains(this)) { 
this_thread->private_op_queue.push(op); return; } } #endif // defined(ASIO_HAS_THREADS) mutex::scoped_lock lock(mutex_); op_queue_.push(op); wake_one_thread_and_unlock(lock); } void task_io_service::post_deferred_completions( op_queue& ops) { if (!ops.empty()) { #if defined(ASIO_HAS_THREADS) if (one_thread_) { if (thread_info* this_thread = thread_call_stack::contains(this)) { this_thread->private_op_queue.push(ops); return; } } #endif // defined(ASIO_HAS_THREADS) mutex::scoped_lock lock(mutex_); op_queue_.push(ops); wake_one_thread_and_unlock(lock); } } void task_io_service::do_dispatch( task_io_service::operation* op) { work_started(); mutex::scoped_lock lock(mutex_); op_queue_.push(op); wake_one_thread_and_unlock(lock); } void task_io_service::abandon_operations( op_queue& ops) { op_queue ops2; ops2.push(ops); } std::size_t task_io_service::do_run_one(mutex::scoped_lock& lock, task_io_service::thread_info& this_thread, const asio::error_code& ec) { while (!stopped_) { if (!op_queue_.empty()) { // Prepare to execute first handler from queue. operation* o = op_queue_.front(); op_queue_.pop(); bool more_handlers = (!op_queue_.empty()); if (o == &task_operation_) { task_interrupted_ = more_handlers; if (more_handlers && !one_thread_) wakeup_event_.unlock_and_signal_one(lock); else lock.unlock(); task_cleanup on_exit = { this, &lock, &this_thread }; (void)on_exit; // Run the task. May throw an exception. Only block if the operation // queue is empty and we're not polling, otherwise we want to return // as soon as possible. task_->run(!more_handlers, this_thread.private_op_queue); } else { std::size_t task_result = o->task_result_; if (more_handlers && !one_thread_) wake_one_thread_and_unlock(lock); else lock.unlock(); // Ensure the count of outstanding work is decremented on block exit. work_cleanup on_exit = { this, &lock, &this_thread }; (void)on_exit; // Complete the operation. May throw an exception. Deletes the object. 
o->complete(*this, ec, task_result); return 1; } } else { wakeup_event_.clear(lock); wakeup_event_.wait(lock); } } return 0; } std::size_t task_io_service::do_poll_one(mutex::scoped_lock& lock, task_io_service::thread_info& this_thread, const asio::error_code& ec) { if (stopped_) return 0; operation* o = op_queue_.front(); if (o == &task_operation_) { op_queue_.pop(); lock.unlock(); { task_cleanup c = { this, &lock, &this_thread }; (void)c; // Run the task. May throw an exception. Only block if the operation // queue is empty and we're not polling, otherwise we want to return // as soon as possible. task_->run(false, this_thread.private_op_queue); } o = op_queue_.front(); if (o == &task_operation_) { wakeup_event_.maybe_unlock_and_signal_one(lock); return 0; } } if (o == 0) return 0; op_queue_.pop(); bool more_handlers = (!op_queue_.empty()); std::size_t task_result = o->task_result_; if (more_handlers && !one_thread_) wake_one_thread_and_unlock(lock); else lock.unlock(); // Ensure the count of outstanding work is decremented on block exit. work_cleanup on_exit = { this, &lock, &this_thread }; (void)on_exit; // Complete the operation. May throw an exception. Deletes the object. 
o->complete(*this, ec, task_result); return 1; } void task_io_service::stop_all_threads( mutex::scoped_lock& lock) { stopped_ = true; wakeup_event_.signal_all(lock); if (!task_interrupted_ && task_) { task_interrupted_ = true; task_->interrupt(); } } void task_io_service::wake_one_thread_and_unlock( mutex::scoped_lock& lock) { if (!wakeup_event_.maybe_unlock_and_signal_one(lock)) { if (!task_interrupted_ && task_) { task_interrupted_ = true; task_->interrupt(); } lock.unlock(); } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_IMPL_TASK_IO_SERVICE_IPP galera-26.4.3/asio/asio/detail/impl/epoll_reactor.ipp0000664000177500017540000004247013540715002021022 0ustar dbartmy// // detail/impl/epoll_reactor.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_EPOLL_REACTOR_IPP #define ASIO_DETAIL_IMPL_EPOLL_REACTOR_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_EPOLL) #include #include #include "asio/detail/epoll_reactor.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #if defined(ASIO_HAS_TIMERFD) # include #endif // defined(ASIO_HAS_TIMERFD) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { epoll_reactor::epoll_reactor(asio::io_service& io_service) : asio::detail::service_base(io_service), io_service_(use_service(io_service)), mutex_(), interrupter_(), epoll_fd_(do_epoll_create()), timer_fd_(do_timerfd_create()), shutdown_(false) { // Add the interrupter's descriptor to epoll. 
epoll_event ev = { 0, { 0 } }; ev.events = EPOLLIN | EPOLLERR | EPOLLET; ev.data.ptr = &interrupter_; epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, interrupter_.read_descriptor(), &ev); interrupter_.interrupt(); // Add the timer descriptor to epoll. if (timer_fd_ != -1) { ev.events = EPOLLIN | EPOLLERR; ev.data.ptr = &timer_fd_; epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, timer_fd_, &ev); } } epoll_reactor::~epoll_reactor() { if (epoll_fd_ != -1) close(epoll_fd_); if (timer_fd_ != -1) close(timer_fd_); } void epoll_reactor::shutdown_service() { mutex::scoped_lock lock(mutex_); shutdown_ = true; lock.unlock(); op_queue ops; while (descriptor_state* state = registered_descriptors_.first()) { for (int i = 0; i < max_ops; ++i) ops.push(state->op_queue_[i]); state->shutdown_ = true; registered_descriptors_.free(state); } timer_queues_.get_all_timers(ops); io_service_.abandon_operations(ops); } void epoll_reactor::fork_service(asio::io_service::fork_event fork_ev) { if (fork_ev == asio::io_service::fork_child) { if (epoll_fd_ != -1) ::close(epoll_fd_); epoll_fd_ = -1; epoll_fd_ = do_epoll_create(); if (timer_fd_ != -1) ::close(timer_fd_); timer_fd_ = -1; timer_fd_ = do_timerfd_create(); interrupter_.recreate(); // Add the interrupter's descriptor to epoll. epoll_event ev = { 0, { 0 } }; ev.events = EPOLLIN | EPOLLERR | EPOLLET; ev.data.ptr = &interrupter_; epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, interrupter_.read_descriptor(), &ev); interrupter_.interrupt(); // Add the timer descriptor to epoll. if (timer_fd_ != -1) { ev.events = EPOLLIN | EPOLLERR; ev.data.ptr = &timer_fd_; epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, timer_fd_, &ev); } update_timeout(); // Re-register all descriptors with epoll. 
mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_); for (descriptor_state* state = registered_descriptors_.first(); state != 0; state = state->next_) { ev.events = state->registered_events_; ev.data.ptr = state; int result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, state->descriptor_, &ev); if (result != 0) { asio::error_code ec(errno, asio::error::get_system_category()); asio::detail::throw_error(ec, "epoll re-registration"); } } } } void epoll_reactor::init_task() { io_service_.init_task(); } int epoll_reactor::register_descriptor(socket_type descriptor, epoll_reactor::per_descriptor_data& descriptor_data) { descriptor_data = allocate_descriptor_state(); { mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); descriptor_data->reactor_ = this; descriptor_data->descriptor_ = descriptor; descriptor_data->shutdown_ = false; } epoll_event ev = { 0, { 0 } }; ev.events = EPOLLIN | EPOLLERR | EPOLLHUP | EPOLLPRI | EPOLLET; descriptor_data->registered_events_ = ev.events; ev.data.ptr = descriptor_data; int result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, descriptor, &ev); if (result != 0) return errno; return 0; } int epoll_reactor::register_internal_descriptor( int op_type, socket_type descriptor, epoll_reactor::per_descriptor_data& descriptor_data, reactor_op* op) { descriptor_data = allocate_descriptor_state(); { mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); descriptor_data->reactor_ = this; descriptor_data->descriptor_ = descriptor; descriptor_data->shutdown_ = false; descriptor_data->op_queue_[op_type].push(op); } epoll_event ev = { 0, { 0 } }; ev.events = EPOLLIN | EPOLLERR | EPOLLHUP | EPOLLPRI | EPOLLET; descriptor_data->registered_events_ = ev.events; ev.data.ptr = descriptor_data; int result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, descriptor, &ev); if (result != 0) return errno; return 0; } void epoll_reactor::move_descriptor(socket_type, epoll_reactor::per_descriptor_data& target_descriptor_data, epoll_reactor::per_descriptor_data& 
source_descriptor_data) { target_descriptor_data = source_descriptor_data; source_descriptor_data = 0; } void epoll_reactor::start_op(int op_type, socket_type descriptor, epoll_reactor::per_descriptor_data& descriptor_data, reactor_op* op, bool is_continuation, bool allow_speculative) { if (!descriptor_data) { op->ec_ = asio::error::bad_descriptor; post_immediate_completion(op, is_continuation); return; } mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); if (descriptor_data->shutdown_) { post_immediate_completion(op, is_continuation); return; } if (descriptor_data->op_queue_[op_type].empty()) { if (allow_speculative && (op_type != read_op || descriptor_data->op_queue_[except_op].empty())) { if (op->perform()) { descriptor_lock.unlock(); io_service_.post_immediate_completion(op, is_continuation); return; } if (op_type == write_op) { if ((descriptor_data->registered_events_ & EPOLLOUT) == 0) { epoll_event ev = { 0, { 0 } }; ev.events = descriptor_data->registered_events_ | EPOLLOUT; ev.data.ptr = descriptor_data; if (epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, descriptor, &ev) == 0) { descriptor_data->registered_events_ |= ev.events; } else { op->ec_ = asio::error_code(errno, asio::error::get_system_category()); io_service_.post_immediate_completion(op, is_continuation); return; } } } } else { if (op_type == write_op) { descriptor_data->registered_events_ |= EPOLLOUT; } epoll_event ev = { 0, { 0 } }; ev.events = descriptor_data->registered_events_; ev.data.ptr = descriptor_data; epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, descriptor, &ev); } } descriptor_data->op_queue_[op_type].push(op); io_service_.work_started(); } void epoll_reactor::cancel_ops(socket_type, epoll_reactor::per_descriptor_data& descriptor_data) { if (!descriptor_data) return; mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); op_queue ops; for (int i = 0; i < max_ops; ++i) { while (reactor_op* op = descriptor_data->op_queue_[i].front()) { op->ec_ = asio::error::operation_aborted; 
descriptor_data->op_queue_[i].pop(); ops.push(op); } } descriptor_lock.unlock(); io_service_.post_deferred_completions(ops); } void epoll_reactor::deregister_descriptor(socket_type descriptor, epoll_reactor::per_descriptor_data& descriptor_data, bool closing) { if (!descriptor_data) return; mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); if (!descriptor_data->shutdown_) { if (closing) { // The descriptor will be automatically removed from the epoll set when // it is closed. } else { epoll_event ev = { 0, { 0 } }; epoll_ctl(epoll_fd_, EPOLL_CTL_DEL, descriptor, &ev); } op_queue ops; for (int i = 0; i < max_ops; ++i) { while (reactor_op* op = descriptor_data->op_queue_[i].front()) { op->ec_ = asio::error::operation_aborted; descriptor_data->op_queue_[i].pop(); ops.push(op); } } descriptor_data->descriptor_ = -1; descriptor_data->shutdown_ = true; descriptor_lock.unlock(); free_descriptor_state(descriptor_data); descriptor_data = 0; io_service_.post_deferred_completions(ops); } } void epoll_reactor::deregister_internal_descriptor(socket_type descriptor, epoll_reactor::per_descriptor_data& descriptor_data) { if (!descriptor_data) return; mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); if (!descriptor_data->shutdown_) { epoll_event ev = { 0, { 0 } }; epoll_ctl(epoll_fd_, EPOLL_CTL_DEL, descriptor, &ev); op_queue ops; for (int i = 0; i < max_ops; ++i) ops.push(descriptor_data->op_queue_[i]); descriptor_data->descriptor_ = -1; descriptor_data->shutdown_ = true; descriptor_lock.unlock(); free_descriptor_state(descriptor_data); descriptor_data = 0; } } void epoll_reactor::run(bool block, op_queue& ops) { // This code relies on the fact that the task_io_service queues the reactor // task behind all descriptor operations generated by this function. This // means, that by the time we reach this point, any previously returned // descriptor operations have already been dequeued. 
Therefore it is now safe // for us to reuse and return them for the task_io_service to queue again. // Calculate a timeout only if timerfd is not used. int timeout; if (timer_fd_ != -1) timeout = block ? -1 : 0; else { mutex::scoped_lock lock(mutex_); timeout = block ? get_timeout() : 0; } // Block on the epoll descriptor. epoll_event events[128]; int num_events = epoll_wait(epoll_fd_, events, 128, timeout); #if defined(ASIO_HAS_TIMERFD) bool check_timers = (timer_fd_ == -1); #else // defined(ASIO_HAS_TIMERFD) bool check_timers = true; #endif // defined(ASIO_HAS_TIMERFD) // Dispatch the waiting events. for (int i = 0; i < num_events; ++i) { void* ptr = events[i].data.ptr; if (ptr == &interrupter_) { // No need to reset the interrupter since we're leaving the descriptor // in a ready-to-read state and relying on edge-triggered notifications // to make it so that we only get woken up when the descriptor's epoll // registration is updated. #if defined(ASIO_HAS_TIMERFD) if (timer_fd_ == -1) check_timers = true; #else // defined(ASIO_HAS_TIMERFD) check_timers = true; #endif // defined(ASIO_HAS_TIMERFD) } #if defined(ASIO_HAS_TIMERFD) else if (ptr == &timer_fd_) { check_timers = true; } #endif // defined(ASIO_HAS_TIMERFD) else { // The descriptor operation doesn't count as work in and of itself, so we // don't call work_started() here. This still allows the io_service to // stop if the only remaining operations are descriptor operations. 
descriptor_state* descriptor_data = static_cast(ptr); descriptor_data->set_ready_events(events[i].events); ops.push(descriptor_data); } } if (check_timers) { mutex::scoped_lock common_lock(mutex_); timer_queues_.get_ready_timers(ops); #if defined(ASIO_HAS_TIMERFD) if (timer_fd_ != -1) { itimerspec new_timeout; itimerspec old_timeout; int flags = get_timeout(new_timeout); timerfd_settime(timer_fd_, flags, &new_timeout, &old_timeout); } #endif // defined(ASIO_HAS_TIMERFD) } } void epoll_reactor::interrupt() { epoll_event ev = { 0, { 0 } }; ev.events = EPOLLIN | EPOLLERR | EPOLLET; ev.data.ptr = &interrupter_; epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, interrupter_.read_descriptor(), &ev); } int epoll_reactor::do_epoll_create() { #if defined(EPOLL_CLOEXEC) int fd = epoll_create1(EPOLL_CLOEXEC); #else // defined(EPOLL_CLOEXEC) int fd = -1; errno = EINVAL; #endif // defined(EPOLL_CLOEXEC) if (fd == -1 && (errno == EINVAL || errno == ENOSYS)) { fd = epoll_create(epoll_size); if (fd != -1) ::fcntl(fd, F_SETFD, FD_CLOEXEC); } if (fd == -1) { asio::error_code ec(errno, asio::error::get_system_category()); asio::detail::throw_error(ec, "epoll"); } return fd; } int epoll_reactor::do_timerfd_create() { #if defined(ASIO_HAS_TIMERFD) # if defined(TFD_CLOEXEC) int fd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC); # else // defined(TFD_CLOEXEC) int fd = -1; errno = EINVAL; # endif // defined(TFD_CLOEXEC) if (fd == -1 && errno == EINVAL) { fd = timerfd_create(CLOCK_MONOTONIC, 0); if (fd != -1) ::fcntl(fd, F_SETFD, FD_CLOEXEC); } return fd; #else // defined(ASIO_HAS_TIMERFD) return -1; #endif // defined(ASIO_HAS_TIMERFD) } epoll_reactor::descriptor_state* epoll_reactor::allocate_descriptor_state() { mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_); return registered_descriptors_.alloc(); } void epoll_reactor::free_descriptor_state(epoll_reactor::descriptor_state* s) { mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_); registered_descriptors_.free(s); } 
void epoll_reactor::do_add_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(mutex_); timer_queues_.insert(&queue); } void epoll_reactor::do_remove_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(mutex_); timer_queues_.erase(&queue); } void epoll_reactor::update_timeout() { #if defined(ASIO_HAS_TIMERFD) if (timer_fd_ != -1) { itimerspec new_timeout; itimerspec old_timeout; int flags = get_timeout(new_timeout); timerfd_settime(timer_fd_, flags, &new_timeout, &old_timeout); return; } #endif // defined(ASIO_HAS_TIMERFD) interrupt(); } int epoll_reactor::get_timeout() { // By default we will wait no longer than 5 minutes. This will ensure that // any changes to the system clock are detected after no longer than this. return timer_queues_.wait_duration_msec(5 * 60 * 1000); } #if defined(ASIO_HAS_TIMERFD) int epoll_reactor::get_timeout(itimerspec& ts) { ts.it_interval.tv_sec = 0; ts.it_interval.tv_nsec = 0; long usec = timer_queues_.wait_duration_usec(5 * 60 * 1000 * 1000); ts.it_value.tv_sec = usec / 1000000; ts.it_value.tv_nsec = usec ? (usec % 1000000) * 1000 : 1; return usec ? 0 : TFD_TIMER_ABSTIME; } #endif // defined(ASIO_HAS_TIMERFD) struct epoll_reactor::perform_io_cleanup_on_block_exit { explicit perform_io_cleanup_on_block_exit(epoll_reactor* r) : reactor_(r), first_op_(0) { } ~perform_io_cleanup_on_block_exit() { if (first_op_) { // Post the remaining completed operations for invocation. if (!ops_.empty()) reactor_->io_service_.post_deferred_completions(ops_); // A user-initiated operation has completed, but there's no need to // explicitly call work_finished() here. Instead, we'll take advantage of // the fact that the task_io_service will call work_finished() once we // return. } else { // No user-initiated operations have completed, so we need to compensate // for the work_finished() call that the task_io_service will make once // this operation returns. 
reactor_->io_service_.work_started(); } } epoll_reactor* reactor_; op_queue ops_; operation* first_op_; }; epoll_reactor::descriptor_state::descriptor_state() : operation(&epoll_reactor::descriptor_state::do_complete) { } operation* epoll_reactor::descriptor_state::perform_io(uint32_t events) { mutex_.lock(); perform_io_cleanup_on_block_exit io_cleanup(reactor_); mutex::scoped_lock descriptor_lock(mutex_, mutex::scoped_lock::adopt_lock); // Exception operations must be processed first to ensure that any // out-of-band data is read before normal data. static const int flag[max_ops] = { EPOLLIN, EPOLLOUT, EPOLLPRI }; for (int j = max_ops - 1; j >= 0; --j) { if (events & (flag[j] | EPOLLERR | EPOLLHUP)) { while (reactor_op* op = op_queue_[j].front()) { if (op->perform()) { op_queue_[j].pop(); io_cleanup.ops_.push(op); } else break; } } } // The first operation will be returned for completion now. The others will // be posted for later by the io_cleanup object's destructor. io_cleanup.first_op_ = io_cleanup.ops_.front(); io_cleanup.ops_.pop(); return io_cleanup.first_op_; } void epoll_reactor::descriptor_state::do_complete( io_service_impl* owner, operation* base, const asio::error_code& ec, std::size_t bytes_transferred) { if (owner) { descriptor_state* descriptor_data = static_cast(base); uint32_t events = static_cast(bytes_transferred); if (operation* op = descriptor_data->perform_io(events)) { op->complete(*owner, ec, 0); } } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_EPOLL) #endif // ASIO_DETAIL_IMPL_EPOLL_REACTOR_IPP galera-26.4.3/asio/asio/detail/impl/win_thread.ipp0000664000177500017540000000726113540715002020313 0ustar dbartmy// // detail/impl/win_thread.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_THREAD_IPP #define ASIO_DETAIL_IMPL_WIN_THREAD_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_APP) \ && !defined(UNDER_CE) #include #include "asio/detail/throw_error.hpp" #include "asio/detail/win_thread.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { win_thread::~win_thread() { ::CloseHandle(thread_); // The exit_event_ handle is deliberately allowed to leak here since it // is an error for the owner of an internal thread not to join() it. } void win_thread::join() { HANDLE handles[2] = { exit_event_, thread_ }; ::WaitForMultipleObjects(2, handles, FALSE, INFINITE); ::CloseHandle(exit_event_); if (terminate_threads()) { ::TerminateThread(thread_, 0); } else { ::QueueUserAPC(apc_function, thread_, 0); ::WaitForSingleObject(thread_, INFINITE); } } void win_thread::start_thread(func_base* arg, unsigned int stack_size) { ::HANDLE entry_event = 0; arg->entry_event_ = entry_event = ::CreateEventW(0, true, false, 0); if (!entry_event) { DWORD last_error = ::GetLastError(); delete arg; asio::error_code ec(last_error, asio::error::get_system_category()); asio::detail::throw_error(ec, "thread.entry_event"); } arg->exit_event_ = exit_event_ = ::CreateEventW(0, true, false, 0); if (!exit_event_) { DWORD last_error = ::GetLastError(); delete arg; asio::error_code ec(last_error, asio::error::get_system_category()); asio::detail::throw_error(ec, "thread.exit_event"); } unsigned int thread_id = 0; thread_ = reinterpret_cast(::_beginthreadex(0, stack_size, win_thread_function, arg, 0, &thread_id)); if (!thread_) { DWORD last_error = ::GetLastError(); delete arg; if (entry_event) ::CloseHandle(entry_event); if (exit_event_) ::CloseHandle(exit_event_); 
asio::error_code ec(last_error, asio::error::get_system_category()); asio::detail::throw_error(ec, "thread"); } if (entry_event) { ::WaitForSingleObject(entry_event, INFINITE); ::CloseHandle(entry_event); } } unsigned int __stdcall win_thread_function(void* arg) { win_thread::auto_func_base_ptr func = { static_cast(arg) }; ::SetEvent(func.ptr->entry_event_); func.ptr->run(); // Signal that the thread has finished its work, but rather than returning go // to sleep to put the thread into a well known state. If the thread is being // joined during global object destruction then it may be killed using // TerminateThread (to avoid a deadlock in DllMain). Otherwise, the SleepEx // call will be interrupted using QueueUserAPC and the thread will shut down // cleanly. HANDLE exit_event = func.ptr->exit_event_; delete func.ptr; func.ptr = 0; ::SetEvent(exit_event); ::SleepEx(INFINITE, TRUE); return 0; } #if defined(WINVER) && (WINVER < 0x0500) void __stdcall apc_function(ULONG) {} #else void __stdcall apc_function(ULONG_PTR) {} #endif } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_APP) // && !defined(UNDER_CE) #endif // ASIO_DETAIL_IMPL_WIN_THREAD_IPP galera-26.4.3/asio/asio/detail/impl/win_iocp_serial_port_service.ipp0000664000177500017540000001223313540715002024114 0ustar dbartmy// // detail/impl/win_iocp_serial_port_service.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_IOCP_SERIAL_PORT_SERVICE_IPP #define ASIO_DETAIL_IMPL_WIN_IOCP_SERIAL_PORT_SERVICE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) && defined(ASIO_HAS_SERIAL_PORT) #include #include "asio/detail/win_iocp_serial_port_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { win_iocp_serial_port_service::win_iocp_serial_port_service( asio::io_service& io_service) : handle_service_(io_service) { } void win_iocp_serial_port_service::shutdown_service() { } asio::error_code win_iocp_serial_port_service::open( win_iocp_serial_port_service::implementation_type& impl, const std::string& device, asio::error_code& ec) { if (is_open(impl)) { ec = asio::error::already_open; return ec; } // For convenience, add a leading \\.\ sequence if not already present. std::string name = (device[0] == '\\') ? device : "\\\\.\\" + device; // Open a handle to the serial port. ::HANDLE handle = ::CreateFileA(name.c_str(), GENERIC_READ | GENERIC_WRITE, 0, 0, OPEN_EXISTING, FILE_FLAG_OVERLAPPED, 0); if (handle == INVALID_HANDLE_VALUE) { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); return ec; } // Determine the initial serial port parameters. using namespace std; // For memset. ::DCB dcb; memset(&dcb, 0, sizeof(DCB)); dcb.DCBlength = sizeof(DCB); if (!::GetCommState(handle, &dcb)) { DWORD last_error = ::GetLastError(); ::CloseHandle(handle); ec = asio::error_code(last_error, asio::error::get_system_category()); return ec; } // Set some default serial port parameters. This implementation does not // support changing these, so they might as well be in a known state. dcb.fBinary = TRUE; // Win32 only supports binary mode. 
dcb.fDsrSensitivity = FALSE; dcb.fNull = FALSE; // Do not ignore NULL characters. dcb.fAbortOnError = FALSE; // Ignore serial framing errors. if (!::SetCommState(handle, &dcb)) { DWORD last_error = ::GetLastError(); ::CloseHandle(handle); ec = asio::error_code(last_error, asio::error::get_system_category()); return ec; } // Set up timeouts so that the serial port will behave similarly to a // network socket. Reads wait for at least one byte, then return with // whatever they have. Writes return once everything is out the door. ::COMMTIMEOUTS timeouts; timeouts.ReadIntervalTimeout = 1; timeouts.ReadTotalTimeoutMultiplier = 0; timeouts.ReadTotalTimeoutConstant = 0; timeouts.WriteTotalTimeoutMultiplier = 0; timeouts.WriteTotalTimeoutConstant = 0; if (!::SetCommTimeouts(handle, &timeouts)) { DWORD last_error = ::GetLastError(); ::CloseHandle(handle); ec = asio::error_code(last_error, asio::error::get_system_category()); return ec; } // We're done. Take ownership of the serial port handle. if (handle_service_.assign(impl, handle, ec)) ::CloseHandle(handle); return ec; } asio::error_code win_iocp_serial_port_service::do_set_option( win_iocp_serial_port_service::implementation_type& impl, win_iocp_serial_port_service::store_function_type store, const void* option, asio::error_code& ec) { using namespace std; // For memcpy. 
::DCB dcb; memset(&dcb, 0, sizeof(DCB)); dcb.DCBlength = sizeof(DCB); if (!::GetCommState(handle_service_.native_handle(impl), &dcb)) { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); return ec; } if (store(option, dcb, ec)) return ec; if (!::SetCommState(handle_service_.native_handle(impl), &dcb)) { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); return ec; } ec = asio::error_code(); return ec; } asio::error_code win_iocp_serial_port_service::do_get_option( const win_iocp_serial_port_service::implementation_type& impl, win_iocp_serial_port_service::load_function_type load, void* option, asio::error_code& ec) const { using namespace std; // For memset. ::DCB dcb; memset(&dcb, 0, sizeof(DCB)); dcb.DCBlength = sizeof(DCB); if (!::GetCommState(handle_service_.native_handle(impl), &dcb)) { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); return ec; } return load(option, dcb, ec); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) && defined(ASIO_HAS_SERIAL_PORT) #endif // ASIO_DETAIL_IMPL_WIN_IOCP_SERIAL_PORT_SERVICE_IPP galera-26.4.3/asio/asio/detail/impl/win_iocp_handle_service.ipp0000664000177500017540000003261113540715002023026 0ustar dbartmy// // detail/impl/win_iocp_handle_service.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_IOCP_HANDLE_SERVICE_IPP #define ASIO_DETAIL_IMPL_WIN_IOCP_HANDLE_SERVICE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/win_iocp_handle_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class win_iocp_handle_service::overlapped_wrapper : public OVERLAPPED { public: explicit overlapped_wrapper(asio::error_code& ec) { Internal = 0; InternalHigh = 0; Offset = 0; OffsetHigh = 0; // Create a non-signalled manual-reset event, for GetOverlappedResult. hEvent = ::CreateEventW(0, TRUE, FALSE, 0); if (hEvent) { // As documented in GetQueuedCompletionStatus, setting the low order // bit of this event prevents our synchronous writes from being treated // as completion port events. DWORD_PTR tmp = reinterpret_cast(hEvent); hEvent = reinterpret_cast(tmp | 1); } else { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); } } ~overlapped_wrapper() { if (hEvent) { ::CloseHandle(hEvent); } } }; win_iocp_handle_service::win_iocp_handle_service( asio::io_service& io_service) : iocp_service_(asio::use_service(io_service)), mutex_(), impl_list_(0) { } void win_iocp_handle_service::shutdown_service() { // Close all implementations, causing all operations to complete. asio::detail::mutex::scoped_lock lock(mutex_); implementation_type* impl = impl_list_; while (impl) { close_for_destruction(*impl); impl = impl->next_; } } void win_iocp_handle_service::construct( win_iocp_handle_service::implementation_type& impl) { impl.handle_ = INVALID_HANDLE_VALUE; impl.safe_cancellation_thread_id_ = 0; // Insert implementation into linked list of all implementations. 
asio::detail::mutex::scoped_lock lock(mutex_); impl.next_ = impl_list_; impl.prev_ = 0; if (impl_list_) impl_list_->prev_ = &impl; impl_list_ = &impl; } void win_iocp_handle_service::move_construct( win_iocp_handle_service::implementation_type& impl, win_iocp_handle_service::implementation_type& other_impl) { impl.handle_ = other_impl.handle_; other_impl.handle_ = INVALID_HANDLE_VALUE; impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_; other_impl.safe_cancellation_thread_id_ = 0; // Insert implementation into linked list of all implementations. asio::detail::mutex::scoped_lock lock(mutex_); impl.next_ = impl_list_; impl.prev_ = 0; if (impl_list_) impl_list_->prev_ = &impl; impl_list_ = &impl; } void win_iocp_handle_service::move_assign( win_iocp_handle_service::implementation_type& impl, win_iocp_handle_service& other_service, win_iocp_handle_service::implementation_type& other_impl) { close_for_destruction(impl); if (this != &other_service) { // Remove implementation from linked list of all implementations. asio::detail::mutex::scoped_lock lock(mutex_); if (impl_list_ == &impl) impl_list_ = impl.next_; if (impl.prev_) impl.prev_->next_ = impl.next_; if (impl.next_) impl.next_->prev_= impl.prev_; impl.next_ = 0; impl.prev_ = 0; } impl.handle_ = other_impl.handle_; other_impl.handle_ = INVALID_HANDLE_VALUE; impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_; other_impl.safe_cancellation_thread_id_ = 0; if (this != &other_service) { // Insert implementation into linked list of all implementations. asio::detail::mutex::scoped_lock lock(other_service.mutex_); impl.next_ = other_service.impl_list_; impl.prev_ = 0; if (other_service.impl_list_) other_service.impl_list_->prev_ = &impl; other_service.impl_list_ = &impl; } } void win_iocp_handle_service::destroy( win_iocp_handle_service::implementation_type& impl) { close_for_destruction(impl); // Remove implementation from linked list of all implementations. 
asio::detail::mutex::scoped_lock lock(mutex_); if (impl_list_ == &impl) impl_list_ = impl.next_; if (impl.prev_) impl.prev_->next_ = impl.next_; if (impl.next_) impl.next_->prev_= impl.prev_; impl.next_ = 0; impl.prev_ = 0; } asio::error_code win_iocp_handle_service::assign( win_iocp_handle_service::implementation_type& impl, const native_handle_type& handle, asio::error_code& ec) { if (is_open(impl)) { ec = asio::error::already_open; return ec; } if (iocp_service_.register_handle(handle, ec)) return ec; impl.handle_ = handle; ec = asio::error_code(); return ec; } asio::error_code win_iocp_handle_service::close( win_iocp_handle_service::implementation_type& impl, asio::error_code& ec) { if (is_open(impl)) { ASIO_HANDLER_OPERATION(("handle", &impl, "close")); if (!::CloseHandle(impl.handle_)) { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); } else { ec = asio::error_code(); } impl.handle_ = INVALID_HANDLE_VALUE; impl.safe_cancellation_thread_id_ = 0; } else { ec = asio::error_code(); } return ec; } asio::error_code win_iocp_handle_service::cancel( win_iocp_handle_service::implementation_type& impl, asio::error_code& ec) { if (!is_open(impl)) { ec = asio::error::bad_descriptor; return ec; } ASIO_HANDLER_OPERATION(("handle", &impl, "cancel")); if (FARPROC cancel_io_ex_ptr = ::GetProcAddress( ::GetModuleHandleA("KERNEL32"), "CancelIoEx")) { // The version of Windows supports cancellation from any thread. typedef BOOL (WINAPI* cancel_io_ex_t)(HANDLE, LPOVERLAPPED); cancel_io_ex_t cancel_io_ex = (cancel_io_ex_t)cancel_io_ex_ptr; if (!cancel_io_ex(impl.handle_, 0)) { DWORD last_error = ::GetLastError(); if (last_error == ERROR_NOT_FOUND) { // ERROR_NOT_FOUND means that there were no operations to be // cancelled. We swallow this error to match the behaviour on other // platforms. 
ec = asio::error_code(); } else { ec = asio::error_code(last_error, asio::error::get_system_category()); } } else { ec = asio::error_code(); } } else if (impl.safe_cancellation_thread_id_ == 0) { // No operations have been started, so there's nothing to cancel. ec = asio::error_code(); } else if (impl.safe_cancellation_thread_id_ == ::GetCurrentThreadId()) { // Asynchronous operations have been started from the current thread only, // so it is safe to try to cancel them using CancelIo. if (!::CancelIo(impl.handle_)) { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); } else { ec = asio::error_code(); } } else { // Asynchronous operations have been started from more than one thread, // so cancellation is not safe. ec = asio::error::operation_not_supported; } return ec; } size_t win_iocp_handle_service::do_write( win_iocp_handle_service::implementation_type& impl, uint64_t offset, const asio::const_buffer& buffer, asio::error_code& ec) { if (!is_open(impl)) { ec = asio::error::bad_descriptor; return 0; } // A request to write 0 bytes on a handle is a no-op. if (asio::buffer_size(buffer) == 0) { ec = asio::error_code(); return 0; } overlapped_wrapper overlapped(ec); if (ec) { return 0; } // Write the data. overlapped.Offset = offset & 0xFFFFFFFF; overlapped.OffsetHigh = (offset >> 32) & 0xFFFFFFFF; BOOL ok = ::WriteFile(impl.handle_, asio::buffer_cast(buffer), static_cast(asio::buffer_size(buffer)), 0, &overlapped); if (!ok) { DWORD last_error = ::GetLastError(); if (last_error != ERROR_IO_PENDING) { ec = asio::error_code(last_error, asio::error::get_system_category()); return 0; } } // Wait for the operation to complete. 
DWORD bytes_transferred = 0; ok = ::GetOverlappedResult(impl.handle_, &overlapped, &bytes_transferred, TRUE); if (!ok) { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); return 0; } ec = asio::error_code(); return bytes_transferred; } void win_iocp_handle_service::start_write_op( win_iocp_handle_service::implementation_type& impl, uint64_t offset, const asio::const_buffer& buffer, operation* op) { update_cancellation_thread_id(impl); iocp_service_.work_started(); if (!is_open(impl)) { iocp_service_.on_completion(op, asio::error::bad_descriptor); } else if (asio::buffer_size(buffer) == 0) { // A request to write 0 bytes on a handle is a no-op. iocp_service_.on_completion(op); } else { DWORD bytes_transferred = 0; op->Offset = offset & 0xFFFFFFFF; op->OffsetHigh = (offset >> 32) & 0xFFFFFFFF; BOOL ok = ::WriteFile(impl.handle_, asio::buffer_cast(buffer), static_cast(asio::buffer_size(buffer)), &bytes_transferred, op); DWORD last_error = ::GetLastError(); if (!ok && last_error != ERROR_IO_PENDING && last_error != ERROR_MORE_DATA) { iocp_service_.on_completion(op, last_error, bytes_transferred); } else { iocp_service_.on_pending(op); } } } size_t win_iocp_handle_service::do_read( win_iocp_handle_service::implementation_type& impl, uint64_t offset, const asio::mutable_buffer& buffer, asio::error_code& ec) { if (!is_open(impl)) { ec = asio::error::bad_descriptor; return 0; } // A request to read 0 bytes on a stream handle is a no-op. if (asio::buffer_size(buffer) == 0) { ec = asio::error_code(); return 0; } overlapped_wrapper overlapped(ec); if (ec) { return 0; } // Read some data. 
overlapped.Offset = offset & 0xFFFFFFFF; overlapped.OffsetHigh = (offset >> 32) & 0xFFFFFFFF; BOOL ok = ::ReadFile(impl.handle_, asio::buffer_cast(buffer), static_cast(asio::buffer_size(buffer)), 0, &overlapped); if (!ok) { DWORD last_error = ::GetLastError(); if (last_error != ERROR_IO_PENDING && last_error != ERROR_MORE_DATA) { if (last_error == ERROR_HANDLE_EOF) { ec = asio::error::eof; } else { ec = asio::error_code(last_error, asio::error::get_system_category()); } return 0; } } // Wait for the operation to complete. DWORD bytes_transferred = 0; ok = ::GetOverlappedResult(impl.handle_, &overlapped, &bytes_transferred, TRUE); if (!ok) { DWORD last_error = ::GetLastError(); if (last_error == ERROR_HANDLE_EOF) { ec = asio::error::eof; } else { ec = asio::error_code(last_error, asio::error::get_system_category()); } return (last_error == ERROR_MORE_DATA) ? bytes_transferred : 0; } ec = asio::error_code(); return bytes_transferred; } void win_iocp_handle_service::start_read_op( win_iocp_handle_service::implementation_type& impl, uint64_t offset, const asio::mutable_buffer& buffer, operation* op) { update_cancellation_thread_id(impl); iocp_service_.work_started(); if (!is_open(impl)) { iocp_service_.on_completion(op, asio::error::bad_descriptor); } else if (asio::buffer_size(buffer) == 0) { // A request to read 0 bytes on a handle is a no-op. 
iocp_service_.on_completion(op); } else { DWORD bytes_transferred = 0; op->Offset = offset & 0xFFFFFFFF; op->OffsetHigh = (offset >> 32) & 0xFFFFFFFF; BOOL ok = ::ReadFile(impl.handle_, asio::buffer_cast(buffer), static_cast(asio::buffer_size(buffer)), &bytes_transferred, op); DWORD last_error = ::GetLastError(); if (!ok && last_error != ERROR_IO_PENDING && last_error != ERROR_MORE_DATA) { iocp_service_.on_completion(op, last_error, bytes_transferred); } else { iocp_service_.on_pending(op); } } } void win_iocp_handle_service::update_cancellation_thread_id( win_iocp_handle_service::implementation_type& impl) { if (impl.safe_cancellation_thread_id_ == 0) impl.safe_cancellation_thread_id_ = ::GetCurrentThreadId(); else if (impl.safe_cancellation_thread_id_ != ::GetCurrentThreadId()) impl.safe_cancellation_thread_id_ = ~DWORD(0); } void win_iocp_handle_service::close_for_destruction(implementation_type& impl) { if (is_open(impl)) { ASIO_HANDLER_OPERATION(("handle", &impl, "close")); ::CloseHandle(impl.handle_); impl.handle_ = INVALID_HANDLE_VALUE; impl.safe_cancellation_thread_id_ = 0; } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_IMPL_WIN_IOCP_HANDLE_SERVICE_IPP galera-26.4.3/asio/asio/detail/impl/kqueue_reactor.ipp0000664000177500017540000003344213540715002021205 0ustar dbartmy// // detail/impl/kqueue_reactor.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2005 Stefan Arentz (stefan at soze dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP #define ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_KQUEUE) #include "asio/detail/kqueue_reactor.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" #if defined(__NetBSD__) # define ASIO_KQUEUE_EV_SET(ev, ident, filt, flags, fflags, data, udata) \ EV_SET(ev, ident, filt, flags, fflags, data, \ reinterpret_cast(static_cast(udata))) #else # define ASIO_KQUEUE_EV_SET(ev, ident, filt, flags, fflags, data, udata) \ EV_SET(ev, ident, filt, flags, fflags, data, udata) #endif namespace asio { namespace detail { kqueue_reactor::kqueue_reactor(asio::io_service& io_service) : asio::detail::service_base(io_service), io_service_(use_service(io_service)), mutex_(), kqueue_fd_(do_kqueue_create()), interrupter_(), shutdown_(false) { struct kevent events[1]; ASIO_KQUEUE_EV_SET(&events[0], interrupter_.read_descriptor(), EVFILT_READ, EV_ADD, 0, 0, &interrupter_); if (::kevent(kqueue_fd_, events, 1, 0, 0, 0) == -1) { asio::error_code error(errno, asio::error::get_system_category()); asio::detail::throw_error(error); } } kqueue_reactor::~kqueue_reactor() { close(kqueue_fd_); } void kqueue_reactor::shutdown_service() { mutex::scoped_lock lock(mutex_); shutdown_ = true; lock.unlock(); op_queue ops; while (descriptor_state* state = registered_descriptors_.first()) { for (int i = 0; i < max_ops; ++i) ops.push(state->op_queue_[i]); state->shutdown_ = true; registered_descriptors_.free(state); } timer_queues_.get_all_timers(ops); io_service_.abandon_operations(ops); } void kqueue_reactor::fork_service(asio::io_service::fork_event fork_ev) { if (fork_ev == asio::io_service::fork_child) { // The kqueue descriptor is automatically closed in the 
child. kqueue_fd_ = -1; kqueue_fd_ = do_kqueue_create(); interrupter_.recreate(); struct kevent events[2]; ASIO_KQUEUE_EV_SET(&events[0], interrupter_.read_descriptor(), EVFILT_READ, EV_ADD, 0, 0, &interrupter_); if (::kevent(kqueue_fd_, events, 1, 0, 0, 0) == -1) { asio::error_code ec(errno, asio::error::get_system_category()); asio::detail::throw_error(ec, "kqueue interrupter registration"); } // Re-register all descriptors with kqueue. mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_); for (descriptor_state* state = registered_descriptors_.first(); state != 0; state = state->next_) { if (state->num_kevents_ > 0) { ASIO_KQUEUE_EV_SET(&events[0], state->descriptor_, EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, state); ASIO_KQUEUE_EV_SET(&events[1], state->descriptor_, EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, state); if (::kevent(kqueue_fd_, events, state->num_kevents_, 0, 0, 0) == -1) { asio::error_code ec(errno, asio::error::get_system_category()); asio::detail::throw_error(ec, "kqueue re-registration"); } } } } } void kqueue_reactor::init_task() { io_service_.init_task(); } int kqueue_reactor::register_descriptor(socket_type descriptor, kqueue_reactor::per_descriptor_data& descriptor_data) { descriptor_data = allocate_descriptor_state(); mutex::scoped_lock lock(descriptor_data->mutex_); descriptor_data->descriptor_ = descriptor; descriptor_data->num_kevents_ = 0; descriptor_data->shutdown_ = false; return 0; } int kqueue_reactor::register_internal_descriptor( int op_type, socket_type descriptor, kqueue_reactor::per_descriptor_data& descriptor_data, reactor_op* op) { descriptor_data = allocate_descriptor_state(); mutex::scoped_lock lock(descriptor_data->mutex_); descriptor_data->descriptor_ = descriptor; descriptor_data->num_kevents_ = 1; descriptor_data->shutdown_ = false; descriptor_data->op_queue_[op_type].push(op); struct kevent events[1]; ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, descriptor_data); if 
(::kevent(kqueue_fd_, events, 1, 0, 0, 0) == -1) return errno; return 0; } void kqueue_reactor::move_descriptor(socket_type, kqueue_reactor::per_descriptor_data& target_descriptor_data, kqueue_reactor::per_descriptor_data& source_descriptor_data) { target_descriptor_data = source_descriptor_data; source_descriptor_data = 0; } void kqueue_reactor::start_op(int op_type, socket_type descriptor, kqueue_reactor::per_descriptor_data& descriptor_data, reactor_op* op, bool is_continuation, bool allow_speculative) { if (!descriptor_data) { op->ec_ = asio::error::bad_descriptor; post_immediate_completion(op, is_continuation); return; } mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); if (descriptor_data->shutdown_) { post_immediate_completion(op, is_continuation); return; } if (descriptor_data->op_queue_[op_type].empty()) { static const int num_kevents[max_ops] = { 1, 2, 1 }; if (allow_speculative && (op_type != read_op || descriptor_data->op_queue_[except_op].empty())) { if (op->perform()) { descriptor_lock.unlock(); io_service_.post_immediate_completion(op, is_continuation); return; } if (descriptor_data->num_kevents_ < num_kevents[op_type]) { struct kevent events[2]; ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, descriptor_data); ASIO_KQUEUE_EV_SET(&events[1], descriptor, EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, descriptor_data); if (::kevent(kqueue_fd_, events, num_kevents[op_type], 0, 0, 0) != -1) { descriptor_data->num_kevents_ = num_kevents[op_type]; } else { op->ec_ = asio::error_code(errno, asio::error::get_system_category()); io_service_.post_immediate_completion(op, is_continuation); return; } } } else { if (descriptor_data->num_kevents_ < num_kevents[op_type]) descriptor_data->num_kevents_ = num_kevents[op_type]; struct kevent events[2]; ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, descriptor_data); ASIO_KQUEUE_EV_SET(&events[1], descriptor, EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, 
descriptor_data); ::kevent(kqueue_fd_, events, descriptor_data->num_kevents_, 0, 0, 0); } } descriptor_data->op_queue_[op_type].push(op); io_service_.work_started(); } void kqueue_reactor::cancel_ops(socket_type, kqueue_reactor::per_descriptor_data& descriptor_data) { if (!descriptor_data) return; mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); op_queue ops; for (int i = 0; i < max_ops; ++i) { while (reactor_op* op = descriptor_data->op_queue_[i].front()) { op->ec_ = asio::error::operation_aborted; descriptor_data->op_queue_[i].pop(); ops.push(op); } } descriptor_lock.unlock(); io_service_.post_deferred_completions(ops); } void kqueue_reactor::deregister_descriptor(socket_type descriptor, kqueue_reactor::per_descriptor_data& descriptor_data, bool closing) { if (!descriptor_data) return; mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); if (!descriptor_data->shutdown_) { if (closing) { // The descriptor will be automatically removed from the kqueue when it // is closed. 
} else { struct kevent events[2]; ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ, EV_DELETE, 0, 0, 0); ASIO_KQUEUE_EV_SET(&events[1], descriptor, EVFILT_WRITE, EV_DELETE, 0, 0, 0); ::kevent(kqueue_fd_, events, descriptor_data->num_kevents_, 0, 0, 0); } op_queue ops; for (int i = 0; i < max_ops; ++i) { while (reactor_op* op = descriptor_data->op_queue_[i].front()) { op->ec_ = asio::error::operation_aborted; descriptor_data->op_queue_[i].pop(); ops.push(op); } } descriptor_data->descriptor_ = -1; descriptor_data->shutdown_ = true; descriptor_lock.unlock(); free_descriptor_state(descriptor_data); descriptor_data = 0; io_service_.post_deferred_completions(ops); } } void kqueue_reactor::deregister_internal_descriptor(socket_type descriptor, kqueue_reactor::per_descriptor_data& descriptor_data) { if (!descriptor_data) return; mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); if (!descriptor_data->shutdown_) { struct kevent events[2]; ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ, EV_DELETE, 0, 0, 0); ASIO_KQUEUE_EV_SET(&events[1], descriptor, EVFILT_WRITE, EV_DELETE, 0, 0, 0); ::kevent(kqueue_fd_, events, descriptor_data->num_kevents_, 0, 0, 0); op_queue ops; for (int i = 0; i < max_ops; ++i) ops.push(descriptor_data->op_queue_[i]); descriptor_data->descriptor_ = -1; descriptor_data->shutdown_ = true; descriptor_lock.unlock(); free_descriptor_state(descriptor_data); descriptor_data = 0; } } void kqueue_reactor::run(bool block, op_queue& ops) { mutex::scoped_lock lock(mutex_); // Determine how long to block while waiting for events. timespec timeout_buf = { 0, 0 }; timespec* timeout = block ? get_timeout(timeout_buf) : &timeout_buf; lock.unlock(); // Block on the kqueue descriptor. struct kevent events[128]; int num_events = kevent(kqueue_fd_, 0, 0, events, 128, timeout); // Dispatch the waiting events. 
for (int i = 0; i < num_events; ++i) { void* ptr = reinterpret_cast(events[i].udata); if (ptr == &interrupter_) { interrupter_.reset(); } else { descriptor_state* descriptor_data = static_cast(ptr); mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); if (events[i].filter == EVFILT_WRITE && descriptor_data->num_kevents_ == 2 && descriptor_data->op_queue_[write_op].empty()) { // Some descriptor types, like serial ports, don't seem to support // EV_CLEAR with EVFILT_WRITE. Since we have no pending write // operations we'll remove the EVFILT_WRITE registration here so that // we don't end up in a tight spin. struct kevent delete_events[1]; ASIO_KQUEUE_EV_SET(&delete_events[0], descriptor_data->descriptor_, EVFILT_WRITE, EV_DELETE, 0, 0, 0); ::kevent(kqueue_fd_, delete_events, 1, 0, 0, 0); descriptor_data->num_kevents_ = 1; } // Exception operations must be processed first to ensure that any // out-of-band data is read before normal data. #if defined(__NetBSD__) static const unsigned int filter[max_ops] = #else static const int filter[max_ops] = #endif { EVFILT_READ, EVFILT_WRITE, EVFILT_READ }; for (int j = max_ops - 1; j >= 0; --j) { if (events[i].filter == filter[j]) { if (j != except_op || events[i].flags & EV_OOBAND) { while (reactor_op* op = descriptor_data->op_queue_[j].front()) { if (events[i].flags & EV_ERROR) { op->ec_ = asio::error_code( static_cast(events[i].data), asio::error::get_system_category()); descriptor_data->op_queue_[j].pop(); ops.push(op); } if (op->perform()) { descriptor_data->op_queue_[j].pop(); ops.push(op); } else break; } } } } } } lock.lock(); timer_queues_.get_ready_timers(ops); } void kqueue_reactor::interrupt() { interrupter_.interrupt(); } int kqueue_reactor::do_kqueue_create() { int fd = ::kqueue(); if (fd == -1) { asio::error_code ec(errno, asio::error::get_system_category()); asio::detail::throw_error(ec, "kqueue"); } return fd; } kqueue_reactor::descriptor_state* kqueue_reactor::allocate_descriptor_state() { 
mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_); return registered_descriptors_.alloc(); } void kqueue_reactor::free_descriptor_state(kqueue_reactor::descriptor_state* s) { mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_); registered_descriptors_.free(s); } void kqueue_reactor::do_add_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(mutex_); timer_queues_.insert(&queue); } void kqueue_reactor::do_remove_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(mutex_); timer_queues_.erase(&queue); } timespec* kqueue_reactor::get_timeout(timespec& ts) { // By default we will wait no longer than 5 minutes. This will ensure that // any changes to the system clock are detected after no longer than this. long usec = timer_queues_.wait_duration_usec(5 * 60 * 1000 * 1000); ts.tv_sec = usec / 1000000; ts.tv_nsec = (usec % 1000000) * 1000; return &ts; } } // namespace detail } // namespace asio #undef ASIO_KQUEUE_EV_SET #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_KQUEUE) #endif // ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP galera-26.4.3/asio/asio/detail/impl/winsock_init.ipp0000664000177500017540000000357413540715002020672 0ustar dbartmy// // detail/impl/winsock_init.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WINSOCK_INIT_IPP #define ASIO_DETAIL_IMPL_WINSOCK_INIT_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) #include "asio/detail/socket_types.hpp" #include "asio/detail/winsock_init.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { void winsock_init_base::startup(data& d, unsigned char major, unsigned char minor) { if (::InterlockedIncrement(&d.init_count_) == 1) { WSADATA wsa_data; long result = ::WSAStartup(MAKEWORD(major, minor), &wsa_data); ::InterlockedExchange(&d.result_, result); } } void winsock_init_base::manual_startup(data& d) { if (::InterlockedIncrement(&d.init_count_) == 1) { ::InterlockedExchange(&d.result_, 0); } } void winsock_init_base::cleanup(data& d) { if (::InterlockedDecrement(&d.init_count_) == 0) { ::WSACleanup(); } } void winsock_init_base::manual_cleanup(data& d) { ::InterlockedDecrement(&d.init_count_); } void winsock_init_base::throw_on_error(data& d) { long result = ::InterlockedExchangeAdd(&d.result_, 0); if (result != 0) { asio::error_code ec(result, asio::error::get_system_category()); asio::detail::throw_error(ec, "winsock"); } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) #endif // ASIO_DETAIL_IMPL_WINSOCK_INIT_IPP galera-26.4.3/asio/asio/detail/impl/socket_ops.ipp0000664000177500017540000027354613540715002020353 0ustar dbartmy// // detail/impl/socket_ops.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SOCKET_OPS_IPP #define ASIO_DETAIL_SOCKET_OPS_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include #include #include #include #include "asio/detail/assert.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/error.hpp" #if defined(ASIO_WINDOWS_RUNTIME) # include # include # include #endif // defined(ASIO_WINDOWS_RUNTIME) #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) \ || defined(__MACH__) && defined(__APPLE__) # if defined(ASIO_HAS_PTHREADS) # include # endif // defined(ASIO_HAS_PTHREADS) #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) // || defined(__MACH__) && defined(__APPLE__) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { namespace socket_ops { #if !defined(ASIO_WINDOWS_RUNTIME) #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) struct msghdr { int msg_namelen; }; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) #if defined(__hpux) // HP-UX doesn't declare these functions extern "C", so they are declared again // here to avoid linker errors about undefined symbols. 
extern "C" char* if_indextoname(unsigned int, char*); extern "C" unsigned int if_nametoindex(const char*); #endif // defined(__hpux) #endif // !defined(ASIO_WINDOWS_RUNTIME) inline void clear_last_error() { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) WSASetLastError(0); #else errno = 0; #endif } #if !defined(ASIO_WINDOWS_RUNTIME) template inline ReturnType error_wrapper(ReturnType return_value, asio::error_code& ec) { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) ec = asio::error_code(WSAGetLastError(), asio::error::get_system_category()); #else ec = asio::error_code(errno, asio::error::get_system_category()); #endif return return_value; } template inline socket_type call_accept(SockLenType msghdr::*, socket_type s, socket_addr_type* addr, std::size_t* addrlen) { SockLenType tmp_addrlen = addrlen ? (SockLenType)*addrlen : 0; socket_type result = ::accept(s, addr, addrlen ? &tmp_addrlen : 0); if (addrlen) *addrlen = (std::size_t)tmp_addrlen; return result; } socket_type accept(socket_type s, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return invalid_socket; } clear_last_error(); socket_type new_s = error_wrapper(call_accept( &msghdr::msg_namelen, s, addr, addrlen), ec); if (new_s == invalid_socket) return new_s; #if defined(__MACH__) && defined(__APPLE__) || defined(__FreeBSD__) int optval = 1; int result = error_wrapper(::setsockopt(new_s, SOL_SOCKET, SO_NOSIGPIPE, &optval, sizeof(optval)), ec); if (result != 0) { ::close(new_s); return invalid_socket; } #endif ec = asio::error_code(); return new_s; } socket_type sync_accept(socket_type s, state_type state, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec) { // Accept a socket. for (;;) { // Try to complete the operation without blocking. socket_type new_socket = socket_ops::accept(s, addr, addrlen, ec); // Check if operation succeeded. 
if (new_socket != invalid_socket) return new_socket; // Operation failed. if (ec == asio::error::would_block || ec == asio::error::try_again) { if (state & user_set_non_blocking) return invalid_socket; // Fall through to retry operation. } else if (ec == asio::error::connection_aborted) { if (state & enable_connection_aborted) return invalid_socket; // Fall through to retry operation. } #if defined(EPROTO) else if (ec.value() == EPROTO) { if (state & enable_connection_aborted) return invalid_socket; // Fall through to retry operation. } #endif // defined(EPROTO) else return invalid_socket; // Wait for socket to become ready. if (socket_ops::poll_read(s, 0, ec) < 0) return invalid_socket; } } #if defined(ASIO_HAS_IOCP) void complete_iocp_accept(socket_type s, void* output_buffer, DWORD address_length, socket_addr_type* addr, std::size_t* addrlen, socket_type new_socket, asio::error_code& ec) { // Map non-portable errors to their portable counterparts. if (ec.value() == ERROR_NETNAME_DELETED) ec = asio::error::connection_aborted; if (!ec) { // Get the address of the peer. if (addr && addrlen) { LPSOCKADDR local_addr = 0; int local_addr_length = 0; LPSOCKADDR remote_addr = 0; int remote_addr_length = 0; GetAcceptExSockaddrs(output_buffer, 0, address_length, address_length, &local_addr, &local_addr_length, &remote_addr, &remote_addr_length); if (static_cast(remote_addr_length) > *addrlen) { ec = asio::error::invalid_argument; } else { using namespace std; // For memcpy. memcpy(addr, remote_addr, remote_addr_length); *addrlen = static_cast(remote_addr_length); } } // Need to set the SO_UPDATE_ACCEPT_CONTEXT option so that getsockname // and getpeername will work on the accepted socket. 
SOCKET update_ctx_param = s; socket_ops::state_type state = 0; socket_ops::setsockopt(new_socket, state, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT, &update_ctx_param, sizeof(SOCKET), ec); } } #else // defined(ASIO_HAS_IOCP) bool non_blocking_accept(socket_type s, state_type state, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec, socket_type& new_socket) { for (;;) { // Accept the waiting connection. new_socket = socket_ops::accept(s, addr, addrlen, ec); // Check if operation succeeded. if (new_socket != invalid_socket) return true; // Retry operation if interrupted by signal. if (ec == asio::error::interrupted) continue; // Operation failed. if (ec == asio::error::would_block || ec == asio::error::try_again) { if (state & user_set_non_blocking) return true; // Fall through to retry operation. } else if (ec == asio::error::connection_aborted) { if (state & enable_connection_aborted) return true; // Fall through to retry operation. } #if defined(EPROTO) else if (ec.value() == EPROTO) { if (state & enable_connection_aborted) return true; // Fall through to retry operation. } #endif // defined(EPROTO) else return true; return false; } } #endif // defined(ASIO_HAS_IOCP) template inline int call_bind(SockLenType msghdr::*, socket_type s, const socket_addr_type* addr, std::size_t addrlen) { return ::bind(s, addr, (SockLenType)addrlen); } int bind(socket_type s, const socket_addr_type* addr, std::size_t addrlen, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return socket_error_retval; } clear_last_error(); int result = error_wrapper(call_bind( &msghdr::msg_namelen, s, addr, addrlen), ec); if (result == 0) ec = asio::error_code(); return result; } int close(socket_type s, state_type& state, bool destruction, asio::error_code& ec) { int result = 0; if (s != invalid_socket) { // We don't want the destructor to block, so set the socket to linger in // the background. 
If the user doesn't like this behaviour then they need // to explicitly close the socket. if (destruction && (state & user_set_linger)) { ::linger opt; opt.l_onoff = 0; opt.l_linger = 0; asio::error_code ignored_ec; socket_ops::setsockopt(s, state, SOL_SOCKET, SO_LINGER, &opt, sizeof(opt), ignored_ec); } clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) result = error_wrapper(::closesocket(s), ec); #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) result = error_wrapper(::close(s), ec); #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) if (result != 0 && (ec == asio::error::would_block || ec == asio::error::try_again)) { // According to UNIX Network Programming Vol. 1, it is possible for // close() to fail with EWOULDBLOCK under certain circumstances. What // isn't clear is the state of the descriptor after this error. The one // current OS where this behaviour is seen, Windows, says that the socket // remains open. Therefore we'll put the descriptor back into blocking // mode and have another attempt at closing it. 
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) ioctl_arg_type arg = 0; ::ioctlsocket(s, FIONBIO, &arg); #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) # if defined(__SYMBIAN32__) int flags = ::fcntl(s, F_GETFL, 0); if (flags >= 0) ::fcntl(s, F_SETFL, flags & ~O_NONBLOCK); # else // defined(__SYMBIAN32__) ioctl_arg_type arg = 0; ::ioctl(s, FIONBIO, &arg); # endif // defined(__SYMBIAN32__) #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) state &= ~non_blocking; clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) result = error_wrapper(::closesocket(s), ec); #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) result = error_wrapper(::close(s), ec); #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) } } if (result == 0) ec = asio::error_code(); return result; } bool set_user_non_blocking(socket_type s, state_type& state, bool value, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return false; } clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) ioctl_arg_type arg = (value ? 1 : 0); int result = error_wrapper(::ioctlsocket(s, FIONBIO, &arg), ec); #elif defined(__SYMBIAN32__) int result = error_wrapper(::fcntl(s, F_GETFL, 0), ec); if (result >= 0) { clear_last_error(); int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK)); result = error_wrapper(::fcntl(s, F_SETFL, flag), ec); } #else ioctl_arg_type arg = (value ? 1 : 0); int result = error_wrapper(::ioctl(s, FIONBIO, &arg), ec); #endif if (result >= 0) { ec = asio::error_code(); if (value) state |= user_set_non_blocking; else { // Clearing the user-set non-blocking mode always overrides any // internally-set non-blocking flag. Any subsequent asynchronous // operations will need to re-enable non-blocking I/O. 
state &= ~(user_set_non_blocking | internal_non_blocking); } return true; } return false; } bool set_internal_non_blocking(socket_type s, state_type& state, bool value, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return false; } if (!value && (state & user_set_non_blocking)) { // It does not make sense to clear the internal non-blocking flag if the // user still wants non-blocking behaviour. Return an error and let the // caller figure out whether to update the user-set non-blocking flag. ec = asio::error::invalid_argument; return false; } clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) ioctl_arg_type arg = (value ? 1 : 0); int result = error_wrapper(::ioctlsocket(s, FIONBIO, &arg), ec); #elif defined(__SYMBIAN32__) int result = error_wrapper(::fcntl(s, F_GETFL, 0), ec); if (result >= 0) { clear_last_error(); int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK)); result = error_wrapper(::fcntl(s, F_SETFL, flag), ec); } #else ioctl_arg_type arg = (value ? 
1 : 0); int result = error_wrapper(::ioctl(s, FIONBIO, &arg), ec); #endif if (result >= 0) { ec = asio::error_code(); if (value) state |= internal_non_blocking; else state &= ~internal_non_blocking; return true; } return false; } int shutdown(socket_type s, int what, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return socket_error_retval; } clear_last_error(); int result = error_wrapper(::shutdown(s, what), ec); if (result == 0) ec = asio::error_code(); return result; } template inline int call_connect(SockLenType msghdr::*, socket_type s, const socket_addr_type* addr, std::size_t addrlen) { return ::connect(s, addr, (SockLenType)addrlen); } int connect(socket_type s, const socket_addr_type* addr, std::size_t addrlen, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return socket_error_retval; } clear_last_error(); int result = error_wrapper(call_connect( &msghdr::msg_namelen, s, addr, addrlen), ec); if (result == 0) ec = asio::error_code(); #if defined(__linux__) else if (ec == asio::error::try_again) ec = asio::error::no_buffer_space; #endif // defined(__linux__) return result; } void sync_connect(socket_type s, const socket_addr_type* addr, std::size_t addrlen, asio::error_code& ec) { // Perform the connect operation. socket_ops::connect(s, addr, addrlen, ec); if (ec != asio::error::in_progress && ec != asio::error::would_block) { // The connect operation finished immediately. return; } // Wait for socket to become ready. if (socket_ops::poll_connect(s, ec) < 0) return; // Get the error code from the connect operation. int connect_error = 0; size_t connect_error_len = sizeof(connect_error); if (socket_ops::getsockopt(s, 0, SOL_SOCKET, SO_ERROR, &connect_error, &connect_error_len, ec) == socket_error_retval) return; // Return the result of the connect operation. 
ec = asio::error_code(connect_error, asio::error::get_system_category()); } #if defined(ASIO_HAS_IOCP) void complete_iocp_connect(socket_type s, asio::error_code& ec) { // Map non-portable errors to their portable counterparts. switch (ec.value()) { case ERROR_CONNECTION_REFUSED: ec = asio::error::connection_refused; break; case ERROR_NETWORK_UNREACHABLE: ec = asio::error::network_unreachable; break; case ERROR_HOST_UNREACHABLE: ec = asio::error::host_unreachable; break; case ERROR_SEM_TIMEOUT: ec = asio::error::timed_out; break; default: break; } if (!ec) { // Need to set the SO_UPDATE_CONNECT_CONTEXT option so that getsockname // and getpeername will work on the connected socket. socket_ops::state_type state = 0; const int so_update_connect_context = 0x7010; socket_ops::setsockopt(s, state, SOL_SOCKET, so_update_connect_context, 0, 0, ec); } } #endif // defined(ASIO_HAS_IOCP) bool non_blocking_connect(socket_type s, asio::error_code& ec) { // Check if the connect operation has finished. This is required since we may // get spurious readiness notifications from the reactor. #if defined(ASIO_WINDOWS) \ || defined(__CYGWIN__) \ || defined(__SYMBIAN32__) fd_set write_fds; FD_ZERO(&write_fds); FD_SET(s, &write_fds); fd_set except_fds; FD_ZERO(&except_fds); FD_SET(s, &except_fds); timeval zero_timeout; zero_timeout.tv_sec = 0; zero_timeout.tv_usec = 0; int ready = ::select(s + 1, 0, &write_fds, &except_fds, &zero_timeout); #else // defined(ASIO_WINDOWS) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) pollfd fds; fds.fd = s; fds.events = POLLOUT; fds.revents = 0; int ready = ::poll(&fds, 1, 0); #endif // defined(ASIO_WINDOWS) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) if (ready == 0) { // The asynchronous connect operation is still in progress. return false; } // Get the error code from the connect operation. 
int connect_error = 0; size_t connect_error_len = sizeof(connect_error); if (socket_ops::getsockopt(s, 0, SOL_SOCKET, SO_ERROR, &connect_error, &connect_error_len, ec) == 0) { if (connect_error) { ec = asio::error_code(connect_error, asio::error::get_system_category()); } else ec = asio::error_code(); } return true; } int socketpair(int af, int type, int protocol, socket_type sv[2], asio::error_code& ec) { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) (void)(af); (void)(type); (void)(protocol); (void)(sv); ec = asio::error::operation_not_supported; return socket_error_retval; #else clear_last_error(); int result = error_wrapper(::socketpair(af, type, protocol, sv), ec); if (result == 0) ec = asio::error_code(); return result; #endif } bool sockatmark(socket_type s, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return false; } #if defined(SIOCATMARK) ioctl_arg_type value = 0; # if defined(ASIO_WINDOWS) || defined(__CYGWIN__) int result = error_wrapper(::ioctlsocket(s, SIOCATMARK, &value), ec); # else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) int result = error_wrapper(::ioctl(s, SIOCATMARK, &value), ec); # endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) if (result == 0) ec = asio::error_code(); # if defined(ENOTTY) if (ec.value() == ENOTTY) ec = asio::error::not_socket; # endif // defined(ENOTTY) #else // defined(SIOCATMARK) int value = error_wrapper(::sockatmark(s), ec); if (value != -1) ec = asio::error_code(); #endif // defined(SIOCATMARK) return ec ? 
false : value != 0; } size_t available(socket_type s, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return 0; } ioctl_arg_type value = 0; #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) int result = error_wrapper(::ioctlsocket(s, FIONREAD, &value), ec); #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) int result = error_wrapper(::ioctl(s, FIONREAD, &value), ec); #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) if (result == 0) ec = asio::error_code(); #if defined(ENOTTY) if (ec.value() == ENOTTY) ec = asio::error::not_socket; #endif // defined(ENOTTY) return ec ? static_cast(0) : static_cast(value); } int listen(socket_type s, int backlog, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return socket_error_retval; } clear_last_error(); int result = error_wrapper(::listen(s, backlog), ec); if (result == 0) ec = asio::error_code(); return result; } inline void init_buf_iov_base(void*& base, void* addr) { base = addr; } template inline void init_buf_iov_base(T& base, void* addr) { base = static_cast(addr); } #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) typedef WSABUF buf; #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) typedef iovec buf; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) void init_buf(buf& b, void* data, size_t size) { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) b.buf = static_cast(data); b.len = static_cast(size); #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) init_buf_iov_base(b.iov_base, data); b.iov_len = size; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) } void init_buf(buf& b, const void* data, size_t size) { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) b.buf = static_cast(const_cast(data)); b.len = static_cast(size); #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) init_buf_iov_base(b.iov_base, const_cast(data)); b.iov_len = size; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) } inline void 
init_msghdr_msg_name(void*& name, socket_addr_type* addr) { name = addr; } inline void init_msghdr_msg_name(void*& name, const socket_addr_type* addr) { name = const_cast(addr); } template inline void init_msghdr_msg_name(T& name, socket_addr_type* addr) { name = reinterpret_cast(addr); } template inline void init_msghdr_msg_name(T& name, const socket_addr_type* addr) { name = reinterpret_cast(const_cast(addr)); } signed_size_type recv(socket_type s, buf* bufs, size_t count, int flags, asio::error_code& ec) { clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Receive some data. DWORD recv_buf_count = static_cast(count); DWORD bytes_transferred = 0; DWORD recv_flags = flags; int result = error_wrapper(::WSARecv(s, bufs, recv_buf_count, &bytes_transferred, &recv_flags, 0, 0), ec); if (ec.value() == ERROR_NETNAME_DELETED) ec = asio::error::connection_reset; else if (ec.value() == ERROR_PORT_UNREACHABLE) ec = asio::error::connection_refused; if (result != 0) return socket_error_retval; ec = asio::error_code(); return bytes_transferred; #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) msghdr msg = msghdr(); msg.msg_iov = bufs; msg.msg_iovlen = static_cast(count); signed_size_type result = error_wrapper(::recvmsg(s, &msg, flags), ec); if (result >= 0) ec = asio::error_code(); return result; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) } size_t sync_recv(socket_type s, state_type state, buf* bufs, size_t count, int flags, bool all_empty, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return 0; } // A request to read 0 bytes on a stream is a no-op. if (all_empty && (state & stream_oriented)) { ec = asio::error_code(); return 0; } // Read some data. for (;;) { // Try to complete the operation without blocking. signed_size_type bytes = socket_ops::recv(s, bufs, count, flags, ec); // Check if operation succeeded. if (bytes > 0) return bytes; // Check for EOF. 
if ((state & stream_oriented) && bytes == 0) { ec = asio::error::eof; return 0; } // Operation failed. if ((state & user_set_non_blocking) || (ec != asio::error::would_block && ec != asio::error::try_again)) return 0; // Wait for socket to become ready. if (socket_ops::poll_read(s, 0, ec) < 0) return 0; } } #if defined(ASIO_HAS_IOCP) void complete_iocp_recv(state_type state, const weak_cancel_token_type& cancel_token, bool all_empty, asio::error_code& ec, size_t bytes_transferred) { // Map non-portable errors to their portable counterparts. if (ec.value() == ERROR_NETNAME_DELETED) { if (cancel_token.expired()) ec = asio::error::operation_aborted; else ec = asio::error::connection_reset; } else if (ec.value() == ERROR_PORT_UNREACHABLE) { ec = asio::error::connection_refused; } // Check for connection closed. else if (!ec && bytes_transferred == 0 && (state & stream_oriented) != 0 && !all_empty) { ec = asio::error::eof; } } #else // defined(ASIO_HAS_IOCP) bool non_blocking_recv(socket_type s, buf* bufs, size_t count, int flags, bool is_stream, asio::error_code& ec, size_t& bytes_transferred) { for (;;) { // Read some data. signed_size_type bytes = socket_ops::recv(s, bufs, count, flags, ec); // Check for end of stream. if (is_stream && bytes == 0) { ec = asio::error::eof; return true; } // Retry operation if interrupted by signal. if (ec == asio::error::interrupted) continue; // Check if we need to run the operation again. if (ec == asio::error::would_block || ec == asio::error::try_again) return false; // Operation is complete. if (bytes >= 0) { ec = asio::error_code(); bytes_transferred = bytes; } else bytes_transferred = 0; return true; } } #endif // defined(ASIO_HAS_IOCP) signed_size_type recvfrom(socket_type s, buf* bufs, size_t count, int flags, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec) { clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Receive some data. 
DWORD recv_buf_count = static_cast(count); DWORD bytes_transferred = 0; DWORD recv_flags = flags; int tmp_addrlen = (int)*addrlen; int result = error_wrapper(::WSARecvFrom(s, bufs, recv_buf_count, &bytes_transferred, &recv_flags, addr, &tmp_addrlen, 0, 0), ec); *addrlen = (std::size_t)tmp_addrlen; if (ec.value() == ERROR_NETNAME_DELETED) ec = asio::error::connection_reset; else if (ec.value() == ERROR_PORT_UNREACHABLE) ec = asio::error::connection_refused; if (result != 0) return socket_error_retval; ec = asio::error_code(); return bytes_transferred; #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) msghdr msg = msghdr(); init_msghdr_msg_name(msg.msg_name, addr); msg.msg_namelen = static_cast(*addrlen); msg.msg_iov = bufs; msg.msg_iovlen = static_cast(count); signed_size_type result = error_wrapper(::recvmsg(s, &msg, flags), ec); *addrlen = msg.msg_namelen; if (result >= 0) ec = asio::error_code(); return result; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) } size_t sync_recvfrom(socket_type s, state_type state, buf* bufs, size_t count, int flags, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return 0; } // Read some data. for (;;) { // Try to complete the operation without blocking. signed_size_type bytes = socket_ops::recvfrom( s, bufs, count, flags, addr, addrlen, ec); // Check if operation succeeded. if (bytes >= 0) return bytes; // Operation failed. if ((state & user_set_non_blocking) || (ec != asio::error::would_block && ec != asio::error::try_again)) return 0; // Wait for socket to become ready. if (socket_ops::poll_read(s, 0, ec) < 0) return 0; } } #if defined(ASIO_HAS_IOCP) void complete_iocp_recvfrom( const weak_cancel_token_type& cancel_token, asio::error_code& ec) { // Map non-portable errors to their portable counterparts. 
if (ec.value() == ERROR_NETNAME_DELETED) { if (cancel_token.expired()) ec = asio::error::operation_aborted; else ec = asio::error::connection_reset; } else if (ec.value() == ERROR_PORT_UNREACHABLE) { ec = asio::error::connection_refused; } } #else // defined(ASIO_HAS_IOCP) bool non_blocking_recvfrom(socket_type s, buf* bufs, size_t count, int flags, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec, size_t& bytes_transferred) { for (;;) { // Read some data. signed_size_type bytes = socket_ops::recvfrom( s, bufs, count, flags, addr, addrlen, ec); // Retry operation if interrupted by signal. if (ec == asio::error::interrupted) continue; // Check if we need to run the operation again. if (ec == asio::error::would_block || ec == asio::error::try_again) return false; // Operation is complete. if (bytes >= 0) { ec = asio::error_code(); bytes_transferred = bytes; } else bytes_transferred = 0; return true; } } #endif // defined(ASIO_HAS_IOCP) signed_size_type recvmsg(socket_type s, buf* bufs, size_t count, int in_flags, int& out_flags, asio::error_code& ec) { clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) out_flags = 0; return socket_ops::recv(s, bufs, count, in_flags, ec); #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) msghdr msg = msghdr(); msg.msg_iov = bufs; msg.msg_iovlen = static_cast(count); signed_size_type result = error_wrapper(::recvmsg(s, &msg, in_flags), ec); if (result >= 0) { ec = asio::error_code(); out_flags = msg.msg_flags; } else out_flags = 0; return result; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) } size_t sync_recvmsg(socket_type s, state_type state, buf* bufs, size_t count, int in_flags, int& out_flags, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return 0; } // Read some data. for (;;) { // Try to complete the operation without blocking. 
signed_size_type bytes = socket_ops::recvmsg( s, bufs, count, in_flags, out_flags, ec); // Check if operation succeeded. if (bytes >= 0) return bytes; // Operation failed. if ((state & user_set_non_blocking) || (ec != asio::error::would_block && ec != asio::error::try_again)) return 0; // Wait for socket to become ready. if (socket_ops::poll_read(s, 0, ec) < 0) return 0; } } #if defined(ASIO_HAS_IOCP) void complete_iocp_recvmsg( const weak_cancel_token_type& cancel_token, asio::error_code& ec) { // Map non-portable errors to their portable counterparts. if (ec.value() == ERROR_NETNAME_DELETED) { if (cancel_token.expired()) ec = asio::error::operation_aborted; else ec = asio::error::connection_reset; } else if (ec.value() == ERROR_PORT_UNREACHABLE) { ec = asio::error::connection_refused; } } #else // defined(ASIO_HAS_IOCP) bool non_blocking_recvmsg(socket_type s, buf* bufs, size_t count, int in_flags, int& out_flags, asio::error_code& ec, size_t& bytes_transferred) { for (;;) { // Read some data. signed_size_type bytes = socket_ops::recvmsg( s, bufs, count, in_flags, out_flags, ec); // Retry operation if interrupted by signal. if (ec == asio::error::interrupted) continue; // Check if we need to run the operation again. if (ec == asio::error::would_block || ec == asio::error::try_again) return false; // Operation is complete. if (bytes >= 0) { ec = asio::error_code(); bytes_transferred = bytes; } else bytes_transferred = 0; return true; } } #endif // defined(ASIO_HAS_IOCP) signed_size_type send(socket_type s, const buf* bufs, size_t count, int flags, asio::error_code& ec) { clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Send the data. 
DWORD send_buf_count = static_cast(count); DWORD bytes_transferred = 0; DWORD send_flags = flags; int result = error_wrapper(::WSASend(s, const_cast(bufs), send_buf_count, &bytes_transferred, send_flags, 0, 0), ec); if (ec.value() == ERROR_NETNAME_DELETED) ec = asio::error::connection_reset; else if (ec.value() == ERROR_PORT_UNREACHABLE) ec = asio::error::connection_refused; if (result != 0) return socket_error_retval; ec = asio::error_code(); return bytes_transferred; #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) msghdr msg = msghdr(); msg.msg_iov = const_cast(bufs); msg.msg_iovlen = static_cast(count); #if defined(__linux__) flags |= MSG_NOSIGNAL; #endif // defined(__linux__) signed_size_type result = error_wrapper(::sendmsg(s, &msg, flags), ec); if (result >= 0) ec = asio::error_code(); return result; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) } size_t sync_send(socket_type s, state_type state, const buf* bufs, size_t count, int flags, bool all_empty, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return 0; } // A request to write 0 bytes to a stream is a no-op. if (all_empty && (state & stream_oriented)) { ec = asio::error_code(); return 0; } // Read some data. for (;;) { // Try to complete the operation without blocking. signed_size_type bytes = socket_ops::send(s, bufs, count, flags, ec); // Check if operation succeeded. if (bytes >= 0) return bytes; // Operation failed. if ((state & user_set_non_blocking) || (ec != asio::error::would_block && ec != asio::error::try_again)) return 0; // Wait for socket to become ready. if (socket_ops::poll_write(s, 0, ec) < 0) return 0; } } #if defined(ASIO_HAS_IOCP) void complete_iocp_send( const weak_cancel_token_type& cancel_token, asio::error_code& ec) { // Map non-portable errors to their portable counterparts. 
if (ec.value() == ERROR_NETNAME_DELETED) { if (cancel_token.expired()) ec = asio::error::operation_aborted; else ec = asio::error::connection_reset; } else if (ec.value() == ERROR_PORT_UNREACHABLE) { ec = asio::error::connection_refused; } } #else // defined(ASIO_HAS_IOCP) bool non_blocking_send(socket_type s, const buf* bufs, size_t count, int flags, asio::error_code& ec, size_t& bytes_transferred) { for (;;) { // Write some data. signed_size_type bytes = socket_ops::send(s, bufs, count, flags, ec); // Retry operation if interrupted by signal. if (ec == asio::error::interrupted) continue; // Check if we need to run the operation again. if (ec == asio::error::would_block || ec == asio::error::try_again) return false; // Operation is complete. if (bytes >= 0) { ec = asio::error_code(); bytes_transferred = bytes; } else bytes_transferred = 0; return true; } } #endif // defined(ASIO_HAS_IOCP) signed_size_type sendto(socket_type s, const buf* bufs, size_t count, int flags, const socket_addr_type* addr, std::size_t addrlen, asio::error_code& ec) { clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Send the data. 
DWORD send_buf_count = static_cast(count); DWORD bytes_transferred = 0; int result = error_wrapper(::WSASendTo(s, const_cast(bufs), send_buf_count, &bytes_transferred, flags, addr, static_cast(addrlen), 0, 0), ec); if (ec.value() == ERROR_NETNAME_DELETED) ec = asio::error::connection_reset; else if (ec.value() == ERROR_PORT_UNREACHABLE) ec = asio::error::connection_refused; if (result != 0) return socket_error_retval; ec = asio::error_code(); return bytes_transferred; #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) msghdr msg = msghdr(); init_msghdr_msg_name(msg.msg_name, addr); msg.msg_namelen = static_cast(addrlen); msg.msg_iov = const_cast(bufs); msg.msg_iovlen = static_cast(count); #if defined(__linux__) flags |= MSG_NOSIGNAL; #endif // defined(__linux__) signed_size_type result = error_wrapper(::sendmsg(s, &msg, flags), ec); if (result >= 0) ec = asio::error_code(); return result; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) } size_t sync_sendto(socket_type s, state_type state, const buf* bufs, size_t count, int flags, const socket_addr_type* addr, std::size_t addrlen, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return 0; } // Write some data. for (;;) { // Try to complete the operation without blocking. signed_size_type bytes = socket_ops::sendto( s, bufs, count, flags, addr, addrlen, ec); // Check if operation succeeded. if (bytes >= 0) return bytes; // Operation failed. if ((state & user_set_non_blocking) || (ec != asio::error::would_block && ec != asio::error::try_again)) return 0; // Wait for socket to become ready. if (socket_ops::poll_write(s, 0, ec) < 0) return 0; } } #if !defined(ASIO_HAS_IOCP) bool non_blocking_sendto(socket_type s, const buf* bufs, size_t count, int flags, const socket_addr_type* addr, std::size_t addrlen, asio::error_code& ec, size_t& bytes_transferred) { for (;;) { // Write some data. 
signed_size_type bytes = socket_ops::sendto( s, bufs, count, flags, addr, addrlen, ec); // Retry operation if interrupted by signal. if (ec == asio::error::interrupted) continue; // Check if we need to run the operation again. if (ec == asio::error::would_block || ec == asio::error::try_again) return false; // Operation is complete. if (bytes >= 0) { ec = asio::error_code(); bytes_transferred = bytes; } else bytes_transferred = 0; return true; } } #endif // !defined(ASIO_HAS_IOCP) socket_type socket(int af, int type, int protocol, asio::error_code& ec) { clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) socket_type s = error_wrapper(::WSASocketW(af, type, protocol, 0, 0, WSA_FLAG_OVERLAPPED), ec); if (s == invalid_socket) return s; if (af == ASIO_OS_DEF(AF_INET6)) { // Try to enable the POSIX default behaviour of having IPV6_V6ONLY set to // false. This will only succeed on Windows Vista and later versions of // Windows, where a dual-stack IPv4/v6 implementation is available. 
DWORD optval = 0; ::setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY, reinterpret_cast(&optval), sizeof(optval)); } ec = asio::error_code(); return s; #elif defined(__MACH__) && defined(__APPLE__) || defined(__FreeBSD__) socket_type s = error_wrapper(::socket(af, type, protocol), ec); if (s == invalid_socket) return s; int optval = 1; int result = error_wrapper(::setsockopt(s, SOL_SOCKET, SO_NOSIGPIPE, &optval, sizeof(optval)), ec); if (result != 0) { ::close(s); return invalid_socket; } return s; #else int s = error_wrapper(::socket(af, type, protocol), ec); if (s >= 0) ec = asio::error_code(); return s; #endif } template inline int call_setsockopt(SockLenType msghdr::*, socket_type s, int level, int optname, const void* optval, std::size_t optlen) { return ::setsockopt(s, level, optname, (const char*)optval, (SockLenType)optlen); } int setsockopt(socket_type s, state_type& state, int level, int optname, const void* optval, std::size_t optlen, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return socket_error_retval; } if (level == custom_socket_option_level && optname == always_fail_option) { ec = asio::error::invalid_argument; return socket_error_retval; } if (level == custom_socket_option_level && optname == enable_connection_aborted_option) { if (optlen != sizeof(int)) { ec = asio::error::invalid_argument; return socket_error_retval; } if (*static_cast(optval)) state |= enable_connection_aborted; else state &= ~enable_connection_aborted; ec = asio::error_code(); return 0; } if (level == SOL_SOCKET && optname == SO_LINGER) state |= user_set_linger; #if defined(__BORLANDC__) // Mysteriously, using the getsockopt and setsockopt functions directly with // Borland C++ results in incorrect values being set and read. The bug can be // worked around by using function addresses resolved with GetProcAddress. 
if (HMODULE winsock_module = ::GetModuleHandleA("ws2_32")) { typedef int (WSAAPI *sso_t)(SOCKET, int, int, const char*, int); if (sso_t sso = (sso_t)::GetProcAddress(winsock_module, "setsockopt")) { clear_last_error(); return error_wrapper(sso(s, level, optname, reinterpret_cast(optval), static_cast(optlen)), ec); } } ec = asio::error::fault; return socket_error_retval; #else // defined(__BORLANDC__) clear_last_error(); int result = error_wrapper(call_setsockopt(&msghdr::msg_namelen, s, level, optname, optval, optlen), ec); if (result == 0) { ec = asio::error_code(); #if defined(__MACH__) && defined(__APPLE__) \ || defined(__NetBSD__) || defined(__FreeBSD__) || defined(__OpenBSD__) // To implement portable behaviour for SO_REUSEADDR with UDP sockets we // need to also set SO_REUSEPORT on BSD-based platforms. if ((state & datagram_oriented) && level == SOL_SOCKET && optname == SO_REUSEADDR) { call_setsockopt(&msghdr::msg_namelen, s, SOL_SOCKET, SO_REUSEPORT, optval, optlen); } #endif } return result; #endif // defined(__BORLANDC__) } template inline int call_getsockopt(SockLenType msghdr::*, socket_type s, int level, int optname, void* optval, std::size_t* optlen) { SockLenType tmp_optlen = (SockLenType)*optlen; int result = ::getsockopt(s, level, optname, (char*)optval, &tmp_optlen); *optlen = (std::size_t)tmp_optlen; return result; } int getsockopt(socket_type s, state_type state, int level, int optname, void* optval, size_t* optlen, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return socket_error_retval; } if (level == custom_socket_option_level && optname == always_fail_option) { ec = asio::error::invalid_argument; return socket_error_retval; } if (level == custom_socket_option_level && optname == enable_connection_aborted_option) { if (*optlen != sizeof(int)) { ec = asio::error::invalid_argument; return socket_error_retval; } *static_cast(optval) = (state & enable_connection_aborted) ? 
1 : 0; ec = asio::error_code(); return 0; } #if defined(__BORLANDC__) // Mysteriously, using the getsockopt and setsockopt functions directly with // Borland C++ results in incorrect values being set and read. The bug can be // worked around by using function addresses resolved with GetProcAddress. if (HMODULE winsock_module = ::GetModuleHandleA("ws2_32")) { typedef int (WSAAPI *gso_t)(SOCKET, int, int, char*, int*); if (gso_t gso = (gso_t)::GetProcAddress(winsock_module, "getsockopt")) { clear_last_error(); int tmp_optlen = static_cast(*optlen); int result = error_wrapper(gso(s, level, optname, reinterpret_cast(optval), &tmp_optlen), ec); *optlen = static_cast(tmp_optlen); if (result != 0 && level == IPPROTO_IPV6 && optname == IPV6_V6ONLY && ec.value() == WSAENOPROTOOPT && *optlen == sizeof(DWORD)) { // Dual-stack IPv4/v6 sockets, and the IPV6_V6ONLY socket option, are // only supported on Windows Vista and later. To simplify program logic // we will fake success of getting this option and specify that the // value is non-zero (i.e. true). This corresponds to the behavior of // IPv6 sockets on Windows platforms pre-Vista. *static_cast(optval) = 1; ec = asio::error_code(); } return result; } } ec = asio::error::fault; return socket_error_retval; #elif defined(ASIO_WINDOWS) || defined(__CYGWIN__) clear_last_error(); int result = error_wrapper(call_getsockopt(&msghdr::msg_namelen, s, level, optname, optval, optlen), ec); if (result != 0 && level == IPPROTO_IPV6 && optname == IPV6_V6ONLY && ec.value() == WSAENOPROTOOPT && *optlen == sizeof(DWORD)) { // Dual-stack IPv4/v6 sockets, and the IPV6_V6ONLY socket option, are only // supported on Windows Vista and later. To simplify program logic we will // fake success of getting this option and specify that the value is // non-zero (i.e. true). This corresponds to the behavior of IPv6 sockets // on Windows platforms pre-Vista. 
*static_cast(optval) = 1; ec = asio::error_code(); } if (result == 0) ec = asio::error_code(); return result; #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) clear_last_error(); int result = error_wrapper(call_getsockopt(&msghdr::msg_namelen, s, level, optname, optval, optlen), ec); #if defined(__linux__) if (result == 0 && level == SOL_SOCKET && *optlen == sizeof(int) && (optname == SO_SNDBUF || optname == SO_RCVBUF)) { // On Linux, setting SO_SNDBUF or SO_RCVBUF to N actually causes the kernel // to set the buffer size to N*2. Linux puts additional stuff into the // buffers so that only about half is actually available to the application. // The retrieved value is divided by 2 here to make it appear as though the // correct value has been set. *static_cast(optval) /= 2; } #endif // defined(__linux__) if (result == 0) ec = asio::error_code(); return result; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) } template inline int call_getpeername(SockLenType msghdr::*, socket_type s, socket_addr_type* addr, std::size_t* addrlen) { SockLenType tmp_addrlen = (SockLenType)*addrlen; int result = ::getpeername(s, addr, &tmp_addrlen); *addrlen = (std::size_t)tmp_addrlen; return result; } int getpeername(socket_type s, socket_addr_type* addr, std::size_t* addrlen, bool cached, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return socket_error_retval; } #if defined(ASIO_WINDOWS) && !defined(ASIO_WINDOWS_APP) \ || defined(__CYGWIN__) if (cached) { // Check if socket is still connected. DWORD connect_time = 0; size_t connect_time_len = sizeof(connect_time); if (socket_ops::getsockopt(s, 0, SOL_SOCKET, SO_CONNECT_TIME, &connect_time, &connect_time_len, ec) == socket_error_retval) { return socket_error_retval; } if (connect_time == 0xFFFFFFFF) { ec = asio::error::not_connected; return socket_error_retval; } // The cached value is still valid. 
ec = asio::error_code(); return 0; } #else // defined(ASIO_WINDOWS) && !defined(ASIO_WINDOWS_APP) // || defined(__CYGWIN__) (void)cached; #endif // defined(ASIO_WINDOWS) && !defined(ASIO_WINDOWS_APP) // || defined(__CYGWIN__) clear_last_error(); int result = error_wrapper(call_getpeername( &msghdr::msg_namelen, s, addr, addrlen), ec); if (result == 0) ec = asio::error_code(); return result; } template inline int call_getsockname(SockLenType msghdr::*, socket_type s, socket_addr_type* addr, std::size_t* addrlen) { SockLenType tmp_addrlen = (SockLenType)*addrlen; int result = ::getsockname(s, addr, &tmp_addrlen); *addrlen = (std::size_t)tmp_addrlen; return result; } int getsockname(socket_type s, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return socket_error_retval; } clear_last_error(); int result = error_wrapper(call_getsockname( &msghdr::msg_namelen, s, addr, addrlen), ec); if (result == 0) ec = asio::error_code(); return result; } int ioctl(socket_type s, state_type& state, int cmd, ioctl_arg_type* arg, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return socket_error_retval; } clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) int result = error_wrapper(::ioctlsocket(s, cmd, arg), ec); #elif defined(__MACH__) && defined(__APPLE__) \ || defined(__NetBSD__) || defined(__FreeBSD__) || defined(__OpenBSD__) int result = error_wrapper(::ioctl(s, static_cast(cmd), arg), ec); #else int result = error_wrapper(::ioctl(s, cmd, arg), ec); #endif if (result >= 0) { ec = asio::error_code(); // When updating the non-blocking mode we always perform the ioctl syscall, // even if the flags would otherwise indicate that the socket is already in // the correct state. This ensures that the underlying socket is put into // the state that has been requested by the user. 
If the ioctl syscall was // successful then we need to update the flags to match. if (cmd == static_cast(FIONBIO)) { if (*arg) { state |= user_set_non_blocking; } else { // Clearing the non-blocking mode always overrides any internally-set // non-blocking flag. Any subsequent asynchronous operations will need // to re-enable non-blocking I/O. state &= ~(user_set_non_blocking | internal_non_blocking); } } } return result; } int select(int nfds, fd_set* readfds, fd_set* writefds, fd_set* exceptfds, timeval* timeout, asio::error_code& ec) { clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) if (!readfds && !writefds && !exceptfds && timeout) { DWORD milliseconds = timeout->tv_sec * 1000 + timeout->tv_usec / 1000; if (milliseconds == 0) milliseconds = 1; // Force context switch. ::Sleep(milliseconds); ec = asio::error_code(); return 0; } // The select() call allows timeout values measured in microseconds, but the // system clock (as wrapped by boost::posix_time::microsec_clock) typically // has a resolution of 10 milliseconds. This can lead to a spinning select // reactor, meaning increased CPU usage, when waiting for the earliest // scheduled timeout if it's less than 10 milliseconds away. To avoid a tight // spin we'll use a minimum timeout of 1 millisecond. if (timeout && timeout->tv_sec == 0 && timeout->tv_usec > 0 && timeout->tv_usec < 1000) timeout->tv_usec = 1000; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) #if defined(__hpux) && defined(__SELECT) timespec ts; ts.tv_sec = timeout ? timeout->tv_sec : 0; ts.tv_nsec = timeout ? timeout->tv_usec * 1000 : 0; return error_wrapper(::pselect(nfds, readfds, writefds, exceptfds, timeout ? 
&ts : 0, 0), ec); #else int result = error_wrapper(::select(nfds, readfds, writefds, exceptfds, timeout), ec); if (result >= 0) ec = asio::error_code(); return result; #endif } int poll_read(socket_type s, state_type state, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return socket_error_retval; } #if defined(ASIO_WINDOWS) \ || defined(__CYGWIN__) \ || defined(__SYMBIAN32__) fd_set fds; FD_ZERO(&fds); FD_SET(s, &fds); timeval zero_timeout; zero_timeout.tv_sec = 0; zero_timeout.tv_usec = 0; timeval* timeout = (state & user_set_non_blocking) ? &zero_timeout : 0; clear_last_error(); int result = error_wrapper(::select(s + 1, &fds, 0, 0, timeout), ec); #else // defined(ASIO_WINDOWS) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) pollfd fds; fds.fd = s; fds.events = POLLIN; fds.revents = 0; int timeout = (state & user_set_non_blocking) ? 0 : -1; clear_last_error(); int result = error_wrapper(::poll(&fds, 1, timeout), ec); #endif // defined(ASIO_WINDOWS) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) if (result == 0) ec = (state & user_set_non_blocking) ? asio::error::would_block : asio::error_code(); else if (result > 0) ec = asio::error_code(); return result; } int poll_write(socket_type s, state_type state, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return socket_error_retval; } #if defined(ASIO_WINDOWS) \ || defined(__CYGWIN__) \ || defined(__SYMBIAN32__) fd_set fds; FD_ZERO(&fds); FD_SET(s, &fds); timeval zero_timeout; zero_timeout.tv_sec = 0; zero_timeout.tv_usec = 0; timeval* timeout = (state & user_set_non_blocking) ? &zero_timeout : 0; clear_last_error(); int result = error_wrapper(::select(s + 1, 0, &fds, 0, timeout), ec); #else // defined(ASIO_WINDOWS) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) pollfd fds; fds.fd = s; fds.events = POLLOUT; fds.revents = 0; int timeout = (state & user_set_non_blocking) ? 
0 : -1; clear_last_error(); int result = error_wrapper(::poll(&fds, 1, timeout), ec); #endif // defined(ASIO_WINDOWS) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) if (result == 0) ec = (state & user_set_non_blocking) ? asio::error::would_block : asio::error_code(); else if (result > 0) ec = asio::error_code(); return result; } int poll_connect(socket_type s, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return socket_error_retval; } #if defined(ASIO_WINDOWS) \ || defined(__CYGWIN__) \ || defined(__SYMBIAN32__) fd_set write_fds; FD_ZERO(&write_fds); FD_SET(s, &write_fds); fd_set except_fds; FD_ZERO(&except_fds); FD_SET(s, &except_fds); clear_last_error(); int result = error_wrapper(::select( s + 1, 0, &write_fds, &except_fds, 0), ec); if (result >= 0) ec = asio::error_code(); return result; #else // defined(ASIO_WINDOWS) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) pollfd fds; fds.fd = s; fds.events = POLLOUT; fds.revents = 0; clear_last_error(); int result = error_wrapper(::poll(&fds, 1, -1), ec); if (result >= 0) ec = asio::error_code(); return result; #endif // defined(ASIO_WINDOWS) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) } #endif // !defined(ASIO_WINDOWS_RUNTIME) const char* inet_ntop(int af, const void* src, char* dest, size_t length, unsigned long scope_id, asio::error_code& ec) { clear_last_error(); #if defined(ASIO_WINDOWS_RUNTIME) using namespace std; // For sprintf. const unsigned char* bytes = static_cast(src); if (af == ASIO_OS_DEF(AF_INET)) { sprintf_s(dest, length, "%u.%u.%u.%u", bytes[0], bytes[1], bytes[2], bytes[3]); return dest; } else if (af == ASIO_OS_DEF(AF_INET6)) { size_t n = 0, b = 0, z = 0; while (n < length && b < 16) { if (bytes[b] == 0 && bytes[b + 1] == 0 && z == 0) { do b += 2; while (b < 16 && bytes[b] == 0 && bytes[b + 1] == 0); n += sprintf_s(dest + n, length - n, ":%s", b < 16 ? "" : ":"), ++z; } else { n += sprintf_s(dest + n, length - n, "%s%x", b ? 
":" : "", (static_cast(bytes[b]) << 8) | bytes[b + 1]); b += 2; } } if (scope_id) n += sprintf_s(dest + n, length - n, "%%%lu", scope_id); return dest; } else { ec = asio::error::address_family_not_supported; return 0; } #elif defined(ASIO_WINDOWS) || defined(__CYGWIN__) using namespace std; // For memcpy. if (af != ASIO_OS_DEF(AF_INET) && af != ASIO_OS_DEF(AF_INET6)) { ec = asio::error::address_family_not_supported; return 0; } union { socket_addr_type base; sockaddr_storage_type storage; sockaddr_in4_type v4; sockaddr_in6_type v6; } address; DWORD address_length; if (af == ASIO_OS_DEF(AF_INET)) { address_length = sizeof(sockaddr_in4_type); address.v4.sin_family = ASIO_OS_DEF(AF_INET); address.v4.sin_port = 0; memcpy(&address.v4.sin_addr, src, sizeof(in4_addr_type)); } else // AF_INET6 { address_length = sizeof(sockaddr_in6_type); address.v6.sin6_family = ASIO_OS_DEF(AF_INET6); address.v6.sin6_port = 0; address.v6.sin6_flowinfo = 0; address.v6.sin6_scope_id = scope_id; memcpy(&address.v6.sin6_addr, src, sizeof(in6_addr_type)); } DWORD string_length = static_cast(length); #if defined(BOOST_NO_ANSI_APIS) || (defined(_MSC_VER) && (_MSC_VER >= 1800)) LPWSTR string_buffer = (LPWSTR)_alloca(length * sizeof(WCHAR)); int result = error_wrapper(::WSAAddressToStringW(&address.base, address_length, 0, string_buffer, &string_length), ec); ::WideCharToMultiByte(CP_ACP, 0, string_buffer, -1, dest, static_cast(length), 0, 0); #else int result = error_wrapper(::WSAAddressToStringA( &address.base, address_length, 0, dest, &string_length), ec); #endif // Windows may set error code on success. if (result != socket_error_retval) ec = asio::error_code(); // Windows may not set an error code on failure. else if (result == socket_error_retval && !ec) ec = asio::error::invalid_argument; return result == socket_error_retval ? 
0 : dest; #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) const char* result = error_wrapper(::inet_ntop( af, src, dest, static_cast(length)), ec); if (result == 0 && !ec) ec = asio::error::invalid_argument; if (result != 0 && af == ASIO_OS_DEF(AF_INET6) && scope_id != 0) { using namespace std; // For strcat and sprintf. char if_name[IF_NAMESIZE + 1] = "%"; const in6_addr_type* ipv6_address = static_cast(src); bool is_link_local = ((ipv6_address->s6_addr[0] == 0xfe) && ((ipv6_address->s6_addr[1] & 0xc0) == 0x80)); bool is_multicast_link_local = ((ipv6_address->s6_addr[0] == 0xff) && ((ipv6_address->s6_addr[1] & 0x0f) == 0x02)); if ((!is_link_local && !is_multicast_link_local) || if_indextoname(static_cast(scope_id), if_name + 1) == 0) sprintf(if_name + 1, "%lu", scope_id); strcat(dest, if_name); } return result; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) } int inet_pton(int af, const char* src, void* dest, unsigned long* scope_id, asio::error_code& ec) { clear_last_error(); #if defined(ASIO_WINDOWS_RUNTIME) using namespace std; // For sscanf. 
unsigned char* bytes = static_cast(dest); if (af == ASIO_OS_DEF(AF_INET)) { unsigned int b0, b1, b2, b3; if (sscanf_s(src, "%u.%u.%u.%u", &b0, &b1, &b2, &b3) != 4) { ec = asio::error::invalid_argument; return -1; } if (b0 > 255 || b1 > 255 || b2 > 255 || b3 > 255) { ec = asio::error::invalid_argument; return -1; } bytes[0] = static_cast(b0); bytes[1] = static_cast(b1); bytes[2] = static_cast(b2); bytes[3] = static_cast(b3); ec = asio::error_code(); return 1; } else if (af == ASIO_OS_DEF(AF_INET6)) { unsigned char* bytes = static_cast(dest); std::memset(bytes, 0, 16); unsigned char back_bytes[16] = { 0 }; int num_front_bytes = 0, num_back_bytes = 0; const char* p = src; enum { fword, fcolon, bword, scope, done } state = fword; unsigned long current_word = 0; while (state != done) { if (current_word > 0xFFFF) { ec = asio::error::invalid_argument; return -1; } switch (state) { case fword: if (*p >= '0' && *p <= '9') current_word = current_word * 16 + *p++ - '0'; else if (*p >= 'a' && *p <= 'f') current_word = current_word * 16 + *p++ - 'a' + 10; else if (*p >= 'A' && *p <= 'F') current_word = current_word * 16 + *p++ - 'A' + 10; else { if (num_front_bytes == 16) { ec = asio::error::invalid_argument; return -1; } bytes[num_front_bytes++] = (current_word >> 8) & 0xFF; bytes[num_front_bytes++] = current_word & 0xFF; current_word = 0; if (*p == ':') state = fcolon, ++p; else if (*p == '%') state = scope, ++p; else if (*p == 0) state = done; else { ec = asio::error::invalid_argument; return -1; } } break; case fcolon: if (*p == ':') state = bword, ++p; else state = fword; break; case bword: if (*p >= '0' && *p <= '9') current_word = current_word * 16 + *p++ - '0'; else if (*p >= 'a' && *p <= 'f') current_word = current_word * 16 + *p++ - 'a' + 10; else if (*p >= 'A' && *p <= 'F') current_word = current_word * 16 + *p++ - 'A' + 10; else { if (num_front_bytes + num_back_bytes == 16) { ec = asio::error::invalid_argument; return -1; } back_bytes[num_back_bytes++] = 
(current_word >> 8) & 0xFF; back_bytes[num_back_bytes++] = current_word & 0xFF; current_word = 0; if (*p == ':') state = bword, ++p; else if (*p == '%') state = scope, ++p; else if (*p == 0) state = done; else { ec = asio::error::invalid_argument; return -1; } } break; case scope: if (*p >= '0' && *p <= '9') current_word = current_word * 10 + *p++ - '0'; else if (*p == 0) *scope_id = current_word, state = done; else { ec = asio::error::invalid_argument; return -1; } break; default: break; } } for (int i = 0; i < num_back_bytes; ++i) bytes[16 - num_back_bytes + i] = back_bytes[i]; ec = asio::error_code(); return 1; } else { ec = asio::error::address_family_not_supported; return -1; } #elif defined(ASIO_WINDOWS) || defined(__CYGWIN__) using namespace std; // For memcpy and strcmp. if (af != ASIO_OS_DEF(AF_INET) && af != ASIO_OS_DEF(AF_INET6)) { ec = asio::error::address_family_not_supported; return -1; } union { socket_addr_type base; sockaddr_storage_type storage; sockaddr_in4_type v4; sockaddr_in6_type v6; } address; int address_length = sizeof(sockaddr_storage_type); #if defined(BOOST_NO_ANSI_APIS) || (defined(_MSC_VER) && (_MSC_VER >= 1800)) int num_wide_chars = static_cast(strlen(src)) + 1; LPWSTR wide_buffer = (LPWSTR)_alloca(num_wide_chars * sizeof(WCHAR)); ::MultiByteToWideChar(CP_ACP, 0, src, -1, wide_buffer, num_wide_chars); int result = error_wrapper(::WSAStringToAddressW( wide_buffer, af, 0, &address.base, &address_length), ec); #else int result = error_wrapper(::WSAStringToAddressA( const_cast(src), af, 0, &address.base, &address_length), ec); #endif if (af == ASIO_OS_DEF(AF_INET)) { if (result != socket_error_retval) { memcpy(dest, &address.v4.sin_addr, sizeof(in4_addr_type)); ec = asio::error_code(); } else if (strcmp(src, "255.255.255.255") == 0) { static_cast(dest)->s_addr = INADDR_NONE; ec = asio::error_code(); } } else // AF_INET6 { if (result != socket_error_retval) { memcpy(dest, &address.v6.sin6_addr, sizeof(in6_addr_type)); if (scope_id) 
*scope_id = address.v6.sin6_scope_id; ec = asio::error_code(); } } // Windows may not set an error code on failure. if (result == socket_error_retval && !ec) ec = asio::error::invalid_argument; if (result != socket_error_retval) ec = asio::error_code(); return result == socket_error_retval ? -1 : 1; #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) using namespace std; // For strchr, memcpy and atoi. // On some platforms, inet_pton fails if an address string contains a scope // id. Detect and remove the scope id before passing the string to inet_pton. const bool is_v6 = (af == ASIO_OS_DEF(AF_INET6)); const char* if_name = is_v6 ? strchr(src, '%') : 0; char src_buf[max_addr_v6_str_len + 1]; const char* src_ptr = src; if (if_name != 0) { if (if_name - src > max_addr_v6_str_len) { ec = asio::error::invalid_argument; return 0; } memcpy(src_buf, src, if_name - src); src_buf[if_name - src] = 0; src_ptr = src_buf; } int result = error_wrapper(::inet_pton(af, src_ptr, dest), ec); if (result <= 0 && !ec) ec = asio::error::invalid_argument; if (result > 0 && is_v6 && scope_id) { using namespace std; // For strchr and atoi. 
*scope_id = 0; if (if_name != 0) { in6_addr_type* ipv6_address = static_cast(dest); bool is_link_local = ((ipv6_address->s6_addr[0] == 0xfe) && ((ipv6_address->s6_addr[1] & 0xc0) == 0x80)); bool is_multicast_link_local = ((ipv6_address->s6_addr[0] == 0xff) && ((ipv6_address->s6_addr[1] & 0x0f) == 0x02)); if (is_link_local || is_multicast_link_local) *scope_id = if_nametoindex(if_name + 1); if (*scope_id == 0) *scope_id = atoi(if_name + 1); } } return result; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) } int gethostname(char* name, int namelen, asio::error_code& ec) { clear_last_error(); #if defined(ASIO_WINDOWS_RUNTIME) try { using namespace Windows::Foundation::Collections; using namespace Windows::Networking; using namespace Windows::Networking::Connectivity; IVectorView^ hostnames = NetworkInformation::GetHostNames(); for (unsigned i = 0; i < hostnames->Size; ++i) { HostName^ hostname = hostnames->GetAt(i); if (hostname->Type == HostNameType::DomainName) { std::wstring_convert> converter; std::string raw_name = converter.to_bytes(hostname->RawName->Data()); if (namelen > 0 && raw_name.size() < static_cast(namelen)) { strcpy_s(name, namelen, raw_name.c_str()); return 0; } } } return -1; } catch (Platform::Exception^ e) { ec = asio::error_code(e->HResult, asio::system_category()); return -1; } #else // defined(ASIO_WINDOWS_RUNTIME) int result = error_wrapper(::gethostname(name, namelen), ec); # if defined(ASIO_WINDOWS) if (result == 0) ec = asio::error_code(); # endif // defined(ASIO_WINDOWS) return result; #endif // defined(ASIO_WINDOWS_RUNTIME) } #if !defined(ASIO_WINDOWS_RUNTIME) #if !defined(ASIO_HAS_GETADDRINFO) // The following functions are only needed for emulation of getaddrinfo and // getnameinfo. 
inline asio::error_code translate_netdb_error(int error) { switch (error) { case 0: return asio::error_code(); case HOST_NOT_FOUND: return asio::error::host_not_found; case TRY_AGAIN: return asio::error::host_not_found_try_again; case NO_RECOVERY: return asio::error::no_recovery; case NO_DATA: return asio::error::no_data; default: ASIO_ASSERT(false); return asio::error::invalid_argument; } } inline hostent* gethostbyaddr(const char* addr, int length, int af, hostent* result, char* buffer, int buflength, asio::error_code& ec) { clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) (void)(buffer); (void)(buflength); hostent* retval = error_wrapper(::gethostbyaddr(addr, length, af), ec); if (!retval) return 0; ec = asio::error_code(); *result = *retval; return retval; #elif defined(__sun) || defined(__QNX__) int error = 0; hostent* retval = error_wrapper(::gethostbyaddr_r(addr, length, af, result, buffer, buflength, &error), ec); if (error) ec = translate_netdb_error(error); return retval; #elif defined(__MACH__) && defined(__APPLE__) (void)(buffer); (void)(buflength); int error = 0; hostent* retval = error_wrapper(::getipnodebyaddr( addr, length, af, &error), ec); if (error) ec = translate_netdb_error(error); if (!retval) return 0; *result = *retval; return retval; #else hostent* retval = 0; int error = 0; error_wrapper(::gethostbyaddr_r(addr, length, af, result, buffer, buflength, &retval, &error), ec); if (error) ec = translate_netdb_error(error); return retval; #endif } inline hostent* gethostbyname(const char* name, int af, struct hostent* result, char* buffer, int buflength, int ai_flags, asio::error_code& ec) { clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) (void)(buffer); (void)(buflength); (void)(ai_flags); if (af != ASIO_OS_DEF(AF_INET)) { ec = asio::error::address_family_not_supported; return 0; } hostent* retval = error_wrapper(::gethostbyname(name), ec); if (!retval) return 0; ec = asio::error_code(); *result = 
*retval; return result; #elif defined(__sun) || defined(__QNX__) (void)(ai_flags); if (af != ASIO_OS_DEF(AF_INET)) { ec = asio::error::address_family_not_supported; return 0; } int error = 0; hostent* retval = error_wrapper(::gethostbyname_r(name, result, buffer, buflength, &error), ec); if (error) ec = translate_netdb_error(error); return retval; #elif defined(__MACH__) && defined(__APPLE__) (void)(buffer); (void)(buflength); int error = 0; hostent* retval = error_wrapper(::getipnodebyname( name, af, ai_flags, &error), ec); if (error) ec = translate_netdb_error(error); if (!retval) return 0; *result = *retval; return retval; #else (void)(ai_flags); if (af != ASIO_OS_DEF(AF_INET)) { ec = asio::error::address_family_not_supported; return 0; } hostent* retval = 0; int error = 0; error_wrapper(::gethostbyname_r(name, result, buffer, buflength, &retval, &error), ec); if (error) ec = translate_netdb_error(error); return retval; #endif } inline void freehostent(hostent* h) { #if defined(__MACH__) && defined(__APPLE__) if (h) ::freehostent(h); #else (void)(h); #endif } // Emulation of getaddrinfo based on implementation in: // Stevens, W. R., UNIX Network Programming Vol. 1, 2nd Ed., Prentice-Hall 1998. struct gai_search { const char* host; int family; }; inline int gai_nsearch(const char* host, const addrinfo_type* hints, gai_search (&search)[2]) { int search_count = 0; if (host == 0 || host[0] == '\0') { if (hints->ai_flags & AI_PASSIVE) { // No host and AI_PASSIVE implies wildcard bind. 
switch (hints->ai_family) { case ASIO_OS_DEF(AF_INET): search[search_count].host = "0.0.0.0"; search[search_count].family = ASIO_OS_DEF(AF_INET); ++search_count; break; case ASIO_OS_DEF(AF_INET6): search[search_count].host = "0::0"; search[search_count].family = ASIO_OS_DEF(AF_INET6); ++search_count; break; case ASIO_OS_DEF(AF_UNSPEC): search[search_count].host = "0::0"; search[search_count].family = ASIO_OS_DEF(AF_INET6); ++search_count; search[search_count].host = "0.0.0.0"; search[search_count].family = ASIO_OS_DEF(AF_INET); ++search_count; break; default: break; } } else { // No host and not AI_PASSIVE means connect to local host. switch (hints->ai_family) { case ASIO_OS_DEF(AF_INET): search[search_count].host = "localhost"; search[search_count].family = ASIO_OS_DEF(AF_INET); ++search_count; break; case ASIO_OS_DEF(AF_INET6): search[search_count].host = "localhost"; search[search_count].family = ASIO_OS_DEF(AF_INET6); ++search_count; break; case ASIO_OS_DEF(AF_UNSPEC): search[search_count].host = "localhost"; search[search_count].family = ASIO_OS_DEF(AF_INET6); ++search_count; search[search_count].host = "localhost"; search[search_count].family = ASIO_OS_DEF(AF_INET); ++search_count; break; default: break; } } } else { // Host is specified. 
switch (hints->ai_family) { case ASIO_OS_DEF(AF_INET): search[search_count].host = host; search[search_count].family = ASIO_OS_DEF(AF_INET); ++search_count; break; case ASIO_OS_DEF(AF_INET6): search[search_count].host = host; search[search_count].family = ASIO_OS_DEF(AF_INET6); ++search_count; break; case ASIO_OS_DEF(AF_UNSPEC): search[search_count].host = host; search[search_count].family = ASIO_OS_DEF(AF_INET6); ++search_count; search[search_count].host = host; search[search_count].family = ASIO_OS_DEF(AF_INET); ++search_count; break; default: break; } } return search_count; } template inline T* gai_alloc(std::size_t size = sizeof(T)) { using namespace std; T* p = static_cast(::operator new(size, std::nothrow)); if (p) memset(p, 0, size); return p; } inline void gai_free(void* p) { ::operator delete(p); } inline void gai_strcpy(char* target, const char* source, std::size_t max_size) { using namespace std; #if defined(ASIO_HAS_SECURE_RTL) strcpy_s(target, max_size, source); #else // defined(ASIO_HAS_SECURE_RTL) *target = 0; if (max_size > 0) strncat(target, source, max_size - 1); #endif // defined(ASIO_HAS_SECURE_RTL) } enum { gai_clone_flag = 1 << 30 }; inline int gai_aistruct(addrinfo_type*** next, const addrinfo_type* hints, const void* addr, int family) { using namespace std; addrinfo_type* ai = gai_alloc(); if (ai == 0) return EAI_MEMORY; ai->ai_next = 0; **next = ai; *next = &ai->ai_next; ai->ai_canonname = 0; ai->ai_socktype = hints->ai_socktype; if (ai->ai_socktype == 0) ai->ai_flags |= gai_clone_flag; ai->ai_protocol = hints->ai_protocol; ai->ai_family = family; switch (ai->ai_family) { case ASIO_OS_DEF(AF_INET): { sockaddr_in4_type* sinptr = gai_alloc(); if (sinptr == 0) return EAI_MEMORY; sinptr->sin_family = ASIO_OS_DEF(AF_INET); memcpy(&sinptr->sin_addr, addr, sizeof(in4_addr_type)); ai->ai_addr = reinterpret_cast(sinptr); ai->ai_addrlen = sizeof(sockaddr_in4_type); break; } case ASIO_OS_DEF(AF_INET6): { sockaddr_in6_type* sin6ptr = gai_alloc(); if 
(sin6ptr == 0) return EAI_MEMORY; sin6ptr->sin6_family = ASIO_OS_DEF(AF_INET6); memcpy(&sin6ptr->sin6_addr, addr, sizeof(in6_addr_type)); ai->ai_addr = reinterpret_cast(sin6ptr); ai->ai_addrlen = sizeof(sockaddr_in6_type); break; } default: break; } return 0; } inline addrinfo_type* gai_clone(addrinfo_type* ai) { using namespace std; addrinfo_type* new_ai = gai_alloc(); if (new_ai == 0) return new_ai; new_ai->ai_next = ai->ai_next; ai->ai_next = new_ai; new_ai->ai_flags = 0; new_ai->ai_family = ai->ai_family; new_ai->ai_socktype = ai->ai_socktype; new_ai->ai_protocol = ai->ai_protocol; new_ai->ai_canonname = 0; new_ai->ai_addrlen = ai->ai_addrlen; new_ai->ai_addr = gai_alloc(ai->ai_addrlen); memcpy(new_ai->ai_addr, ai->ai_addr, ai->ai_addrlen); return new_ai; } inline int gai_port(addrinfo_type* aihead, int port, int socktype) { int num_found = 0; for (addrinfo_type* ai = aihead; ai; ai = ai->ai_next) { if (ai->ai_flags & gai_clone_flag) { if (ai->ai_socktype != 0) { ai = gai_clone(ai); if (ai == 0) return -1; // ai now points to newly cloned entry. } } else if (ai->ai_socktype != socktype) { // Ignore if mismatch on socket type. continue; } ai->ai_socktype = socktype; switch (ai->ai_family) { case ASIO_OS_DEF(AF_INET): { sockaddr_in4_type* sinptr = reinterpret_cast(ai->ai_addr); sinptr->sin_port = port; ++num_found; break; } case ASIO_OS_DEF(AF_INET6): { sockaddr_in6_type* sin6ptr = reinterpret_cast(ai->ai_addr); sin6ptr->sin6_port = port; ++num_found; break; } default: break; } } return num_found; } inline int gai_serv(addrinfo_type* aihead, const addrinfo_type* hints, const char* serv) { using namespace std; int num_found = 0; if ( #if defined(AI_NUMERICSERV) (hints->ai_flags & AI_NUMERICSERV) || #endif isdigit(static_cast(serv[0]))) { int port = htons(atoi(serv)); if (hints->ai_socktype) { // Caller specifies socket type. 
int rc = gai_port(aihead, port, hints->ai_socktype); if (rc < 0) return EAI_MEMORY; num_found += rc; } else { // Caller does not specify socket type. int rc = gai_port(aihead, port, SOCK_STREAM); if (rc < 0) return EAI_MEMORY; num_found += rc; rc = gai_port(aihead, port, SOCK_DGRAM); if (rc < 0) return EAI_MEMORY; num_found += rc; } } else { // Try service name with TCP first, then UDP. if (hints->ai_socktype == 0 || hints->ai_socktype == SOCK_STREAM) { servent* sptr = getservbyname(serv, "tcp"); if (sptr != 0) { int rc = gai_port(aihead, sptr->s_port, SOCK_STREAM); if (rc < 0) return EAI_MEMORY; num_found += rc; } } if (hints->ai_socktype == 0 || hints->ai_socktype == SOCK_DGRAM) { servent* sptr = getservbyname(serv, "udp"); if (sptr != 0) { int rc = gai_port(aihead, sptr->s_port, SOCK_DGRAM); if (rc < 0) return EAI_MEMORY; num_found += rc; } } } if (num_found == 0) { if (hints->ai_socktype == 0) { // All calls to getservbyname() failed. return EAI_NONAME; } else { // Service not supported for socket type. return EAI_SERVICE; } } return 0; } inline int gai_echeck(const char* host, const char* service, int flags, int family, int socktype, int protocol) { (void)(flags); (void)(protocol); // Host or service must be specified. if (host == 0 || host[0] == '\0') if (service == 0 || service[0] == '\0') return EAI_NONAME; // Check combination of family and socket type. 
switch (family) { case ASIO_OS_DEF(AF_UNSPEC): break; case ASIO_OS_DEF(AF_INET): case ASIO_OS_DEF(AF_INET6): if (service != 0 && service[0] != '\0') if (socktype != 0 && socktype != SOCK_STREAM && socktype != SOCK_DGRAM) return EAI_SOCKTYPE; break; default: return EAI_FAMILY; } return 0; } inline void freeaddrinfo_emulation(addrinfo_type* aihead) { addrinfo_type* ai = aihead; while (ai) { gai_free(ai->ai_addr); gai_free(ai->ai_canonname); addrinfo_type* ainext = ai->ai_next; gai_free(ai); ai = ainext; } } inline int getaddrinfo_emulation(const char* host, const char* service, const addrinfo_type* hintsp, addrinfo_type** result) { // Set up linked list of addrinfo structures. addrinfo_type* aihead = 0; addrinfo_type** ainext = &aihead; char* canon = 0; // Supply default hints if not specified by caller. addrinfo_type hints = addrinfo_type(); hints.ai_family = ASIO_OS_DEF(AF_UNSPEC); if (hintsp) hints = *hintsp; // If the resolution is not specifically for AF_INET6, remove the AI_V4MAPPED // and AI_ALL flags. #if defined(AI_V4MAPPED) if (hints.ai_family != ASIO_OS_DEF(AF_INET6)) hints.ai_flags &= ~AI_V4MAPPED; #endif #if defined(AI_ALL) if (hints.ai_family != ASIO_OS_DEF(AF_INET6)) hints.ai_flags &= ~AI_ALL; #endif // Basic error checking. int rc = gai_echeck(host, service, hints.ai_flags, hints.ai_family, hints.ai_socktype, hints.ai_protocol); if (rc != 0) { freeaddrinfo_emulation(aihead); return rc; } gai_search search[2]; int search_count = gai_nsearch(host, &hints, search); for (gai_search* sptr = search; sptr < search + search_count; ++sptr) { // Check for IPv4 dotted decimal string. 
in4_addr_type inaddr; asio::error_code ec; if (socket_ops::inet_pton(ASIO_OS_DEF(AF_INET), sptr->host, &inaddr, 0, ec) == 1) { if (hints.ai_family != ASIO_OS_DEF(AF_UNSPEC) && hints.ai_family != ASIO_OS_DEF(AF_INET)) { freeaddrinfo_emulation(aihead); gai_free(canon); return EAI_FAMILY; } if (sptr->family == ASIO_OS_DEF(AF_INET)) { rc = gai_aistruct(&ainext, &hints, &inaddr, ASIO_OS_DEF(AF_INET)); if (rc != 0) { freeaddrinfo_emulation(aihead); gai_free(canon); return rc; } } continue; } // Check for IPv6 hex string. in6_addr_type in6addr; if (socket_ops::inet_pton(ASIO_OS_DEF(AF_INET6), sptr->host, &in6addr, 0, ec) == 1) { if (hints.ai_family != ASIO_OS_DEF(AF_UNSPEC) && hints.ai_family != ASIO_OS_DEF(AF_INET6)) { freeaddrinfo_emulation(aihead); gai_free(canon); return EAI_FAMILY; } if (sptr->family == ASIO_OS_DEF(AF_INET6)) { rc = gai_aistruct(&ainext, &hints, &in6addr, ASIO_OS_DEF(AF_INET6)); if (rc != 0) { freeaddrinfo_emulation(aihead); gai_free(canon); return rc; } } continue; } // Look up hostname. hostent hent; char hbuf[8192] = ""; hostent* hptr = socket_ops::gethostbyname(sptr->host, sptr->family, &hent, hbuf, sizeof(hbuf), hints.ai_flags, ec); if (hptr == 0) { if (search_count == 2) { // Failure is OK if there are multiple searches. continue; } freeaddrinfo_emulation(aihead); gai_free(canon); if (ec == asio::error::host_not_found) return EAI_NONAME; if (ec == asio::error::host_not_found_try_again) return EAI_AGAIN; if (ec == asio::error::no_recovery) return EAI_FAIL; if (ec == asio::error::no_data) return EAI_NONAME; return EAI_NONAME; } // Check for address family mismatch if one was specified. if (hints.ai_family != ASIO_OS_DEF(AF_UNSPEC) && hints.ai_family != hptr->h_addrtype) { freeaddrinfo_emulation(aihead); gai_free(canon); socket_ops::freehostent(hptr); return EAI_FAMILY; } // Save canonical name first time. 
if (host != 0 && host[0] != '\0' && hptr->h_name && hptr->h_name[0] && (hints.ai_flags & AI_CANONNAME) && canon == 0) { std::size_t canon_len = strlen(hptr->h_name) + 1; canon = gai_alloc(canon_len); if (canon == 0) { freeaddrinfo_emulation(aihead); socket_ops::freehostent(hptr); return EAI_MEMORY; } gai_strcpy(canon, hptr->h_name, canon_len); } // Create an addrinfo structure for each returned address. for (char** ap = hptr->h_addr_list; *ap; ++ap) { rc = gai_aistruct(&ainext, &hints, *ap, hptr->h_addrtype); if (rc != 0) { freeaddrinfo_emulation(aihead); gai_free(canon); socket_ops::freehostent(hptr); return EAI_FAMILY; } } socket_ops::freehostent(hptr); } // Check if we found anything. if (aihead == 0) { gai_free(canon); return EAI_NONAME; } // Return canonical name in first entry. if (host != 0 && host[0] != '\0' && (hints.ai_flags & AI_CANONNAME)) { if (canon) { aihead->ai_canonname = canon; canon = 0; } else { std::size_t canonname_len = strlen(search[0].host) + 1; aihead->ai_canonname = gai_alloc(canonname_len); if (aihead->ai_canonname == 0) { freeaddrinfo_emulation(aihead); return EAI_MEMORY; } gai_strcpy(aihead->ai_canonname, search[0].host, canonname_len); } } gai_free(canon); // Process the service name. if (service != 0 && service[0] != '\0') { rc = gai_serv(aihead, &hints, service); if (rc != 0) { freeaddrinfo_emulation(aihead); return rc; } } // Return result to caller. 
  // (tail of getaddrinfo_emulation(), whose body begins earlier in the file)
  *result = aihead;
  return 0;
}

// Emulation of getnameinfo() for platforms without a native implementation.
// Converts a socket address into a host name (reverse lookup unless
// NI_NUMERICHOST) and a service name (unless NI_NUMERICSERV).
// NOTE(review): angle-bracket template arguments appear to have been
// stripped from the casts by this extraction (e.g. reinterpret_cast(...)
// presumably read reinterpret_cast<const char*>(...) upstream) — restore
// against upstream asio before compiling.
inline asio::error_code getnameinfo_emulation(
    const socket_addr_type* sa, std::size_t salen, char* host,
    std::size_t hostlen, char* serv, std::size_t servlen, int flags,
    asio::error_code& ec)
{
  using namespace std;

  const char* addr;
  size_t addr_len;
  unsigned short port;
  switch (sa->sa_family)
  {
  case ASIO_OS_DEF(AF_INET):
    if (salen != sizeof(sockaddr_in4_type))
    {
      return ec = asio::error::invalid_argument;
    }
    addr = reinterpret_cast(
        &reinterpret_cast(sa)->sin_addr);
    addr_len = sizeof(in4_addr_type);
    port = reinterpret_cast(sa)->sin_port;
    break;
  case ASIO_OS_DEF(AF_INET6):
    if (salen != sizeof(sockaddr_in6_type))
    {
      return ec = asio::error::invalid_argument;
    }
    addr = reinterpret_cast(
        &reinterpret_cast(sa)->sin6_addr);
    addr_len = sizeof(in6_addr_type);
    port = reinterpret_cast(sa)->sin6_port;
    break;
  default:
    return ec = asio::error::address_family_not_supported;
  }

  if (host && hostlen > 0)
  {
    if (flags & NI_NUMERICHOST)
    {
      // Numeric form requested: no reverse DNS lookup.
      if (socket_ops::inet_ntop(sa->sa_family,
            addr, host, hostlen, 0, ec) == 0)
      {
        return ec;
      }
    }
    else
    {
      hostent hent;
      char hbuf[8192] = "";
      hostent* hptr = socket_ops::gethostbyaddr(addr,
          static_cast(addr_len), sa->sa_family,
          &hent, hbuf, sizeof(hbuf), ec);
      if (hptr && hptr->h_name && hptr->h_name[0] != '\0')
      {
        if (flags & NI_NOFQDN)
        {
          // Truncate at the first dot to drop the domain part.
          char* dot = strchr(hptr->h_name, '.');
          if (dot)
          {
            *dot = 0;
          }
        }
        gai_strcpy(host, hptr->h_name, hostlen);
        socket_ops::freehostent(hptr);
      }
      else
      {
        socket_ops::freehostent(hptr);
        if (flags & NI_NAMEREQD)
        {
          // Caller insisted on a resolved name; fail rather than
          // fall back to the numeric form.
          return ec = asio::error::host_not_found;
        }
        if (socket_ops::inet_ntop(sa->sa_family,
              addr, host, hostlen, 0, ec) == 0)
        {
          return ec;
        }
      }
    }
  }

  if (serv && servlen > 0)
  {
    if (flags & NI_NUMERICSERV)
    {
      // Need room for "65535" plus a terminator.
      if (servlen < 6)
      {
        return ec = asio::error::no_buffer_space;
      }
#if defined(ASIO_HAS_SECURE_RTL)
      sprintf_s(serv, servlen, "%u", ntohs(port));
#else // defined(ASIO_HAS_SECURE_RTL)
      sprintf(serv, "%u", ntohs(port));
#endif // defined(ASIO_HAS_SECURE_RTL)
    }
    else
    {
#if defined(ASIO_HAS_PTHREADS)
      // ::getservbyport() returns a pointer to static data, so calls to
      // it are serialised with a process-wide mutex.
      static ::pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
      ::pthread_mutex_lock(&mutex);
#endif // defined(ASIO_HAS_PTHREADS)
      servent* sptr = ::getservbyport(port, (flags & NI_DGRAM) ? "udp" : 0);
      if (sptr && sptr->s_name && sptr->s_name[0] != '\0')
      {
        gai_strcpy(serv, sptr->s_name, servlen);
      }
      else
      {
        // No services database entry: fall back to the numeric form.
        if (servlen < 6)
        {
          return ec = asio::error::no_buffer_space;
        }
#if defined(ASIO_HAS_SECURE_RTL)
        sprintf_s(serv, servlen, "%u", ntohs(port));
#else // defined(ASIO_HAS_SECURE_RTL)
        sprintf(serv, "%u", ntohs(port));
#endif // defined(ASIO_HAS_SECURE_RTL)
      }
#if defined(ASIO_HAS_PTHREADS)
      ::pthread_mutex_unlock(&mutex);
#endif // defined(ASIO_HAS_PTHREADS)
    }
  }

  ec = asio::error_code();
  return ec;
}

#endif // !defined(ASIO_HAS_GETADDRINFO)

// Map a getaddrinfo()-family EAI_* error code onto an asio::error_code.
inline asio::error_code translate_addrinfo_error(int error)
{
  switch (error)
  {
  case 0:
    return asio::error_code();
  case EAI_AGAIN:
    return asio::error::host_not_found_try_again;
  case EAI_BADFLAGS:
    return asio::error::invalid_argument;
  case EAI_FAIL:
    return asio::error::no_recovery;
  case EAI_FAMILY:
    return asio::error::address_family_not_supported;
  case EAI_MEMORY:
    return asio::error::no_memory;
  case EAI_NONAME:
#if defined(EAI_ADDRFAMILY)
  case EAI_ADDRFAMILY:
#endif
#if defined(EAI_NODATA) && (EAI_NODATA != EAI_NONAME)
  case EAI_NODATA:
#endif
    return asio::error::host_not_found;
  case EAI_SERVICE:
    return asio::error::service_not_found;
  case EAI_SOCKTYPE:
    return asio::error::socket_type_not_supported;
  default: // Possibly the non-portable EAI_SYSTEM.
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
    return asio::error_code(
        WSAGetLastError(), asio::error::get_system_category());
#else
    return asio::error_code(
        errno, asio::error::get_system_category());
#endif
  }
}

asio::error_code getaddrinfo(const char* host,
    const char* service, const addrinfo_type& hints,
    addrinfo_type** result, asio::error_code& ec)
{
  // Normalise empty strings to null pointers for the underlying API.
  host = (host && *host) ? host : 0;
  service = (service && *service) ?
      // (second half of the ternary started on the previous line)
      service : 0;
  clear_last_error();
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
# if defined(ASIO_HAS_GETADDRINFO)
  // Building for Windows XP, Windows Server 2003, or later.
  int error = ::getaddrinfo(host, service, &hints, result);
  return ec = translate_addrinfo_error(error);
# else
  // Building for Windows 2000 or earlier: getaddrinfo() may not exist, so
  // look it up dynamically and fall back to the emulation if absent.
  typedef int (WSAAPI *gai_t)(const char*,
      const char*, const addrinfo_type*, addrinfo_type**);
  if (HMODULE winsock_module = ::GetModuleHandleA("ws2_32"))
  {
    if (gai_t gai = (gai_t)::GetProcAddress(winsock_module, "getaddrinfo"))
    {
      int error = gai(host, service, &hints, result);
      return ec = translate_addrinfo_error(error);
    }
  }
  int error = getaddrinfo_emulation(host, service, &hints, result);
  return ec = translate_addrinfo_error(error);
# endif
#elif !defined(ASIO_HAS_GETADDRINFO)
  int error = getaddrinfo_emulation(host, service, &hints, result);
  return ec = translate_addrinfo_error(error);
#else
  int error = ::getaddrinfo(host, service, &hints, result);
  return ec = translate_addrinfo_error(error);
#endif
}

// Background-thread wrapper: skip the lookup if the originating resolver
// has already been destroyed (cancel token expired).
asio::error_code background_getaddrinfo(
    const weak_cancel_token_type& cancel_token, const char* host,
    const char* service, const addrinfo_type& hints,
    addrinfo_type** result, asio::error_code& ec)
{
  if (cancel_token.expired())
    ec = asio::error::operation_aborted;
  else
    socket_ops::getaddrinfo(host, service, hints, result, ec);
  return ec;
}

void freeaddrinfo(addrinfo_type* ai)
{
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
# if defined(ASIO_HAS_GETADDRINFO)
  // Building for Windows XP, Windows Server 2003, or later.
  ::freeaddrinfo(ai);
# else
  // Building for Windows 2000 or earlier: must free with the same
  // implementation (native or emulated) that performed the allocation.
  typedef int (WSAAPI *fai_t)(addrinfo_type*);
  if (HMODULE winsock_module = ::GetModuleHandleA("ws2_32"))
  {
    if (fai_t fai = (fai_t)::GetProcAddress(winsock_module, "freeaddrinfo"))
    {
      fai(ai);
      return;
    }
  }
  freeaddrinfo_emulation(ai);
# endif
#elif !defined(ASIO_HAS_GETADDRINFO)
  freeaddrinfo_emulation(ai);
#else
  ::freeaddrinfo(ai);
#endif
}

// NOTE(review): the static_cast / reinterpret_cast calls below have lost
// their angle-bracket template arguments in this extraction — restore
// against upstream asio before compiling.
asio::error_code getnameinfo(const socket_addr_type* addr,
    std::size_t addrlen, char* host, std::size_t hostlen,
    char* serv, std::size_t servlen, int flags, asio::error_code& ec)
{
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
# if defined(ASIO_HAS_GETADDRINFO)
  // Building for Windows XP, Windows Server 2003, or later.
  clear_last_error();
  int error = ::getnameinfo(addr, static_cast(addrlen),
      host, static_cast(hostlen),
      serv, static_cast(servlen), flags);
  return ec = translate_addrinfo_error(error);
# else
  // Building for Windows 2000 or earlier: resolve dynamically, fall back
  // to the emulation if getnameinfo() is unavailable.
  typedef int (WSAAPI *gni_t)(const socket_addr_type*,
      int, char*, DWORD, char*, DWORD, int);
  if (HMODULE winsock_module = ::GetModuleHandleA("ws2_32"))
  {
    if (gni_t gni = (gni_t)::GetProcAddress(winsock_module, "getnameinfo"))
    {
      clear_last_error();
      int error = gni(addr, static_cast(addrlen),
          host, static_cast(hostlen),
          serv, static_cast(servlen), flags);
      return ec = translate_addrinfo_error(error);
    }
  }
  clear_last_error();
  return getnameinfo_emulation(addr, addrlen,
      host, hostlen, serv, servlen, flags, ec);
# endif
#elif !defined(ASIO_HAS_GETADDRINFO)
  using namespace std; // For memcpy.
  // Copy the address so the ss_len field can be set for the emulation.
  sockaddr_storage_type tmp_addr;
  memcpy(&tmp_addr, addr, addrlen);
  tmp_addr.ss_len = addrlen;
  addr = reinterpret_cast(&tmp_addr);
  clear_last_error();
  return getnameinfo_emulation(addr, addrlen,
      host, hostlen, serv, servlen, flags, ec);
#else
  clear_last_error();
  int error = ::getnameinfo(addr, addrlen, host, hostlen, serv, servlen, flags);
  return ec = translate_addrinfo_error(error);
#endif
}

asio::error_code sync_getnameinfo(
    const socket_addr_type* addr, std::size_t addrlen,
    char* host, std::size_t hostlen, char* serv,
    std::size_t servlen, int sock_type, asio::error_code& ec)
{
  // First try resolving with the service name. If that fails try resolving
  // but allow the service to be returned as a number.
  int flags = (sock_type == SOCK_DGRAM) ? NI_DGRAM : 0;
  socket_ops::getnameinfo(addr, addrlen,
      host, hostlen, serv, servlen, flags, ec);
  if (ec)
  {
    socket_ops::getnameinfo(addr, addrlen, host, hostlen,
        serv, servlen, flags | NI_NUMERICSERV, ec);
  }
  return ec;
}

// Background-thread wrapper: skip the lookup if the originating resolver
// has already been destroyed (cancel token expired).
asio::error_code background_getnameinfo(
    const weak_cancel_token_type& cancel_token,
    const socket_addr_type* addr, std::size_t addrlen,
    char* host, std::size_t hostlen, char* serv,
    std::size_t servlen, int sock_type, asio::error_code& ec)
{
  if (cancel_token.expired())
  {
    ec = asio::error::operation_aborted;
  }
  else
  {
    // First try resolving with the service name. If that fails try resolving
    // but allow the service to be returned as a number.
    int flags = (sock_type == SOCK_DGRAM) ? NI_DGRAM : 0;
    socket_ops::getnameinfo(addr, addrlen,
        host, hostlen, serv, servlen, flags, ec);
    if (ec)
    {
      socket_ops::getnameinfo(addr, addrlen, host, hostlen,
          serv, servlen, flags | NI_NUMERICSERV, ec);
    }
  }
  return ec;
}

#endif // !defined(ASIO_WINDOWS_RUNTIME)

// Byte-order helpers. On Windows Runtime the swap is done by hand via a
// byte pointer (ntohl/htonl are unavailable there); elsewhere the
// platform functions are used directly.
u_long_type network_to_host_long(u_long_type value)
{
#if defined(ASIO_WINDOWS_RUNTIME)
  unsigned char* value_p = reinterpret_cast(&value);
  u_long_type result = (static_cast(value_p[0]) << 24)
    | (static_cast(value_p[1]) << 16)
    | (static_cast(value_p[2]) << 8)
    | static_cast(value_p[3]);
  return result;
#else // defined(ASIO_WINDOWS_RUNTIME)
  return ntohl(value);
#endif // defined(ASIO_WINDOWS_RUNTIME)
}

u_long_type host_to_network_long(u_long_type value)
{
#if defined(ASIO_WINDOWS_RUNTIME)
  u_long_type result;
  unsigned char* result_p = reinterpret_cast(&result);
  result_p[0] = static_cast((value >> 24) & 0xFF);
  result_p[1] = static_cast((value >> 16) & 0xFF);
  result_p[2] = static_cast((value >> 8) & 0xFF);
  result_p[3] = static_cast(value & 0xFF);
  return result;
#else // defined(ASIO_WINDOWS_RUNTIME)
  return htonl(value);
#endif // defined(ASIO_WINDOWS_RUNTIME)
}

u_short_type network_to_host_short(u_short_type value)
{
#if defined(ASIO_WINDOWS_RUNTIME)
  unsigned char* value_p = reinterpret_cast(&value);
  u_short_type result = (static_cast(value_p[0]) << 8)
    | static_cast(value_p[1]);
  return result;
#else // defined(ASIO_WINDOWS_RUNTIME)
  return ntohs(value);
#endif // defined(ASIO_WINDOWS_RUNTIME)
}

u_short_type host_to_network_short(u_short_type value)
{
#if defined(ASIO_WINDOWS_RUNTIME)
  u_short_type result;
  unsigned char* result_p = reinterpret_cast(&result);
  result_p[0] = static_cast((value >> 8) & 0xFF);
  result_p[1] = static_cast(value & 0xFF);
  return result;
#else // defined(ASIO_WINDOWS_RUNTIME)
  return htons(value);
#endif // defined(ASIO_WINDOWS_RUNTIME)
}

} // namespace socket_ops
} // namespace detail
} // namespace asio

#include "asio/detail/pop_options.hpp"

#endif // ASIO_DETAIL_SOCKET_OPS_IPP
galera-26.4.3/asio/asio/detail/impl/reactive_serial_port_service.ipp0000664000177500017540000000766313540715002024122 0ustar dbartmy// // detail/impl/reactive_serial_port_service.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_REACTIVE_SERIAL_PORT_SERVICE_IPP #define ASIO_DETAIL_IMPL_REACTIVE_SERIAL_PORT_SERVICE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_SERIAL_PORT) #if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #include #include "asio/detail/reactive_serial_port_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { reactive_serial_port_service::reactive_serial_port_service( asio::io_service& io_service) : descriptor_service_(io_service) { } void reactive_serial_port_service::shutdown_service() { descriptor_service_.shutdown_service(); } asio::error_code reactive_serial_port_service::open( reactive_serial_port_service::implementation_type& impl, const std::string& device, asio::error_code& ec) { if (is_open(impl)) { ec = asio::error::already_open; return ec; } descriptor_ops::state_type state = 0; int fd = descriptor_ops::open(device.c_str(), O_RDWR | O_NONBLOCK | O_NOCTTY, ec); if (fd < 0) return ec; int s = descriptor_ops::fcntl(fd, F_GETFL, ec); if (s >= 0) s = descriptor_ops::fcntl(fd, F_SETFL, s | O_NONBLOCK, ec); if (s < 0) { asio::error_code ignored_ec; descriptor_ops::close(fd, state, ignored_ec); return ec; } // Set up default serial port options. 
  // (continuation of open(): put the descriptor's termios into raw mode)
  termios ios;
  errno = 0;
  s = descriptor_ops::error_wrapper(::tcgetattr(fd, &ios), ec);
  if (s >= 0)
  {
#if defined(_BSD_SOURCE)
    ::cfmakeraw(&ios);
#else
    // Manual equivalent of cfmakeraw(): disable input translation, output
    // post-processing, echo/canonical mode, and force 8-bit characters.
    ios.c_iflag &= ~(IGNBRK | BRKINT | PARMRK
        | ISTRIP | INLCR | IGNCR | ICRNL | IXON);
    ios.c_oflag &= ~OPOST;
    ios.c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN);
    ios.c_cflag &= ~(CSIZE | PARENB);
    ios.c_cflag |= CS8;
#endif
    ios.c_iflag |= IGNPAR;
    ios.c_cflag |= CREAD | CLOCAL;
    errno = 0;
    s = descriptor_ops::error_wrapper(::tcsetattr(fd, TCSANOW, &ios), ec);
  }

  if (s < 0)
  {
    // Configuration failed: close the descriptor before reporting.
    asio::error_code ignored_ec;
    descriptor_ops::close(fd, state, ignored_ec);
    return ec;
  }

  // We're done. Take ownership of the serial port descriptor.
  if (descriptor_service_.assign(impl, fd, ec))
  {
    asio::error_code ignored_ec;
    descriptor_ops::close(fd, state, ignored_ec);
  }

  return ec;
}

// Read-modify-write a serial option: fetch termios, let the caller's store
// function apply the option, then write the settings back.
asio::error_code reactive_serial_port_service::do_set_option(
    reactive_serial_port_service::implementation_type& impl,
    reactive_serial_port_service::store_function_type store,
    const void* option, asio::error_code& ec)
{
  termios ios;
  errno = 0;
  descriptor_ops::error_wrapper(::tcgetattr(
        descriptor_service_.native_handle(impl), &ios), ec);
  if (ec)
    return ec;

  if (store(option, ios, ec))
    return ec;

  errno = 0;
  descriptor_ops::error_wrapper(::tcsetattr(
        descriptor_service_.native_handle(impl), TCSANOW, &ios), ec);
  return ec;
}

// Fetch termios and let the caller's load function extract the option.
asio::error_code reactive_serial_port_service::do_get_option(
    const reactive_serial_port_service::implementation_type& impl,
    reactive_serial_port_service::load_function_type load,
    void* option, asio::error_code& ec) const
{
  termios ios;
  errno = 0;
  descriptor_ops::error_wrapper(::tcgetattr(
        descriptor_service_.native_handle(impl), &ios), ec);
  if (ec)
    return ec;

  return load(option, ios, ec);
}

} // namespace detail
} // namespace asio

#include "asio/detail/pop_options.hpp"

#endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)
#endif // defined(ASIO_HAS_SERIAL_PORT)

#endif // ASIO_DETAIL_IMPL_REACTIVE_SERIAL_PORT_SERVICE_IPP
galera-26.4.3/asio/asio/detail/impl/handler_tracking.ipp0000664000177500017540000002043413540715002021463 0ustar dbartmy// // detail/impl/handler_tracking.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_HANDLER_TRACKING_IPP #define ASIO_DETAIL_IMPL_HANDLER_TRACKING_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_ENABLE_HANDLER_TRACKING) #include #include #include "asio/detail/handler_tracking.hpp" #if defined(ASIO_HAS_BOOST_DATE_TIME) # include "asio/time_traits.hpp" #else // defined(ASIO_HAS_BOOST_DATE_TIME) # if defined(ASIO_HAS_STD_CHRONO) # include # elif defined(ASIO_HAS_BOOST_CHRONO) # include # endif # include "asio/detail/chrono_time_traits.hpp" # include "asio/wait_traits.hpp" #endif // defined(ASIO_HAS_BOOST_DATE_TIME) #if !defined(ASIO_WINDOWS) # include #endif // !defined(ASIO_WINDOWS) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct handler_tracking_timestamp { uint64_t seconds; uint64_t microseconds; handler_tracking_timestamp() { #if defined(ASIO_HAS_BOOST_DATE_TIME) boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1)); boost::posix_time::time_duration now = boost::posix_time::microsec_clock::universal_time() - epoch; #elif defined(ASIO_HAS_STD_CHRONO) typedef chrono_time_traits > traits_helper; traits_helper::posix_time_duration now( std::chrono::system_clock::now().time_since_epoch()); #elif defined(ASIO_HAS_BOOST_CHRONO) typedef chrono_time_traits > traits_helper; traits_helper::posix_time_duration now( boost::chrono::system_clock::now().time_since_epoch()); #endif seconds = static_cast(now.total_seconds()); microseconds = 
      // (continuation of handler_tracking_timestamp's constructor)
      static_cast(now.total_microseconds() % 1000000);
  }
};

// Process-wide state for handler tracking.
// NOTE(review): angle-bracket template arguments appear stripped by this
// extraction (e.g. "tss_ptr*" presumably read "tss_ptr<completion>*"
// upstream) — restore against upstream asio before compiling.
struct handler_tracking::tracking_state
{
  static_mutex mutex_;
  uint64_t next_id_;
  tss_ptr* current_completion_;
};

handler_tracking::tracking_state* handler_tracking::get_state()
{
  // Function-local static: single shared instance, created on first use.
  static tracking_state state = { ASIO_STATIC_MUTEX_INIT, 1, 0 };
  return &state;
}

void handler_tracking::init()
{
  static tracking_state* state = get_state();

  state->mutex_.init();

  static_mutex::scoped_lock lock(state->mutex_);
  if (state->current_completion_ == 0)
    state->current_completion_ = new tss_ptr;
}

void handler_tracking::creation(handler_tracking::tracked_handler* h,
    const char* object_type, void* object, const char* op_name)
{
  static tracking_state* state = get_state();

  // Allocate a unique id for the new handler under the lock, then release
  // the lock before doing the (slow) line formatting and write.
  static_mutex::scoped_lock lock(state->mutex_);
  h->id_ = state->next_id_++;
  lock.unlock();

  handler_tracking_timestamp timestamp;

  uint64_t current_id = 0;
  if (completion* current_completion = *state->current_completion_)
    current_id = current_completion->id_;

  write_line(
#if defined(ASIO_WINDOWS)
      "@asio|%I64u.%06I64u|%I64u*%I64u|%.20s@%p.%.50s\n",
#else // defined(ASIO_WINDOWS)
      "@asio|%llu.%06llu|%llu*%llu|%.20s@%p.%.50s\n",
#endif // defined(ASIO_WINDOWS)
      timestamp.seconds, timestamp.microseconds,
      current_id, h->id_, object_type, object, op_name);
}

handler_tracking::completion::completion(handler_tracking::tracked_handler* h)
  : id_(h->id_),
    invoked_(false),
    next_(*get_state()->current_completion_)
{
  // Push this completion onto the thread-specific completion chain.
  *get_state()->current_completion_ = this;
}

handler_tracking::completion::~completion()
{
  if (id_)
  {
    handler_tracking_timestamp timestamp;

    // '!' marks a handler destroyed after invocation, '~' one that was
    // destroyed without ever being invoked.
    write_line(
#if defined(ASIO_WINDOWS)
        "@asio|%I64u.%06I64u|%c%I64u|\n",
#else // defined(ASIO_WINDOWS)
        "@asio|%llu.%06llu|%c%llu|\n",
#endif // defined(ASIO_WINDOWS)
        timestamp.seconds, timestamp.microseconds,
        invoked_ ? '!' : '~', id_);
  }

  // Pop this completion off the thread-specific chain.
  *get_state()->current_completion_ = next_;
}

void handler_tracking::completion::invocation_begin()
{
  handler_tracking_timestamp timestamp;

  write_line(
#if defined(ASIO_WINDOWS)
      "@asio|%I64u.%06I64u|>%I64u|\n",
#else // defined(ASIO_WINDOWS)
      "@asio|%llu.%06llu|>%llu|\n",
#endif // defined(ASIO_WINDOWS)
      timestamp.seconds, timestamp.microseconds, id_);

  invoked_ = true;
}

void handler_tracking::completion::invocation_begin(
    const asio::error_code& ec)
{
  handler_tracking_timestamp timestamp;

  write_line(
#if defined(ASIO_WINDOWS)
      "@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d\n",
#else // defined(ASIO_WINDOWS)
      "@asio|%llu.%06llu|>%llu|ec=%.20s:%d\n",
#endif // defined(ASIO_WINDOWS)
      timestamp.seconds, timestamp.microseconds,
      id_, ec.category().name(), ec.value());

  invoked_ = true;
}

void handler_tracking::completion::invocation_begin(
    const asio::error_code& ec, std::size_t bytes_transferred)
{
  handler_tracking_timestamp timestamp;

  write_line(
#if defined(ASIO_WINDOWS)
      "@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d,bytes_transferred=%I64u\n",
#else // defined(ASIO_WINDOWS)
      "@asio|%llu.%06llu|>%llu|ec=%.20s:%d,bytes_transferred=%llu\n",
#endif // defined(ASIO_WINDOWS)
      timestamp.seconds, timestamp.microseconds,
      id_, ec.category().name(), ec.value(),
      static_cast(bytes_transferred));

  invoked_ = true;
}

void handler_tracking::completion::invocation_begin(
    const asio::error_code& ec, int signal_number)
{
  handler_tracking_timestamp timestamp;

  write_line(
#if defined(ASIO_WINDOWS)
      "@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d,signal_number=%d\n",
#else // defined(ASIO_WINDOWS)
      "@asio|%llu.%06llu|>%llu|ec=%.20s:%d,signal_number=%d\n",
#endif // defined(ASIO_WINDOWS)
      timestamp.seconds, timestamp.microseconds,
      id_, ec.category().name(), ec.value(), signal_number);

  invoked_ = true;
}

void handler_tracking::completion::invocation_begin(
    const asio::error_code& ec, const char* arg)
{
  handler_tracking_timestamp timestamp;

  write_line(
#if defined(ASIO_WINDOWS)
      "@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d,%.50s\n",
#else // defined(ASIO_WINDOWS)
      "@asio|%llu.%06llu|>%llu|ec=%.20s:%d,%.50s\n",
#endif // defined(ASIO_WINDOWS)
      timestamp.seconds, timestamp.microseconds,
      id_, ec.category().name(), ec.value(), arg);

  invoked_ = true;
}

void handler_tracking::completion::invocation_end()
{
  if (id_)
  {
    handler_tracking_timestamp timestamp;

    write_line(
#if defined(ASIO_WINDOWS)
        "@asio|%I64u.%06I64u|<%I64u|\n",
#else // defined(ASIO_WINDOWS)
        "@asio|%llu.%06llu|<%llu|\n",
#endif // defined(ASIO_WINDOWS)
        timestamp.seconds, timestamp.microseconds, id_);

    // Zeroed so the destructor does not log the completion a second time.
    id_ = 0;
  }
}

void handler_tracking::operation(const char* object_type,
    void* object, const char* op_name)
{
  static tracking_state* state = get_state();

  handler_tracking_timestamp timestamp;

  unsigned long long current_id = 0;
  if (completion* current_completion = *state->current_completion_)
    current_id = current_completion->id_;

  write_line(
#if defined(ASIO_WINDOWS)
      "@asio|%I64u.%06I64u|%I64u|%.20s@%p.%.50s\n",
#else // defined(ASIO_WINDOWS)
      "@asio|%llu.%06llu|%llu|%.20s@%p.%.50s\n",
#endif // defined(ASIO_WINDOWS)
      timestamp.seconds, timestamp.microseconds,
      current_id, object_type, object, op_name);
}

void handler_tracking::write_line(const char* format, ...)
{
  using namespace std; // For sprintf (or equivalent).
va_list args; va_start(args, format); char line[256] = ""; #if defined(ASIO_HAS_SECURE_RTL) int length = vsprintf_s(line, sizeof(line), format, args); #else // defined(ASIO_HAS_SECURE_RTL) int length = vsprintf(line, format, args); #endif // defined(ASIO_HAS_SECURE_RTL) va_end(args); #if defined(ASIO_WINDOWS) HANDLE stderr_handle = ::GetStdHandle(STD_ERROR_HANDLE); DWORD bytes_written = 0; ::WriteFile(stderr_handle, line, length, &bytes_written, 0); #else // defined(ASIO_WINDOWS) ::write(STDERR_FILENO, line, length); #endif // defined(ASIO_WINDOWS) } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_ENABLE_HANDLER_TRACKING) #endif // ASIO_DETAIL_IMPL_HANDLER_TRACKING_IPP galera-26.4.3/asio/asio/detail/impl/win_iocp_socket_service_base.ipp0000664000177500017540000005374213540715002024065 0ustar dbartmy// // detail/impl/win_iocp_socket_service_base.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_IOCP_SOCKET_SERVICE_BASE_IPP #define ASIO_DETAIL_IMPL_WIN_IOCP_SOCKET_SERVICE_BASE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/win_iocp_socket_service_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { win_iocp_socket_service_base::win_iocp_socket_service_base( asio::io_service& io_service) : io_service_(io_service), iocp_service_(use_service(io_service)), reactor_(0), connect_ex_(0), mutex_(), impl_list_(0) { } void win_iocp_socket_service_base::shutdown_service() { // Close all implementations, causing all operations to complete. 
  // (continuation of shutdown_service(): close every implementation in the
  //  intrusive list so that outstanding operations complete)
  // NOTE(review): angle-bracket template arguments appear stripped from the
  // casts in this extraction (e.g. "static_cast(" / "reinterpret_cast(") —
  // restore against upstream asio before compiling.
  asio::detail::mutex::scoped_lock lock(mutex_);

  base_implementation_type* impl = impl_list_;
  while (impl)
  {
    asio::error_code ignored_ec;
    close_for_destruction(*impl);
    impl = impl->next_;
  }
}

void win_iocp_socket_service_base::construct(
    win_iocp_socket_service_base::base_implementation_type& impl)
{
  impl.socket_ = invalid_socket;
  impl.state_ = 0;
  impl.cancel_token_.reset();
#if defined(ASIO_ENABLE_CANCELIO)
  impl.safe_cancellation_thread_id_ = 0;
#endif // defined(ASIO_ENABLE_CANCELIO)

  // Insert implementation into linked list of all implementations.
  asio::detail::mutex::scoped_lock lock(mutex_);
  impl.next_ = impl_list_;
  impl.prev_ = 0;
  if (impl_list_)
    impl_list_->prev_ = &impl;
  impl_list_ = &impl;
}

void win_iocp_socket_service_base::base_move_construct(
    win_iocp_socket_service_base::base_implementation_type& impl,
    win_iocp_socket_service_base::base_implementation_type& other_impl)
{
  // Steal the other implementation's socket and state, leaving it closed.
  impl.socket_ = other_impl.socket_;
  other_impl.socket_ = invalid_socket;

  impl.state_ = other_impl.state_;
  other_impl.state_ = 0;

  impl.cancel_token_ = other_impl.cancel_token_;
  other_impl.cancel_token_.reset();

#if defined(ASIO_ENABLE_CANCELIO)
  impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_;
  other_impl.safe_cancellation_thread_id_ = 0;
#endif // defined(ASIO_ENABLE_CANCELIO)

  // Insert implementation into linked list of all implementations.
  asio::detail::mutex::scoped_lock lock(mutex_);
  impl.next_ = impl_list_;
  impl.prev_ = 0;
  if (impl_list_)
    impl_list_->prev_ = &impl;
  impl_list_ = &impl;
}

void win_iocp_socket_service_base::base_move_assign(
    win_iocp_socket_service_base::base_implementation_type& impl,
    win_iocp_socket_service_base& other_service,
    win_iocp_socket_service_base::base_implementation_type& other_impl)
{
  close_for_destruction(impl);

  if (this != &other_service)
  {
    // Remove implementation from linked list of all implementations.
    asio::detail::mutex::scoped_lock lock(mutex_);
    if (impl_list_ == &impl)
      impl_list_ = impl.next_;
    if (impl.prev_)
      impl.prev_->next_ = impl.next_;
    if (impl.next_)
      impl.next_->prev_ = impl.prev_;
    impl.next_ = 0;
    impl.prev_ = 0;
  }

  impl.socket_ = other_impl.socket_;
  other_impl.socket_ = invalid_socket;

  impl.state_ = other_impl.state_;
  other_impl.state_ = 0;

  impl.cancel_token_ = other_impl.cancel_token_;
  other_impl.cancel_token_.reset();

#if defined(ASIO_ENABLE_CANCELIO)
  impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_;
  other_impl.safe_cancellation_thread_id_ = 0;
#endif // defined(ASIO_ENABLE_CANCELIO)

  if (this != &other_service)
  {
    // Insert implementation into linked list of all implementations.
    asio::detail::mutex::scoped_lock lock(other_service.mutex_);
    impl.next_ = other_service.impl_list_;
    impl.prev_ = 0;
    if (other_service.impl_list_)
      other_service.impl_list_->prev_ = &impl;
    other_service.impl_list_ = &impl;
  }
}

void win_iocp_socket_service_base::destroy(
    win_iocp_socket_service_base::base_implementation_type& impl)
{
  close_for_destruction(impl);

  // Remove implementation from linked list of all implementations.
  asio::detail::mutex::scoped_lock lock(mutex_);
  if (impl_list_ == &impl)
    impl_list_ = impl.next_;
  if (impl.prev_)
    impl.prev_->next_ = impl.next_;
  if (impl.next_)
    impl.next_->prev_ = impl.prev_;
  impl.next_ = 0;
  impl.prev_ = 0;
}

asio::error_code win_iocp_socket_service_base::close(
    win_iocp_socket_service_base::base_implementation_type& impl,
    asio::error_code& ec)
{
  if (is_open(impl))
  {
    ASIO_HANDLER_OPERATION(("socket", &impl, "close"));

    // Check if the reactor was created, in which case we need to close the
    // socket on the reactor as well to cancel any operations that might be
    // running there.
    reactor* r = static_cast(
          interlocked_compare_exchange_pointer(
            reinterpret_cast(&reactor_), 0, 0));
    if (r)
      r->deregister_descriptor(impl.socket_, impl.reactor_data_, true);
  }

  socket_ops::close(impl.socket_, impl.state_, false, ec);

  impl.socket_ = invalid_socket;
  impl.state_ = 0;
  impl.cancel_token_.reset();
#if defined(ASIO_ENABLE_CANCELIO)
  impl.safe_cancellation_thread_id_ = 0;
#endif // defined(ASIO_ENABLE_CANCELIO)

  return ec;
}

asio::error_code win_iocp_socket_service_base::cancel(
    win_iocp_socket_service_base::base_implementation_type& impl,
    asio::error_code& ec)
{
  if (!is_open(impl))
  {
    ec = asio::error::bad_descriptor;
    return ec;
  }

  ASIO_HANDLER_OPERATION(("socket", &impl, "cancel"));

  // Prefer CancelIoEx (looked up dynamically), which can cancel operations
  // started from any thread.
  if (FARPROC cancel_io_ex_ptr = ::GetProcAddress(
        ::GetModuleHandleA("KERNEL32"), "CancelIoEx"))
  {
    // The version of Windows supports cancellation from any thread.
    typedef BOOL (WINAPI* cancel_io_ex_t)(HANDLE, LPOVERLAPPED);
    cancel_io_ex_t cancel_io_ex = (cancel_io_ex_t)cancel_io_ex_ptr;
    socket_type sock = impl.socket_;
    HANDLE sock_as_handle = reinterpret_cast(sock);
    if (!cancel_io_ex(sock_as_handle, 0))
    {
      DWORD last_error = ::GetLastError();
      if (last_error == ERROR_NOT_FOUND)
      {
        // ERROR_NOT_FOUND means that there were no operations to be
        // cancelled. We swallow this error to match the behaviour on other
        // platforms.
        ec = asio::error_code();
      }
      else
      {
        ec = asio::error_code(last_error,
            asio::error::get_system_category());
      }
    }
    else
    {
      ec = asio::error_code();
    }
  }
#if defined(ASIO_ENABLE_CANCELIO)
  else if (impl.safe_cancellation_thread_id_ == 0)
  {
    // No operations have been started, so there's nothing to cancel.
    ec = asio::error_code();
  }
  else if (impl.safe_cancellation_thread_id_ == ::GetCurrentThreadId())
  {
    // Asynchronous operations have been started from the current thread only,
    // so it is safe to try to cancel them using CancelIo.
    socket_type sock = impl.socket_;
    HANDLE sock_as_handle = reinterpret_cast(sock);
    if (!::CancelIo(sock_as_handle))
    {
      DWORD last_error = ::GetLastError();
      ec = asio::error_code(last_error,
          asio::error::get_system_category());
    }
    else
    {
      ec = asio::error_code();
    }
  }
  else
  {
    // Asynchronous operations have been started from more than one thread,
    // so cancellation is not safe.
    ec = asio::error::operation_not_supported;
  }
#else // defined(ASIO_ENABLE_CANCELIO)
  else
  {
    // Cancellation is not supported as CancelIo may not be used.
    ec = asio::error::operation_not_supported;
  }
#endif // defined(ASIO_ENABLE_CANCELIO)

  // Cancel any operations started via the reactor.
  if (!ec)
  {
    reactor* r = static_cast(
          interlocked_compare_exchange_pointer(
            reinterpret_cast(&reactor_), 0, 0));
    if (r)
      r->cancel_ops(impl.socket_, impl.reactor_data_);
  }

  return ec;
}

asio::error_code win_iocp_socket_service_base::do_open(
    win_iocp_socket_service_base::base_implementation_type& impl,
    int family, int type, int protocol, asio::error_code& ec)
{
  if (is_open(impl))
  {
    ec = asio::error::already_open;
    return ec;
  }

  socket_holder sock(socket_ops::socket(family, type, protocol, ec));
  if (sock.get() == invalid_socket)
    return ec;

  // Associate the new socket with the I/O completion port.
  HANDLE sock_as_handle = reinterpret_cast(sock.get());
  if (iocp_service_.register_handle(sock_as_handle, ec))
    return ec;

  impl.socket_ = sock.release();
  switch (type)
  {
  case SOCK_STREAM:
    impl.state_ = socket_ops::stream_oriented;
    break;
  case SOCK_DGRAM:
    impl.state_ = socket_ops::datagram_oriented;
    break;
  default:
    impl.state_ = 0;
    break;
  }
  impl.cancel_token_.reset(static_cast(0), socket_ops::noop_deleter());
  ec = asio::error_code();
  return ec;
}

asio::error_code win_iocp_socket_service_base::do_assign(
    win_iocp_socket_service_base::base_implementation_type& impl,
    int type, socket_type native_socket, asio::error_code& ec)
{
  if (is_open(impl))
  {
    ec = asio::error::already_open;
    return ec;
  }

  // Associate the adopted socket with the I/O completion port.
  HANDLE sock_as_handle = reinterpret_cast(native_socket);
  if (iocp_service_.register_handle(sock_as_handle, ec))
    return ec;

  impl.socket_ = native_socket;
  switch (type)
  {
  case SOCK_STREAM:
    impl.state_ = socket_ops::stream_oriented;
    break;
  case SOCK_DGRAM:
    impl.state_ = socket_ops::datagram_oriented;
    break;
  default:
    impl.state_ = 0;
    break;
  }
  impl.cancel_token_.reset(static_cast(0), socket_ops::noop_deleter());
  ec = asio::error_code();
  return ec;
}

void win_iocp_socket_service_base::start_send_op(
    win_iocp_socket_service_base::base_implementation_type& impl,
    WSABUF* buffers, std::size_t buffer_count,
    socket_base::message_flags flags, bool noop, operation* op)
{
  update_cancellation_thread_id(impl);
  iocp_service_.work_started();

  if (noop)
    iocp_service_.on_completion(op);
  else if (!is_open(impl))
    iocp_service_.on_completion(op, asio::error::bad_descriptor);
  else
  {
    DWORD bytes_transferred = 0;
    int result = ::WSASend(impl.socket_, buffers,
        static_cast(buffer_count), &bytes_transferred, flags, op, 0);
    DWORD last_error = ::WSAGetLastError();
    if (last_error == ERROR_PORT_UNREACHABLE)
      last_error = WSAECONNREFUSED;
    if (result != 0 && last_error != WSA_IO_PENDING)
      iocp_service_.on_completion(op, last_error, bytes_transferred);
    else
      iocp_service_.on_pending(op);
  }
}

void win_iocp_socket_service_base::start_send_to_op(
    win_iocp_socket_service_base::base_implementation_type& impl,
    WSABUF* buffers, std::size_t buffer_count,
    const socket_addr_type* addr, int addrlen,
    socket_base::message_flags flags, operation* op)
{
  update_cancellation_thread_id(impl);
  iocp_service_.work_started();

  if (!is_open(impl))
    iocp_service_.on_completion(op, asio::error::bad_descriptor);
  else
  {
    DWORD bytes_transferred = 0;
    int result = ::WSASendTo(impl.socket_, buffers,
        static_cast(buffer_count),
        &bytes_transferred, flags, addr, addrlen, op, 0);
    DWORD last_error = ::WSAGetLastError();
    if (last_error == ERROR_PORT_UNREACHABLE)
      last_error = WSAECONNREFUSED;
    if (result != 0 && last_error != WSA_IO_PENDING)
      iocp_service_.on_completion(op, last_error, bytes_transferred);
    else
      iocp_service_.on_pending(op);
  }
}

void win_iocp_socket_service_base::start_receive_op(
    win_iocp_socket_service_base::base_implementation_type& impl,
    WSABUF* buffers, std::size_t buffer_count,
    socket_base::message_flags flags, bool noop, operation* op)
{
  update_cancellation_thread_id(impl);
  iocp_service_.work_started();

  if (noop)
    iocp_service_.on_completion(op);
  else if (!is_open(impl))
    iocp_service_.on_completion(op, asio::error::bad_descriptor);
  else
  {
    DWORD bytes_transferred = 0;
    DWORD recv_flags = flags;
    int result = ::WSARecv(impl.socket_, buffers,
        static_cast(buffer_count),
        &bytes_transferred, &recv_flags, op, 0);
    DWORD last_error = ::WSAGetLastError();
    // Map Windows-specific failures onto portable connection errors.
    if (last_error == ERROR_NETNAME_DELETED)
      last_error = WSAECONNRESET;
    else if (last_error == ERROR_PORT_UNREACHABLE)
      last_error = WSAECONNREFUSED;
    if (result != 0 && last_error != WSA_IO_PENDING)
      iocp_service_.on_completion(op, last_error, bytes_transferred);
    else
      iocp_service_.on_pending(op);
  }
}

void win_iocp_socket_service_base::start_null_buffers_receive_op(
    win_iocp_socket_service_base::base_implementation_type& impl,
    socket_base::message_flags flags, reactor_op* op)
{
  if ((impl.state_ & socket_ops::stream_oriented) != 0)
  {
    // For stream sockets on Windows, we may issue a 0-byte overlapped
    // WSARecv to wait until there is data available on the socket.
    ::WSABUF buf = { 0, 0 };
    start_receive_op(impl, &buf, 1, flags, false, op);
  }
  else
  {
    start_reactor_op(impl,
        (flags & socket_base::message_out_of_band)
          ? reactor::except_op : reactor::read_op, op);
  }
}

void win_iocp_socket_service_base::start_receive_from_op(
    win_iocp_socket_service_base::base_implementation_type& impl,
    WSABUF* buffers, std::size_t buffer_count, socket_addr_type* addr,
    socket_base::message_flags flags, int* addrlen, operation* op)
{
  update_cancellation_thread_id(impl);
  iocp_service_.work_started();

  if (!is_open(impl))
    iocp_service_.on_completion(op, asio::error::bad_descriptor);
  else
  {
    DWORD bytes_transferred = 0;
    DWORD recv_flags = flags;
    int result = ::WSARecvFrom(impl.socket_, buffers,
        static_cast(buffer_count),
        &bytes_transferred, &recv_flags, addr, addrlen, op, 0);
    DWORD last_error = ::WSAGetLastError();
    if (last_error == ERROR_PORT_UNREACHABLE)
      last_error = WSAECONNREFUSED;
    if (result != 0 && last_error != WSA_IO_PENDING)
      iocp_service_.on_completion(op, last_error, bytes_transferred);
    else
      iocp_service_.on_pending(op);
  }
}

void win_iocp_socket_service_base::start_accept_op(
    win_iocp_socket_service_base::base_implementation_type& impl,
    bool peer_is_open, socket_holder& new_socket, int family, int type,
    int protocol, void* output_buffer, DWORD address_length, operation* op)
{
  update_cancellation_thread_id(impl);
  iocp_service_.work_started();

  if (!is_open(impl))
    iocp_service_.on_completion(op, asio::error::bad_descriptor);
  else if (peer_is_open)
    iocp_service_.on_completion(op, asio::error::already_open);
  else
  {
    // Create the socket that will be handed to AcceptEx to receive the
    // incoming connection.
    asio::error_code ec;
    new_socket.reset(socket_ops::socket(family, type, protocol, ec));
    if (new_socket.get() == invalid_socket)
      iocp_service_.on_completion(op, ec);
    else
    {
      DWORD bytes_read = 0;
      BOOL result = ::AcceptEx(impl.socket_, new_socket.get(), output_buffer,
          0, address_length, address_length, &bytes_read, op);
      DWORD last_error = ::WSAGetLastError();
      if (!result && last_error != WSA_IO_PENDING)
        iocp_service_.on_completion(op, last_error);
      else
        iocp_service_.on_pending(op);
    }
  }
}

void win_iocp_socket_service_base::restart_accept_op(
    socket_type s, socket_holder& new_socket, int family,
int type, int protocol, void* output_buffer, DWORD address_length, operation* op) { new_socket.reset(); iocp_service_.work_started(); asio::error_code ec; new_socket.reset(socket_ops::socket(family, type, protocol, ec)); if (new_socket.get() == invalid_socket) iocp_service_.on_completion(op, ec); else { DWORD bytes_read = 0; BOOL result = ::AcceptEx(s, new_socket.get(), output_buffer, 0, address_length, address_length, &bytes_read, op); DWORD last_error = ::WSAGetLastError(); if (!result && last_error != WSA_IO_PENDING) iocp_service_.on_completion(op, last_error); else iocp_service_.on_pending(op); } } void win_iocp_socket_service_base::start_reactor_op( win_iocp_socket_service_base::base_implementation_type& impl, int op_type, reactor_op* op) { reactor& r = get_reactor(); update_cancellation_thread_id(impl); if (is_open(impl)) { r.start_op(op_type, impl.socket_, impl.reactor_data_, op, false, false); return; } else op->ec_ = asio::error::bad_descriptor; iocp_service_.post_immediate_completion(op, false); } void win_iocp_socket_service_base::start_connect_op( win_iocp_socket_service_base::base_implementation_type& impl, int family, int type, const socket_addr_type* addr, std::size_t addrlen, win_iocp_socket_connect_op_base* op) { // If ConnectEx is available, use that. if (family == ASIO_OS_DEF(AF_INET) || family == ASIO_OS_DEF(AF_INET6)) { if (connect_ex_fn connect_ex = get_connect_ex(impl, type)) { union address_union { socket_addr_type base; sockaddr_in4_type v4; sockaddr_in6_type v6; } a; using namespace std; // For memset. memset(&a, 0, sizeof(a)); a.base.sa_family = family; socket_ops::bind(impl.socket_, &a.base, family == ASIO_OS_DEF(AF_INET) ? 
sizeof(a.v4) : sizeof(a.v6), op->ec_); if (op->ec_ && op->ec_ != asio::error::invalid_argument) { iocp_service_.post_immediate_completion(op, false); return; } op->connect_ex_ = true; update_cancellation_thread_id(impl); iocp_service_.work_started(); BOOL result = connect_ex(impl.socket_, addr, static_cast(addrlen), 0, 0, 0, op); DWORD last_error = ::WSAGetLastError(); if (!result && last_error != WSA_IO_PENDING) iocp_service_.on_completion(op, last_error); else iocp_service_.on_pending(op); return; } } // Otherwise, fall back to a reactor-based implementation. reactor& r = get_reactor(); update_cancellation_thread_id(impl); if ((impl.state_ & socket_ops::non_blocking) != 0 || socket_ops::set_internal_non_blocking( impl.socket_, impl.state_, true, op->ec_)) { if (socket_ops::connect(impl.socket_, addr, addrlen, op->ec_) != 0) { if (op->ec_ == asio::error::in_progress || op->ec_ == asio::error::would_block) { op->ec_ = asio::error_code(); r.start_op(reactor::connect_op, impl.socket_, impl.reactor_data_, op, false, false); return; } } } r.post_immediate_completion(op, false); } void win_iocp_socket_service_base::close_for_destruction( win_iocp_socket_service_base::base_implementation_type& impl) { if (is_open(impl)) { ASIO_HANDLER_OPERATION(("socket", &impl, "close")); // Check if the reactor was created, in which case we need to close the // socket on the reactor as well to cancel any operations that might be // running there. 
reactor* r = static_cast( interlocked_compare_exchange_pointer( reinterpret_cast(&reactor_), 0, 0)); if (r) r->deregister_descriptor(impl.socket_, impl.reactor_data_, true); } asio::error_code ignored_ec; socket_ops::close(impl.socket_, impl.state_, true, ignored_ec); impl.socket_ = invalid_socket; impl.state_ = 0; impl.cancel_token_.reset(); #if defined(ASIO_ENABLE_CANCELIO) impl.safe_cancellation_thread_id_ = 0; #endif // defined(ASIO_ENABLE_CANCELIO) } void win_iocp_socket_service_base::update_cancellation_thread_id( win_iocp_socket_service_base::base_implementation_type& impl) { #if defined(ASIO_ENABLE_CANCELIO) if (impl.safe_cancellation_thread_id_ == 0) impl.safe_cancellation_thread_id_ = ::GetCurrentThreadId(); else if (impl.safe_cancellation_thread_id_ != ::GetCurrentThreadId()) impl.safe_cancellation_thread_id_ = ~DWORD(0); #else // defined(ASIO_ENABLE_CANCELIO) (void)impl; #endif // defined(ASIO_ENABLE_CANCELIO) } reactor& win_iocp_socket_service_base::get_reactor() { reactor* r = static_cast( interlocked_compare_exchange_pointer( reinterpret_cast(&reactor_), 0, 0)); if (!r) { r = &(use_service(io_service_)); interlocked_exchange_pointer(reinterpret_cast(&reactor_), r); } return *r; } win_iocp_socket_service_base::connect_ex_fn win_iocp_socket_service_base::get_connect_ex( win_iocp_socket_service_base::base_implementation_type& impl, int type) { #if defined(ASIO_DISABLE_CONNECTEX) (void)impl; (void)type; return 0; #else // defined(ASIO_DISABLE_CONNECTEX) if (type != ASIO_OS_DEF(SOCK_STREAM) && type != ASIO_OS_DEF(SOCK_SEQPACKET)) return 0; void* ptr = interlocked_compare_exchange_pointer(&connect_ex_, 0, 0); if (!ptr) { GUID guid = { 0x25a207b9, 0xddf3, 0x4660, { 0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e } }; DWORD bytes = 0; if (::WSAIoctl(impl.socket_, SIO_GET_EXTENSION_FUNCTION_POINTER, &guid, sizeof(guid), &ptr, sizeof(ptr), &bytes, 0, 0) != 0) { // Set connect_ex_ to a special value to indicate that ConnectEx is // unavailable. 
That way we won't bother trying to look it up again. ptr = this; } interlocked_exchange_pointer(&connect_ex_, ptr); } return reinterpret_cast(ptr == this ? 0 : ptr); #endif // defined(ASIO_DISABLE_CONNECTEX) } void* win_iocp_socket_service_base::interlocked_compare_exchange_pointer( void** dest, void* exch, void* cmp) { #if defined(_M_IX86) return reinterpret_cast(InterlockedCompareExchange( reinterpret_cast(dest), reinterpret_cast(exch), reinterpret_cast(cmp))); #else return InterlockedCompareExchangePointer(dest, exch, cmp); #endif } void* win_iocp_socket_service_base::interlocked_exchange_pointer( void** dest, void* val) { #if defined(_M_IX86) return reinterpret_cast(InterlockedExchange( reinterpret_cast(dest), reinterpret_cast(val))); #else return InterlockedExchangePointer(dest, val); #endif } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_IMPL_WIN_IOCP_SOCKET_SERVICE_BASE_IPP galera-26.4.3/asio/asio/detail/impl/timer_queue_set.ipp0000664000177500017540000000427413540715002021367 0ustar dbartmy// // detail/impl/timer_queue_set.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_TIMER_QUEUE_SET_IPP #define ASIO_DETAIL_IMPL_TIMER_QUEUE_SET_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/timer_queue_set.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { timer_queue_set::timer_queue_set() : first_(0) { } void timer_queue_set::insert(timer_queue_base* q) { q->next_ = first_; first_ = q; } void timer_queue_set::erase(timer_queue_base* q) { if (first_) { if (q == first_) { first_ = q->next_; q->next_ = 0; return; } for (timer_queue_base* p = first_; p->next_; p = p->next_) { if (p->next_ == q) { p->next_ = q->next_; q->next_ = 0; return; } } } } bool timer_queue_set::all_empty() const { for (timer_queue_base* p = first_; p; p = p->next_) if (!p->empty()) return false; return true; } long timer_queue_set::wait_duration_msec(long max_duration) const { long min_duration = max_duration; for (timer_queue_base* p = first_; p; p = p->next_) min_duration = p->wait_duration_msec(min_duration); return min_duration; } long timer_queue_set::wait_duration_usec(long max_duration) const { long min_duration = max_duration; for (timer_queue_base* p = first_; p; p = p->next_) min_duration = p->wait_duration_usec(min_duration); return min_duration; } void timer_queue_set::get_ready_timers(op_queue& ops) { for (timer_queue_base* p = first_; p; p = p->next_) p->get_ready_timers(ops); } void timer_queue_set::get_all_timers(op_queue& ops) { for (timer_queue_base* p = first_; p; p = p->next_) p->get_all_timers(ops); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IMPL_TIMER_QUEUE_SET_IPP galera-26.4.3/asio/asio/detail/impl/resolver_service_base.ipp0000664000177500017540000000613713540715002022543 0ustar dbartmy// // 
detail/impl/resolver_service_base.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_RESOLVER_SERVICE_BASE_IPP #define ASIO_DETAIL_IMPL_RESOLVER_SERVICE_BASE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/resolver_service_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class resolver_service_base::work_io_service_runner { public: work_io_service_runner(asio::io_service& io_service) : io_service_(io_service) {} void operator()() { io_service_.run(); } private: asio::io_service& io_service_; }; resolver_service_base::resolver_service_base( asio::io_service& io_service) : io_service_impl_(asio::use_service(io_service)), work_io_service_(new asio::io_service), work_io_service_impl_(asio::use_service< io_service_impl>(*work_io_service_)), work_(new asio::io_service::work(*work_io_service_)), work_thread_(0) { } resolver_service_base::~resolver_service_base() { shutdown_service(); } void resolver_service_base::shutdown_service() { work_.reset(); if (work_io_service_.get()) { work_io_service_->stop(); if (work_thread_.get()) { work_thread_->join(); work_thread_.reset(); } work_io_service_.reset(); } } void resolver_service_base::fork_service( asio::io_service::fork_event fork_ev) { if (work_thread_.get()) { if (fork_ev == asio::io_service::fork_prepare) { work_io_service_->stop(); work_thread_->join(); } else { work_io_service_->reset(); work_thread_.reset(new asio::detail::thread( work_io_service_runner(*work_io_service_))); } } } void resolver_service_base::construct( resolver_service_base::implementation_type& impl) { impl.reset(static_cast(0), 
socket_ops::noop_deleter()); } void resolver_service_base::destroy( resolver_service_base::implementation_type& impl) { ASIO_HANDLER_OPERATION(("resolver", &impl, "cancel")); impl.reset(); } void resolver_service_base::cancel( resolver_service_base::implementation_type& impl) { ASIO_HANDLER_OPERATION(("resolver", &impl, "cancel")); impl.reset(static_cast(0), socket_ops::noop_deleter()); } void resolver_service_base::start_resolve_op(operation* op) { start_work_thread(); io_service_impl_.work_started(); work_io_service_impl_.post_immediate_completion(op, false); } void resolver_service_base::start_work_thread() { asio::detail::mutex::scoped_lock lock(mutex_); if (!work_thread_.get()) { work_thread_.reset(new asio::detail::thread( work_io_service_runner(*work_io_service_))); } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IMPL_RESOLVER_SERVICE_BASE_IPP galera-26.4.3/asio/asio/detail/impl/posix_thread.ipp0000664000177500017540000000310013540715002020644 0ustar dbartmy// // detail/impl/posix_thread.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_POSIX_THREAD_IPP #define ASIO_DETAIL_IMPL_POSIX_THREAD_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_PTHREADS) #include "asio/detail/posix_thread.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { posix_thread::~posix_thread() { if (!joined_) ::pthread_detach(thread_); } void posix_thread::join() { if (!joined_) { ::pthread_join(thread_, 0); joined_ = true; } } void posix_thread::start_thread(func_base* arg) { int error = ::pthread_create(&thread_, 0, asio_detail_posix_thread_function, arg); if (error != 0) { delete arg; asio::error_code ec(error, asio::error::get_system_category()); asio::detail::throw_error(ec, "thread"); } } void* asio_detail_posix_thread_function(void* arg) { posix_thread::auto_func_base_ptr func = { static_cast(arg) }; func.ptr->run(); return 0; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_PTHREADS) #endif // ASIO_DETAIL_IMPL_POSIX_THREAD_IPP galera-26.4.3/asio/asio/detail/impl/winrt_timer_scheduler.ipp0000664000177500017540000000546713540715002022576 0ustar dbartmy// // detail/impl/winrt_timer_scheduler.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_IPP #define ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include "asio/detail/bind_handler.hpp" #include "asio/detail/winrt_timer_scheduler.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { winrt_timer_scheduler::winrt_timer_scheduler( asio::io_service& io_service) : asio::detail::service_base(io_service), io_service_(use_service(io_service)), mutex_(), event_(), timer_queues_(), thread_(0), stop_thread_(false), shutdown_(false) { thread_ = new asio::detail::thread( bind_handler(&winrt_timer_scheduler::call_run_thread, this)); } winrt_timer_scheduler::~winrt_timer_scheduler() { shutdown_service(); } void winrt_timer_scheduler::shutdown_service() { asio::detail::mutex::scoped_lock lock(mutex_); shutdown_ = true; stop_thread_ = true; event_.signal(lock); lock.unlock(); if (thread_) { thread_->join(); delete thread_; thread_ = 0; } op_queue ops; timer_queues_.get_all_timers(ops); io_service_.abandon_operations(ops); } void winrt_timer_scheduler::fork_service(asio::io_service::fork_event) { } void winrt_timer_scheduler::init_task() { } void winrt_timer_scheduler::run_thread() { asio::detail::mutex::scoped_lock lock(mutex_); while (!stop_thread_) { const long max_wait_duration = 5 * 60 * 1000000; long wait_duration = timer_queues_.wait_duration_usec(max_wait_duration); event_.wait_for_usec(lock, wait_duration); event_.clear(lock); op_queue ops; timer_queues_.get_ready_timers(ops); if (!ops.empty()) { lock.unlock(); io_service_.post_deferred_completions(ops); lock.lock(); } } } void winrt_timer_scheduler::call_run_thread(winrt_timer_scheduler* scheduler) { scheduler->run_thread(); } void 
winrt_timer_scheduler::do_add_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(mutex_); timer_queues_.insert(&queue); } void winrt_timer_scheduler::do_remove_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(mutex_); timer_queues_.erase(&queue); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_IPP galera-26.4.3/asio/asio/detail/impl/posix_event.ipp0000664000177500017540000000213713540715002020527 0ustar dbartmy// // detail/impl/posix_event.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_POSIX_EVENT_IPP #define ASIO_DETAIL_IMPL_POSIX_EVENT_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_PTHREADS) #include "asio/detail/posix_event.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { posix_event::posix_event() : state_(0) { int error = ::pthread_cond_init(&cond_, 0); asio::error_code ec(error, asio::error::get_system_category()); asio::detail::throw_error(ec, "event"); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_PTHREADS) #endif // ASIO_DETAIL_IMPL_POSIX_EVENT_IPP galera-26.4.3/asio/asio/detail/impl/throw_error.ipp0000664000177500017540000000351213540715002020536 0ustar dbartmy// // detail/impl/throw_error.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_THROW_ERROR_IPP #define ASIO_DETAIL_IMPL_THROW_ERROR_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/system_error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { void do_throw_error(const asio::error_code& err) { asio::system_error e(err); asio::detail::throw_exception(e); } void do_throw_error(const asio::error_code& err, const char* location) { // boostify: non-boost code starts here #if defined(ASIO_MSVC) && defined(ASIO_HAS_STD_SYSTEM_ERROR) // Microsoft's implementation of std::system_error is non-conformant in that // it ignores the error code's message when a "what" string is supplied. We'll // work around this by explicitly formatting the "what" string. std::string what_msg = location; what_msg += ": "; what_msg += err.message(); asio::system_error e(err, what_msg); asio::detail::throw_exception(e); #else // defined(ASIO_MSVC) && defined(ASIO_HAS_STD_SYSTEM_ERROR) // boostify: non-boost code ends here asio::system_error e(err, location); asio::detail::throw_exception(e); // boostify: non-boost code starts here #endif // defined(ASIO_MSVC) && defined(ASIO_HAS_STD_SYSTEM_ERROR) // boostify: non-boost code ends here } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IMPL_THROW_ERROR_IPP galera-26.4.3/asio/asio/detail/impl/strand_service.hpp0000664000177500017540000000642313540715002021200 0ustar dbartmy// // detail/impl/strand_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_STRAND_SERVICE_HPP #define ASIO_DETAIL_IMPL_STRAND_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/addressof.hpp" #include "asio/detail/call_stack.hpp" #include "asio/detail/completion_handler.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { inline strand_service::strand_impl::strand_impl() : operation(&strand_service::do_complete), locked_(false) { } struct strand_service::on_dispatch_exit { io_service_impl* io_service_; strand_impl* impl_; ~on_dispatch_exit() { impl_->mutex_.lock(); impl_->ready_queue_.push(impl_->waiting_queue_); bool more_handlers = impl_->locked_ = !impl_->ready_queue_.empty(); impl_->mutex_.unlock(); if (more_handlers) io_service_->post_immediate_completion(impl_, false); } }; template void strand_service::dispatch(strand_service::implementation_type& impl, Handler& handler) { // If we are already in the strand then the handler can run immediately. if (call_stack::contains(impl)) { fenced_block b(fenced_block::full); asio_handler_invoke_helpers::invoke(handler, handler); return; } // Allocate and construct an operation to wrap the handler. typedef completion_handler op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "strand", impl, "dispatch")); bool dispatch_immediately = do_dispatch(impl, p.p); operation* o = p.p; p.v = p.p = 0; if (dispatch_immediately) { // Indicate that this strand is executing on the current thread. call_stack::context ctx(impl); // Ensure the next handler, if any, is scheduled on block exit. 
on_dispatch_exit on_exit = { &io_service_, impl }; (void)on_exit; completion_handler::do_complete( &io_service_, o, asio::error_code(), 0); } } // Request the io_service to invoke the given handler and return immediately. template void strand_service::post(strand_service::implementation_type& impl, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef completion_handler op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "strand", impl, "post")); do_post(impl, p.p, is_continuation); p.v = p.p = 0; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IMPL_STRAND_SERVICE_HPP galera-26.4.3/asio/asio/detail/impl/reactive_descriptor_service.ipp0000664000177500017540000001277313540715002023753 0ustar dbartmy// // detail/impl/reactive_descriptor_service.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_REACTIVE_DESCRIPTOR_SERVICE_IPP #define ASIO_DETAIL_IMPL_REACTIVE_DESCRIPTOR_SERVICE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) #include "asio/error.hpp" #include "asio/detail/reactive_descriptor_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { reactive_descriptor_service::reactive_descriptor_service( asio::io_service& io_service) : reactor_(asio::use_service(io_service)) { reactor_.init_task(); } void reactive_descriptor_service::shutdown_service() { } void reactive_descriptor_service::construct( reactive_descriptor_service::implementation_type& impl) { impl.descriptor_ = -1; impl.state_ = 0; } void reactive_descriptor_service::move_construct( reactive_descriptor_service::implementation_type& impl, reactive_descriptor_service::implementation_type& other_impl) { impl.descriptor_ = other_impl.descriptor_; other_impl.descriptor_ = -1; impl.state_ = other_impl.state_; other_impl.state_ = 0; reactor_.move_descriptor(impl.descriptor_, impl.reactor_data_, other_impl.reactor_data_); } void reactive_descriptor_service::move_assign( reactive_descriptor_service::implementation_type& impl, reactive_descriptor_service& other_service, reactive_descriptor_service::implementation_type& other_impl) { destroy(impl); impl.descriptor_ = other_impl.descriptor_; other_impl.descriptor_ = -1; impl.state_ = other_impl.state_; other_impl.state_ = 0; other_service.reactor_.move_descriptor(impl.descriptor_, impl.reactor_data_, other_impl.reactor_data_); } void reactive_descriptor_service::destroy( reactive_descriptor_service::implementation_type& impl) { if (is_open(impl)) { ASIO_HANDLER_OPERATION(("descriptor", &impl, "close")); 
reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_, (impl.state_ & descriptor_ops::possible_dup) == 0); } asio::error_code ignored_ec; descriptor_ops::close(impl.descriptor_, impl.state_, ignored_ec); } asio::error_code reactive_descriptor_service::assign( reactive_descriptor_service::implementation_type& impl, const native_handle_type& native_descriptor, asio::error_code& ec) { if (is_open(impl)) { ec = asio::error::already_open; return ec; } if (int err = reactor_.register_descriptor( native_descriptor, impl.reactor_data_)) { ec = asio::error_code(err, asio::error::get_system_category()); return ec; } impl.descriptor_ = native_descriptor; impl.state_ = descriptor_ops::possible_dup; ec = asio::error_code(); return ec; } asio::error_code reactive_descriptor_service::close( reactive_descriptor_service::implementation_type& impl, asio::error_code& ec) { if (is_open(impl)) { ASIO_HANDLER_OPERATION(("descriptor", &impl, "close")); reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_, (impl.state_ & descriptor_ops::possible_dup) == 0); } descriptor_ops::close(impl.descriptor_, impl.state_, ec); // The descriptor is closed by the OS even if close() returns an error. // // (Actually, POSIX says the state of the descriptor is unspecified. On // Linux the descriptor is apparently closed anyway; e.g. see // http://lkml.org/lkml/2005/9/10/129 // We'll just have to assume that other OSes follow the same behaviour.) 
construct(impl); return ec; } reactive_descriptor_service::native_handle_type reactive_descriptor_service::release( reactive_descriptor_service::implementation_type& impl) { native_handle_type descriptor = impl.descriptor_; if (is_open(impl)) { ASIO_HANDLER_OPERATION(("descriptor", &impl, "release")); reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_, false); construct(impl); } return descriptor; } asio::error_code reactive_descriptor_service::cancel( reactive_descriptor_service::implementation_type& impl, asio::error_code& ec) { if (!is_open(impl)) { ec = asio::error::bad_descriptor; return ec; } ASIO_HANDLER_OPERATION(("descriptor", &impl, "cancel")); reactor_.cancel_ops(impl.descriptor_, impl.reactor_data_); ec = asio::error_code(); return ec; } void reactive_descriptor_service::start_op( reactive_descriptor_service::implementation_type& impl, int op_type, reactor_op* op, bool is_continuation, bool is_non_blocking, bool noop) { if (!noop) { if ((impl.state_ & descriptor_ops::non_blocking) || descriptor_ops::set_internal_non_blocking( impl.descriptor_, impl.state_, true, op->ec_)) { reactor_.start_op(op_type, impl.descriptor_, impl.reactor_data_, op, is_continuation, is_non_blocking); return; } } reactor_.post_immediate_completion(op, is_continuation); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) #endif // ASIO_DETAIL_IMPL_REACTIVE_DESCRIPTOR_SERVICE_IPP galera-26.4.3/asio/asio/detail/impl/task_io_service.hpp0000664000177500017540000000421513540715002021333 0ustar dbartmy// // detail/impl/task_io_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_TASK_IO_SERVICE_HPP #define ASIO_DETAIL_IMPL_TASK_IO_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/addressof.hpp" #include "asio/detail/completion_handler.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_cont_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template void task_io_service::dispatch(Handler& handler) { if (thread_call_stack::contains(this)) { fenced_block b(fenced_block::full); asio_handler_invoke_helpers::invoke(handler, handler); } else { // Allocate and construct an operation to wrap the handler. typedef completion_handler op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "io_service", this, "dispatch")); do_dispatch(p.p); p.v = p.p = 0; } } template void task_io_service::post(Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. 
typedef completion_handler op; typename op::ptr p = { asio::detail::addressof(handler), asio_handler_alloc_helpers::allocate( sizeof(op), handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((p.p, "io_service", this, "post")); post_immediate_completion(p.p, is_continuation); p.v = p.p = 0; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IMPL_TASK_IO_SERVICE_HPP galera-26.4.3/asio/asio/detail/impl/pipe_select_interrupter.ipp0000664000177500017540000000565313540715002023131 0ustar dbartmy// // detail/impl/pipe_select_interrupter.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_PIPE_SELECT_INTERRUPTER_IPP #define ASIO_DETAIL_IMPL_PIPE_SELECT_INTERRUPTER_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS_RUNTIME) #if !defined(ASIO_WINDOWS) #if !defined(__CYGWIN__) #if !defined(__SYMBIAN32__) #if !defined(ASIO_HAS_EVENTFD) #include #include #include #include #include "asio/detail/pipe_select_interrupter.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { pipe_select_interrupter::pipe_select_interrupter() { open_descriptors(); } void pipe_select_interrupter::open_descriptors() { int pipe_fds[2]; if (pipe(pipe_fds) == 0) { read_descriptor_ = pipe_fds[0]; ::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK); write_descriptor_ = pipe_fds[1]; ::fcntl(write_descriptor_, F_SETFL, O_NONBLOCK); #if defined(FD_CLOEXEC) ::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC); ::fcntl(write_descriptor_, F_SETFD, 
FD_CLOEXEC); #endif // defined(FD_CLOEXEC) } else { asio::error_code ec(errno, asio::error::get_system_category()); asio::detail::throw_error(ec, "pipe_select_interrupter"); } } pipe_select_interrupter::~pipe_select_interrupter() { close_descriptors(); } void pipe_select_interrupter::close_descriptors() { if (read_descriptor_ != -1) ::close(read_descriptor_); if (write_descriptor_ != -1) ::close(write_descriptor_); } void pipe_select_interrupter::recreate() { close_descriptors(); write_descriptor_ = -1; read_descriptor_ = -1; open_descriptors(); } void pipe_select_interrupter::interrupt() { char byte = 0; signed_size_type result = ::write(write_descriptor_, &byte, 1); (void)result; } bool pipe_select_interrupter::reset() { for (;;) { char data[1024]; signed_size_type bytes_read = ::read(read_descriptor_, data, sizeof(data)); if (bytes_read < 0 && errno == EINTR) continue; bool was_interrupted = (bytes_read > 0); while (bytes_read == sizeof(data)) bytes_read = ::read(read_descriptor_, data, sizeof(data)); return was_interrupted; } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_HAS_EVENTFD) #endif // !defined(__SYMBIAN32__) #endif // !defined(__CYGWIN__) #endif // !defined(ASIO_WINDOWS) #endif // !defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_IMPL_PIPE_SELECT_INTERRUPTER_IPP galera-26.4.3/asio/asio/detail/impl/win_object_handle_service.ipp0000664000177500017540000002712313540715002023344 0ustar dbartmy// // detail/impl/win_object_handle_service.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2011 Boris Schaeling (boris@highscore.de) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_OBJECT_HANDLE_SERVICE_IPP #define ASIO_DETAIL_IMPL_WIN_OBJECT_HANDLE_SERVICE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) #include "asio/detail/win_object_handle_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { win_object_handle_service::win_object_handle_service( asio::io_service& io_service) : io_service_(asio::use_service(io_service)), mutex_(), impl_list_(0), shutdown_(false) { } void win_object_handle_service::shutdown_service() { mutex::scoped_lock lock(mutex_); // Setting this flag to true prevents new objects from being registered, and // new asynchronous wait operations from being started. We only need to worry // about cleaning up the operations that are currently in progress. shutdown_ = true; op_queue ops; for (implementation_type* impl = impl_list_; impl; impl = impl->next_) ops.push(impl->op_queue_); lock.unlock(); io_service_.abandon_operations(ops); } void win_object_handle_service::construct( win_object_handle_service::implementation_type& impl) { impl.handle_ = INVALID_HANDLE_VALUE; impl.wait_handle_ = INVALID_HANDLE_VALUE; impl.owner_ = this; // Insert implementation into linked list of all implementations. mutex::scoped_lock lock(mutex_); if (!shutdown_) { impl.next_ = impl_list_; impl.prev_ = 0; if (impl_list_) impl_list_->prev_ = &impl; impl_list_ = &impl; } } void win_object_handle_service::move_construct( win_object_handle_service::implementation_type& impl, win_object_handle_service::implementation_type& other_impl) { mutex::scoped_lock lock(mutex_); // Insert implementation into linked list of all implementations. 
if (!shutdown_) { impl.next_ = impl_list_; impl.prev_ = 0; if (impl_list_) impl_list_->prev_ = &impl; impl_list_ = &impl; } impl.handle_ = other_impl.handle_; other_impl.handle_ = INVALID_HANDLE_VALUE; impl.wait_handle_ = other_impl.wait_handle_; other_impl.wait_handle_ = INVALID_HANDLE_VALUE; impl.op_queue_.push(other_impl.op_queue_); impl.owner_ = this; // We must not hold the lock while calling UnregisterWaitEx. This is because // the registered callback function might be invoked while we are waiting for // UnregisterWaitEx to complete. lock.unlock(); if (impl.wait_handle_ != INVALID_HANDLE_VALUE) ::UnregisterWaitEx(impl.wait_handle_, INVALID_HANDLE_VALUE); if (!impl.op_queue_.empty()) register_wait_callback(impl, lock); } void win_object_handle_service::move_assign( win_object_handle_service::implementation_type& impl, win_object_handle_service& other_service, win_object_handle_service::implementation_type& other_impl) { asio::error_code ignored_ec; close(impl, ignored_ec); mutex::scoped_lock lock(mutex_); if (this != &other_service) { // Remove implementation from linked list of all implementations. if (impl_list_ == &impl) impl_list_ = impl.next_; if (impl.prev_) impl.prev_->next_ = impl.next_; if (impl.next_) impl.next_->prev_= impl.prev_; impl.next_ = 0; impl.prev_ = 0; } impl.handle_ = other_impl.handle_; other_impl.handle_ = INVALID_HANDLE_VALUE; impl.wait_handle_ = other_impl.wait_handle_; other_impl.wait_handle_ = INVALID_HANDLE_VALUE; impl.op_queue_.push(other_impl.op_queue_); impl.owner_ = this; if (this != &other_service) { // Insert implementation into linked list of all implementations. impl.next_ = other_service.impl_list_; impl.prev_ = 0; if (other_service.impl_list_) other_service.impl_list_->prev_ = &impl; other_service.impl_list_ = &impl; } // We must not hold the lock while calling UnregisterWaitEx. This is because // the registered callback function might be invoked while we are waiting for // UnregisterWaitEx to complete. 
lock.unlock(); if (impl.wait_handle_ != INVALID_HANDLE_VALUE) ::UnregisterWaitEx(impl.wait_handle_, INVALID_HANDLE_VALUE); if (!impl.op_queue_.empty()) register_wait_callback(impl, lock); } void win_object_handle_service::destroy( win_object_handle_service::implementation_type& impl) { mutex::scoped_lock lock(mutex_); // Remove implementation from linked list of all implementations. if (impl_list_ == &impl) impl_list_ = impl.next_; if (impl.prev_) impl.prev_->next_ = impl.next_; if (impl.next_) impl.next_->prev_= impl.prev_; impl.next_ = 0; impl.prev_ = 0; if (is_open(impl)) { ASIO_HANDLER_OPERATION(("object_handle", &impl, "close")); HANDLE wait_handle = impl.wait_handle_; impl.wait_handle_ = INVALID_HANDLE_VALUE; op_queue ops; while (wait_op* op = impl.op_queue_.front()) { op->ec_ = asio::error::operation_aborted; impl.op_queue_.pop(); ops.push(op); } // We must not hold the lock while calling UnregisterWaitEx. This is // because the registered callback function might be invoked while we are // waiting for UnregisterWaitEx to complete. 
lock.unlock(); if (wait_handle != INVALID_HANDLE_VALUE) ::UnregisterWaitEx(wait_handle, INVALID_HANDLE_VALUE); ::CloseHandle(impl.handle_); impl.handle_ = INVALID_HANDLE_VALUE; io_service_.post_deferred_completions(ops); } } asio::error_code win_object_handle_service::assign( win_object_handle_service::implementation_type& impl, const native_handle_type& handle, asio::error_code& ec) { if (is_open(impl)) { ec = asio::error::already_open; return ec; } impl.handle_ = handle; ec = asio::error_code(); return ec; } asio::error_code win_object_handle_service::close( win_object_handle_service::implementation_type& impl, asio::error_code& ec) { if (is_open(impl)) { ASIO_HANDLER_OPERATION(("object_handle", &impl, "close")); mutex::scoped_lock lock(mutex_); HANDLE wait_handle = impl.wait_handle_; impl.wait_handle_ = INVALID_HANDLE_VALUE; op_queue completed_ops; while (wait_op* op = impl.op_queue_.front()) { impl.op_queue_.pop(); op->ec_ = asio::error::operation_aborted; completed_ops.push(op); } // We must not hold the lock while calling UnregisterWaitEx. This is // because the registered callback function might be invoked while we are // waiting for UnregisterWaitEx to complete. 
lock.unlock(); if (wait_handle != INVALID_HANDLE_VALUE) ::UnregisterWaitEx(wait_handle, INVALID_HANDLE_VALUE); if (::CloseHandle(impl.handle_)) { impl.handle_ = INVALID_HANDLE_VALUE; ec = asio::error_code(); } else { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); } io_service_.post_deferred_completions(completed_ops); } else { ec = asio::error_code(); } return ec; } asio::error_code win_object_handle_service::cancel( win_object_handle_service::implementation_type& impl, asio::error_code& ec) { if (is_open(impl)) { ASIO_HANDLER_OPERATION(("object_handle", &impl, "cancel")); mutex::scoped_lock lock(mutex_); HANDLE wait_handle = impl.wait_handle_; impl.wait_handle_ = INVALID_HANDLE_VALUE; op_queue completed_ops; while (wait_op* op = impl.op_queue_.front()) { op->ec_ = asio::error::operation_aborted; impl.op_queue_.pop(); completed_ops.push(op); } // We must not hold the lock while calling UnregisterWaitEx. This is // because the registered callback function might be invoked while we are // waiting for UnregisterWaitEx to complete. 
lock.unlock(); if (wait_handle != INVALID_HANDLE_VALUE) ::UnregisterWaitEx(wait_handle, INVALID_HANDLE_VALUE); ec = asio::error_code(); io_service_.post_deferred_completions(completed_ops); } else { ec = asio::error::bad_descriptor; } return ec; } void win_object_handle_service::wait( win_object_handle_service::implementation_type& impl, asio::error_code& ec) { switch (::WaitForSingleObject(impl.handle_, INFINITE)) { case WAIT_FAILED: { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); break; } case WAIT_OBJECT_0: case WAIT_ABANDONED: default: ec = asio::error_code(); break; } } void win_object_handle_service::start_wait_op( win_object_handle_service::implementation_type& impl, wait_op* op) { io_service_.work_started(); if (is_open(impl)) { mutex::scoped_lock lock(mutex_); if (!shutdown_) { impl.op_queue_.push(op); // Only the first operation to be queued gets to register a wait callback. // Subsequent operations have to wait for the first to finish. 
if (impl.op_queue_.front() == op) register_wait_callback(impl, lock); } else { lock.unlock(); io_service_.post_deferred_completion(op); } } else { op->ec_ = asio::error::bad_descriptor; io_service_.post_deferred_completion(op); } } void win_object_handle_service::register_wait_callback( win_object_handle_service::implementation_type& impl, mutex::scoped_lock& lock) { lock.lock(); if (!RegisterWaitForSingleObject(&impl.wait_handle_, impl.handle_, &win_object_handle_service::wait_callback, &impl, INFINITE, WT_EXECUTEONLYONCE)) { DWORD last_error = ::GetLastError(); asio::error_code ec(last_error, asio::error::get_system_category()); op_queue completed_ops; while (wait_op* op = impl.op_queue_.front()) { op->ec_ = ec; impl.op_queue_.pop(); completed_ops.push(op); } lock.unlock(); io_service_.post_deferred_completions(completed_ops); } } void win_object_handle_service::wait_callback(PVOID param, BOOLEAN) { implementation_type* impl = static_cast(param); mutex::scoped_lock lock(impl->owner_->mutex_); if (impl->wait_handle_ != INVALID_HANDLE_VALUE) { ::UnregisterWaitEx(impl->wait_handle_, NULL); impl->wait_handle_ = INVALID_HANDLE_VALUE; } if (wait_op* op = impl->op_queue_.front()) { op_queue completed_ops; op->ec_ = asio::error_code(); impl->op_queue_.pop(); completed_ops.push(op); if (!impl->op_queue_.empty()) { if (!RegisterWaitForSingleObject(&impl->wait_handle_, impl->handle_, &win_object_handle_service::wait_callback, param, INFINITE, WT_EXECUTEONLYONCE)) { DWORD last_error = ::GetLastError(); asio::error_code ec(last_error, asio::error::get_system_category()); while ((op = impl->op_queue_.front()) != 0) { op->ec_ = ec; impl->op_queue_.pop(); completed_ops.push(op); } } } io_service_impl& ios = impl->owner_->io_service_; lock.unlock(); ios.post_deferred_completions(completed_ops); } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) #endif // 
ASIO_DETAIL_IMPL_WIN_OBJECT_HANDLE_SERVICE_IPP galera-26.4.3/asio/asio/detail/impl/epoll_reactor.hpp0000664000177500017540000000362313540715002021016 0ustar dbartmy// // detail/impl/epoll_reactor.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_EPOLL_REACTOR_HPP #define ASIO_DETAIL_IMPL_EPOLL_REACTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #if defined(ASIO_HAS_EPOLL) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template void epoll_reactor::add_timer_queue(timer_queue& queue) { do_add_timer_queue(queue); } template void epoll_reactor::remove_timer_queue(timer_queue& queue) { do_remove_timer_queue(queue); } template void epoll_reactor::schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op) { mutex::scoped_lock lock(mutex_); if (shutdown_) { io_service_.post_immediate_completion(op, false); return; } bool earliest = queue.enqueue_timer(time, timer, op); io_service_.work_started(); if (earliest) update_timeout(); } template std::size_t epoll_reactor::cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled) { mutex::scoped_lock lock(mutex_); op_queue ops; std::size_t n = queue.cancel_timer(timer, ops, max_cancelled); lock.unlock(); io_service_.post_deferred_completions(ops); return n; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_EPOLL) #endif // ASIO_DETAIL_IMPL_EPOLL_REACTOR_HPP galera-26.4.3/asio/asio/detail/socket_option.hpp0000664000177500017540000001421413540715002020101 0ustar dbartmy// // detail/socket_option.hpp // 
~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SOCKET_OPTION_HPP #define ASIO_DETAIL_SOCKET_OPTION_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/detail/socket_types.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { namespace socket_option { // Helper template for implementing boolean-based options. template class boolean { public: // Default constructor. boolean() : value_(0) { } // Construct with a specific option value. explicit boolean(bool v) : value_(v ? 1 : 0) { } // Set the current value of the boolean. boolean& operator=(bool v) { value_ = v ? 1 : 0; return *this; } // Get the current value of the boolean. bool value() const { return !!value_; } // Convert to bool. operator bool() const { return !!value_; } // Test for false. bool operator!() const { return !value_; } // Get the level of the socket option. template int level(const Protocol&) const { return Level; } // Get the name of the socket option. template int name(const Protocol&) const { return Name; } // Get the address of the boolean data. template int* data(const Protocol&) { return &value_; } // Get the address of the boolean data. template const int* data(const Protocol&) const { return &value_; } // Get the size of the boolean data. template std::size_t size(const Protocol&) const { return sizeof(value_); } // Set the size of the boolean data. template void resize(const Protocol&, std::size_t s) { // On some platforms (e.g. 
Windows Vista), the getsockopt function will // return the size of a boolean socket option as one byte, even though a // four byte integer was passed in. switch (s) { case sizeof(char): value_ = *reinterpret_cast(&value_) ? 1 : 0; break; case sizeof(value_): break; default: { std::length_error ex("boolean socket option resize"); asio::detail::throw_exception(ex); } } } private: int value_; }; // Helper template for implementing integer options. template class integer { public: // Default constructor. integer() : value_(0) { } // Construct with a specific option value. explicit integer(int v) : value_(v) { } // Set the value of the int option. integer& operator=(int v) { value_ = v; return *this; } // Get the current value of the int option. int value() const { return value_; } // Get the level of the socket option. template int level(const Protocol&) const { return Level; } // Get the name of the socket option. template int name(const Protocol&) const { return Name; } // Get the address of the int data. template int* data(const Protocol&) { return &value_; } // Get the address of the int data. template const int* data(const Protocol&) const { return &value_; } // Get the size of the int data. template std::size_t size(const Protocol&) const { return sizeof(value_); } // Set the size of the int data. template void resize(const Protocol&, std::size_t s) { if (s != sizeof(value_)) { std::length_error ex("integer socket option resize"); asio::detail::throw_exception(ex); } } private: int value_; }; // Helper template for implementing linger options. template class linger { public: // Default constructor. linger() { value_.l_onoff = 0; value_.l_linger = 0; } // Construct with specific option values. linger(bool e, int t) { enabled(e); timeout ASIO_PREVENT_MACRO_SUBSTITUTION(t); } // Set the value for whether linger is enabled. void enabled(bool value) { value_.l_onoff = value ? 1 : 0; } // Get the value for whether linger is enabled. 
bool enabled() const { return value_.l_onoff != 0; } // Set the value for the linger timeout. void timeout ASIO_PREVENT_MACRO_SUBSTITUTION(int value) { #if defined(WIN32) value_.l_linger = static_cast(value); #else value_.l_linger = value; #endif } // Get the value for the linger timeout. int timeout ASIO_PREVENT_MACRO_SUBSTITUTION() const { return static_cast(value_.l_linger); } // Get the level of the socket option. template int level(const Protocol&) const { return Level; } // Get the name of the socket option. template int name(const Protocol&) const { return Name; } // Get the address of the linger data. template detail::linger_type* data(const Protocol&) { return &value_; } // Get the address of the linger data. template const detail::linger_type* data(const Protocol&) const { return &value_; } // Get the size of the linger data. template std::size_t size(const Protocol&) const { return sizeof(value_); } // Set the size of the int data. template void resize(const Protocol&, std::size_t s) { if (s != sizeof(value_)) { std::length_error ex("linger socket option resize"); asio::detail::throw_exception(ex); } } private: detail::linger_type value_; }; } // namespace socket_option } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_SOCKET_OPTION_HPP galera-26.4.3/asio/asio/detail/std_static_mutex.hpp0000664000177500017540000000270213540715002020603 0ustar dbartmy// // detail/std_static_mutex.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_STD_STATIC_MUTEX_HPP #define ASIO_DETAIL_STD_STATIC_MUTEX_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/scoped_lock.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class std_event; class std_static_mutex : private noncopyable { public: typedef asio::detail::scoped_lock scoped_lock; // Constructor. std_static_mutex(int) { } // Destructor. ~std_static_mutex() { } // Initialise the mutex. void init() { // Nothing to do. } // Lock the mutex. void lock() { mutex_.lock(); } // Unlock the mutex. void unlock() { mutex_.unlock(); } private: friend class std_event; std::mutex mutex_; }; #define ASIO_STD_STATIC_MUTEX_INIT 0 } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) #endif // ASIO_DETAIL_STD_STATIC_MUTEX_HPP galera-26.4.3/asio/asio/detail/pop_options.hpp0000664000177500017540000000463713540715002017602 0ustar dbartmy// // detail/pop_options.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // // No header guard #if defined(__COMO__) // Comeau C++ #elif defined(__DMC__) // Digital Mars C++ #elif defined(__INTEL_COMPILER) || defined(__ICL) \ || defined(__ICC) || defined(__ECC) // Intel C++ # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4) # pragma GCC visibility pop # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4) #elif defined(__clang__) // Clang # if defined(__OBJC__) # if !defined(__APPLE_CC__) || (__APPLE_CC__ <= 1) # if defined(ASIO_OBJC_WORKAROUND) # undef Protocol # undef id # undef ASIO_OBJC_WORKAROUND # endif # endif # endif # if !defined(_WIN32) && !defined(__WIN32__) && !defined(WIN32) # pragma GCC visibility pop # endif // !defined(_WIN32) && !defined(__WIN32__) && !defined(WIN32) #elif defined(__GNUC__) // GNU C++ # if defined(__MINGW32__) || defined(__CYGWIN__) # pragma pack (pop) # endif # if defined(__OBJC__) # if !defined(__APPLE_CC__) || (__APPLE_CC__ <= 1) # if defined(ASIO_OBJC_WORKAROUND) # undef Protocol # undef id # undef ASIO_OBJC_WORKAROUND # endif # endif # endif # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4) # pragma GCC visibility pop # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4) #elif defined(__KCC) // Kai C++ #elif defined(__sgi) // SGI MIPSpro C++ #elif defined(__DECCXX) // Compaq Tru64 Unix cxx #elif defined(__ghs) // Greenhills C++ #elif defined(__BORLANDC__) // Borland C++ # pragma option pop # pragma nopushoptwarn # pragma nopackwarning #elif defined(__MWERKS__) // Metrowerks CodeWarrior #elif defined(__SUNPRO_CC) // Sun Workshop Compiler C++ #elif defined(__HP_aCC) // HP aCC #elif defined(__MRC__) || defined(__SC__) // MPW MrCpp or SCpp #elif defined(__IBMCPP__) // IBM Visual Age #elif defined(_MSC_VER) // Microsoft Visual C++ // // Must remain the last #elif since some other vendors (Metrowerks, for example) // also #define _MSC_VER # pragma warning (pop) # 
pragma pack (pop) # if defined(__cplusplus_cli) || defined(__cplusplus_winrt) # if defined(ASIO_CLR_WORKAROUND) # undef generic # undef ASIO_CLR_WORKAROUND # endif # endif #endif galera-26.4.3/asio/asio/detail/strand_service.hpp0000664000177500017540000001076613540715002020244 0ustar dbartmy// // detail/strand_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_STRAND_SERVICE_HPP #define ASIO_DETAIL_STRAND_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/io_service.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/scoped_ptr.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Default service implementation for a strand. class strand_service : public asio::detail::service_base { private: // Helper class to re-post the strand on exit. struct on_do_complete_exit; // Helper class to re-post the strand on exit. struct on_dispatch_exit; public: // The underlying implementation of a strand. class strand_impl : public operation { public: strand_impl(); private: // Only this service will have access to the internal values. friend class strand_service; friend struct on_do_complete_exit; friend struct on_dispatch_exit; // Mutex to protect access to internal data. asio::detail::mutex mutex_; // Indicates whether the strand is currently "locked" by a handler. This // means that there is a handler upcall in progress, or that the strand // itself has been scheduled in order to invoke some pending handlers. 
bool locked_; // The handlers that are waiting on the strand but should not be run until // after the next time the strand is scheduled. This queue must only be // modified while the mutex is locked. op_queue waiting_queue_; // The handlers that are ready to be run. Logically speaking, these are the // handlers that hold the strand's lock. The ready queue is only modified // from within the strand and so may be accessed without locking the mutex. op_queue ready_queue_; }; typedef strand_impl* implementation_type; // Construct a new strand service for the specified io_service. ASIO_DECL explicit strand_service(asio::io_service& io_service); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Construct a new strand implementation. ASIO_DECL void construct(implementation_type& impl); // Request the io_service to invoke the given handler. template void dispatch(implementation_type& impl, Handler& handler); // Request the io_service to invoke the given handler and return immediately. template void post(implementation_type& impl, Handler& handler); // Determine whether the strand is running in the current thread. ASIO_DECL bool running_in_this_thread( const implementation_type& impl) const; private: // Helper function to dispatch a handler. Returns true if the handler should // be dispatched immediately. ASIO_DECL bool do_dispatch(implementation_type& impl, operation* op); // Helper fiunction to post a handler. ASIO_DECL void do_post(implementation_type& impl, operation* op, bool is_continuation); ASIO_DECL static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& ec, std::size_t bytes_transferred); // The io_service implementation used to post completions. io_service_impl& io_service_; // Mutex to protect access to the array of implementations. asio::detail::mutex mutex_; // Number of implementations shared between all strand objects. 
#if defined(ASIO_STRAND_IMPLEMENTATIONS) enum { num_implementations = ASIO_STRAND_IMPLEMENTATIONS }; #else // defined(ASIO_STRAND_IMPLEMENTATIONS) enum { num_implementations = 193 }; #endif // defined(ASIO_STRAND_IMPLEMENTATIONS) // Pool of implementations. scoped_ptr implementations_[num_implementations]; // Extra value used when hashing to prevent recycled memory locations from // getting the same strand implementation. std::size_t salt_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/detail/impl/strand_service.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/strand_service.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_DETAIL_STRAND_SERVICE_HPP galera-26.4.3/asio/asio/detail/macos_fenced_block.hpp0000664000177500017540000000235413540715002021003 0ustar dbartmy// // detail/macos_fenced_block.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_MACOS_FENCED_BLOCK_HPP #define ASIO_DETAIL_MACOS_FENCED_BLOCK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(__MACH__) && defined(__APPLE__) #include #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class macos_fenced_block : private noncopyable { public: enum half_t { half }; enum full_t { full }; // Constructor for a half fenced block. explicit macos_fenced_block(half_t) { } // Constructor for a full fenced block. explicit macos_fenced_block(full_t) { OSMemoryBarrier(); } // Destructor. 
~macos_fenced_block() { OSMemoryBarrier(); } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(__MACH__) && defined(__APPLE__) #endif // ASIO_DETAIL_MACOS_FENCED_BLOCK_HPP galera-26.4.3/asio/asio/detail/local_free_on_block_exit.hpp0000664000177500017540000000251713540715002022216 0ustar dbartmy// // detail/local_free_on_block_exit.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_LOCAL_FREE_ON_BLOCK_EXIT_HPP #define ASIO_DETAIL_LOCAL_FREE_ON_BLOCK_EXIT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) #if !defined(ASIO_WINDOWS_APP) #include "asio/detail/noncopyable.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class local_free_on_block_exit : private noncopyable { public: // Constructor blocks all signals for the calling thread. explicit local_free_on_block_exit(void* p) : p_(p) { } // Destructor restores the previous signal mask. ~local_free_on_block_exit() { ::LocalFree(p_); } private: void* p_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_WINDOWS_APP) #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) #endif // ASIO_DETAIL_LOCAL_FREE_ON_BLOCK_EXIT_HPP galera-26.4.3/asio/asio/detail/reactive_socket_sendto_op.hpp0000664000177500017540000000771213540715002022452 0ustar dbartmy// // detail/reactive_socket_sendto_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_SOCKET_SENDTO_OP_HPP #define ASIO_DETAIL_REACTIVE_SOCKET_SENDTO_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/addressof.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class reactive_socket_sendto_op_base : public reactor_op { public: reactive_socket_sendto_op_base(socket_type socket, const ConstBufferSequence& buffers, const Endpoint& endpoint, socket_base::message_flags flags, func_type complete_func) : reactor_op(&reactive_socket_sendto_op_base::do_perform, complete_func), socket_(socket), buffers_(buffers), destination_(endpoint), flags_(flags) { } static bool do_perform(reactor_op* base) { reactive_socket_sendto_op_base* o( static_cast(base)); buffer_sequence_adapter bufs(o->buffers_); return socket_ops::non_blocking_sendto(o->socket_, bufs.buffers(), bufs.count(), o->flags_, o->destination_.data(), o->destination_.size(), o->ec_, o->bytes_transferred_); } private: socket_type socket_; ConstBufferSequence buffers_; Endpoint destination_; socket_base::message_flags flags_; }; template class reactive_socket_sendto_op : public reactive_socket_sendto_op_base { public: ASIO_DEFINE_HANDLER_PTR(reactive_socket_sendto_op); reactive_socket_sendto_op(socket_type socket, const ConstBufferSequence& buffers, const Endpoint& endpoint, socket_base::message_flags flags, Handler& handler) : reactive_socket_sendto_op_base(socket, buffers, endpoint, flags, 
&reactive_socket_sendto_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } static void do_complete(io_service_impl* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. reactive_socket_sendto_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; ASIO_HANDLER_COMPLETION((o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, o->bytes_transferred_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); asio_handler_invoke_helpers::invoke(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_REACTIVE_SOCKET_SENDTO_OP_HPP galera-26.4.3/asio/asio/detail/signal_init.hpp0000664000177500017540000000172713540715002017526 0ustar dbartmy// // detail/signal_init.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SIGNAL_INIT_HPP #define ASIO_DETAIL_SIGNAL_INIT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #include #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class signal_init { public: // Constructor. signal_init() { std::signal(Signal, SIG_IGN); } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #endif // ASIO_DETAIL_SIGNAL_INIT_HPP galera-26.4.3/asio/asio/detail/task_io_service.hpp0000664000177500017540000001376413540715002020403 0ustar dbartmy// // detail/task_io_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_TASK_IO_SERVICE_HPP #define ASIO_DETAIL_TASK_IO_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_IOCP) #include "asio/error_code.hpp" #include "asio/io_service.hpp" #include "asio/detail/atomic_count.hpp" #include "asio/detail/call_stack.hpp" #include "asio/detail/event.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/reactor_fwd.hpp" #include "asio/detail/task_io_service_operation.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct task_io_service_thread_info; class task_io_service : public asio::detail::service_base { public: typedef task_io_service_operation operation; // Constructor. 
Specifies the number of concurrent threads that are likely to // run the io_service. If set to 1 certain optimisation are performed. ASIO_DECL task_io_service(asio::io_service& io_service, std::size_t concurrency_hint = 0); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Initialise the task, if required. ASIO_DECL void init_task(); // Run the event loop until interrupted or no more work. ASIO_DECL std::size_t run(asio::error_code& ec); // Run until interrupted or one operation is performed. ASIO_DECL std::size_t run_one(asio::error_code& ec); // Poll for operations without blocking. ASIO_DECL std::size_t poll(asio::error_code& ec); // Poll for one operation without blocking. ASIO_DECL std::size_t poll_one(asio::error_code& ec); // Interrupt the event processing loop. ASIO_DECL void stop(); // Determine whether the io_service is stopped. ASIO_DECL bool stopped() const; // Reset in preparation for a subsequent run invocation. ASIO_DECL void reset(); // Notify that some work has started. void work_started() { ++outstanding_work_; } // Notify that some work has finished. void work_finished() { if (--outstanding_work_ == 0) stop(); } // Return whether a handler can be dispatched immediately. bool can_dispatch() { return thread_call_stack::contains(this) != 0; } // Request invocation of the given handler. template void dispatch(Handler& handler); // Request invocation of the given handler and return immediately. template void post(Handler& handler); // Request invocation of the given operation and return immediately. Assumes // that work_started() has not yet been called for the operation. ASIO_DECL void post_immediate_completion( operation* op, bool is_continuation); // Request invocation of the given operation and return immediately. Assumes // that work_started() was previously called for the operation. 
ASIO_DECL void post_deferred_completion(operation* op); // Request invocation of the given operations and return immediately. Assumes // that work_started() was previously called for each operation. ASIO_DECL void post_deferred_completions(op_queue& ops); // Process unfinished operations as part of a shutdown_service operation. // Assumes that work_started() was previously called for the operations. ASIO_DECL void abandon_operations(op_queue& ops); private: // Structure containing thread-specific data. typedef task_io_service_thread_info thread_info; // Enqueue the given operation following a failed attempt to dispatch the // operation for immediate invocation. ASIO_DECL void do_dispatch(operation* op); // Run at most one operation. May block. ASIO_DECL std::size_t do_run_one(mutex::scoped_lock& lock, thread_info& this_thread, const asio::error_code& ec); // Poll for at most one operation. ASIO_DECL std::size_t do_poll_one(mutex::scoped_lock& lock, thread_info& this_thread, const asio::error_code& ec); // Stop the task and all idle threads. ASIO_DECL void stop_all_threads(mutex::scoped_lock& lock); // Wake a single idle thread, or the task, and always unlock the mutex. ASIO_DECL void wake_one_thread_and_unlock( mutex::scoped_lock& lock); // Helper class to perform task-related operations on block exit. struct task_cleanup; friend struct task_cleanup; // Helper class to call work-related operations on block exit. struct work_cleanup; friend struct work_cleanup; // Whether to optimise for single-threaded use cases. const bool one_thread_; // Mutex to protect access to internal data. mutable mutex mutex_; // Event to wake up blocked threads. event wakeup_event_; // The task to be run by this service. reactor* task_; // Operation object to represent the position of the task in the queue. struct task_operation : operation { task_operation() : operation(0) {} } task_operation_; // Whether the task has been interrupted. 
bool task_interrupted_; // The count of unfinished work. atomic_count outstanding_work_; // The queue of handlers that are ready to be delivered. op_queue op_queue_; // Flag to indicate that the dispatcher has been stopped. bool stopped_; // Flag to indicate that the dispatcher has been shut down. bool shutdown_; // Per-thread call stack to track the state of each thread in the io_service. typedef call_stack thread_call_stack; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/detail/impl/task_io_service.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/task_io_service.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // !defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_TASK_IO_SERVICE_HPP galera-26.4.3/asio/asio/detail/timer_queue.hpp0000664000177500017540000002123413540715002017545 0ustar dbartmy// // detail/timer_queue.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_TIMER_QUEUE_HPP #define ASIO_DETAIL_TIMER_QUEUE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/detail/cstdint.hpp" #include "asio/detail/date_time_fwd.hpp" #include "asio/detail/limits.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/timer_queue_base.hpp" #include "asio/detail/wait_op.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class timer_queue : public timer_queue_base { public: // The time type. typedef typename Time_Traits::time_type time_type; // The duration type. typedef typename Time_Traits::duration_type duration_type; // Per-timer data. 
class per_timer_data { public: per_timer_data() : next_(0), prev_(0) {} private: friend class timer_queue; // The operations waiting on the timer. op_queue op_queue_; // The index of the timer in the heap. std::size_t heap_index_; // Pointers to adjacent timers in a linked list. per_timer_data* next_; per_timer_data* prev_; }; // Constructor. timer_queue() : timers_(), heap_() { } // Add a new timer to the queue. Returns true if this is the timer that is // earliest in the queue, in which case the reactor's event demultiplexing // function call may need to be interrupted and restarted. bool enqueue_timer(const time_type& time, per_timer_data& timer, wait_op* op) { // Enqueue the timer object. if (timer.prev_ == 0 && &timer != timers_) { if (this->is_positive_infinity(time)) { // No heap entry is required for timers that never expire. timer.heap_index_ = (std::numeric_limits::max)(); } else { // Put the new timer at the correct position in the heap. This is done // first since push_back() can throw due to allocation failure. timer.heap_index_ = heap_.size(); heap_entry entry = { time, &timer }; heap_.push_back(entry); up_heap(heap_.size() - 1); } // Insert the new timer into the linked list of active timers. timer.next_ = timers_; timer.prev_ = 0; if (timers_) timers_->prev_ = &timer; timers_ = &timer; } // Enqueue the individual timer operation. timer.op_queue_.push(op); // Interrupt reactor only if newly added timer is first to expire. return timer.heap_index_ == 0 && timer.op_queue_.front() == op; } // Whether there are no timers in the queue. virtual bool empty() const { return timers_ == 0; } // Get the time for the timer that is earliest in the queue. virtual long wait_duration_msec(long max_duration) const { if (heap_.empty()) return max_duration; return this->to_msec( Time_Traits::to_posix_duration( Time_Traits::subtract(heap_[0].time_, Time_Traits::now())), max_duration); } // Get the time for the timer that is earliest in the queue. 
virtual long wait_duration_usec(long max_duration) const { if (heap_.empty()) return max_duration; return this->to_usec( Time_Traits::to_posix_duration( Time_Traits::subtract(heap_[0].time_, Time_Traits::now())), max_duration); } // Dequeue all timers not later than the current time. virtual void get_ready_timers(op_queue& ops) { if (!heap_.empty()) { const time_type now = Time_Traits::now(); while (!heap_.empty() && !Time_Traits::less_than(now, heap_[0].time_)) { per_timer_data* timer = heap_[0].timer_; ops.push(timer->op_queue_); remove_timer(*timer); } } } // Dequeue all timers. virtual void get_all_timers(op_queue& ops) { while (timers_) { per_timer_data* timer = timers_; timers_ = timers_->next_; ops.push(timer->op_queue_); timer->next_ = 0; timer->prev_ = 0; } heap_.clear(); } // Cancel and dequeue operations for the given timer. std::size_t cancel_timer(per_timer_data& timer, op_queue& ops, std::size_t max_cancelled = (std::numeric_limits::max)()) { std::size_t num_cancelled = 0; if (timer.prev_ != 0 || &timer == timers_) { while (wait_op* op = (num_cancelled != max_cancelled) ? timer.op_queue_.front() : 0) { op->ec_ = asio::error::operation_aborted; timer.op_queue_.pop(); ops.push(op); ++num_cancelled; } if (timer.op_queue_.empty()) remove_timer(timer); } return num_cancelled; } private: // Move the item at the given index up the heap to its correct position. void up_heap(std::size_t index) { while (index > 0) { std::size_t parent = (index - 1) / 2; if (!Time_Traits::less_than(heap_[index].time_, heap_[parent].time_)) break; swap_heap(index, parent); index = parent; } } // Move the item at the given index down the heap to its correct position. void down_heap(std::size_t index) { std::size_t child = index * 2 + 1; while (child < heap_.size()) { std::size_t min_child = (child + 1 == heap_.size() || Time_Traits::less_than( heap_[child].time_, heap_[child + 1].time_)) ? 
child : child + 1; if (Time_Traits::less_than(heap_[index].time_, heap_[min_child].time_)) break; swap_heap(index, min_child); index = min_child; child = index * 2 + 1; } } // Swap two entries in the heap. void swap_heap(std::size_t index1, std::size_t index2) { heap_entry tmp = heap_[index1]; heap_[index1] = heap_[index2]; heap_[index2] = tmp; heap_[index1].timer_->heap_index_ = index1; heap_[index2].timer_->heap_index_ = index2; } // Remove a timer from the heap and list of timers. void remove_timer(per_timer_data& timer) { // Remove the timer from the heap. std::size_t index = timer.heap_index_; if (!heap_.empty() && index < heap_.size()) { if (index == heap_.size() - 1) { heap_.pop_back(); } else { swap_heap(index, heap_.size() - 1); heap_.pop_back(); if (index > 0 && Time_Traits::less_than( heap_[index].time_, heap_[(index - 1) / 2].time_)) up_heap(index); else down_heap(index); } } // Remove the timer from the linked list of active timers. if (timers_ == &timer) timers_ = timer.next_; if (timer.prev_) timer.prev_->next_ = timer.next_; if (timer.next_) timer.next_->prev_= timer.prev_; timer.next_ = 0; timer.prev_ = 0; } // Determine if the specified absolute time is positive infinity. template static bool is_positive_infinity(const Time_Type&) { return false; } // Determine if the specified absolute time is positive infinity. template static bool is_positive_infinity( const boost::date_time::base_time& time) { return time.is_pos_infinity(); } // Helper function to convert a duration into milliseconds. template long to_msec(const Duration& d, long max_duration) const { if (d.ticks() <= 0) return 0; int64_t msec = d.total_milliseconds(); if (msec == 0) return 1; if (msec > max_duration) return max_duration; return static_cast(msec); } // Helper function to convert a duration into microseconds. 
template long to_usec(const Duration& d, long max_duration) const { if (d.ticks() <= 0) return 0; int64_t usec = d.total_microseconds(); if (usec == 0) return 1; if (usec > max_duration) return max_duration; return static_cast(usec); } // The head of a linked list of all active timers. per_timer_data* timers_; struct heap_entry { // The time when the timer should fire. time_type time_; // The associated timer with enqueued operations. per_timer_data* timer_; }; // The heap of timers, with the earliest timer at the front. std::vector heap_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_TIMER_QUEUE_HPP galera-26.4.3/asio/asio/detail/epoll_reactor.hpp0000664000177500017540000001757313540715002020066 0ustar dbartmy// // detail/epoll_reactor.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_EPOLL_REACTOR_HPP #define ASIO_DETAIL_EPOLL_REACTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_EPOLL) #include "asio/io_service.hpp" #include "asio/detail/atomic_count.hpp" #include "asio/detail/limits.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/object_pool.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/select_interrupter.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/timer_queue_base.hpp" #include "asio/detail/timer_queue_set.hpp" #include "asio/detail/wait_op.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class epoll_reactor : public asio::detail::service_base { public: enum op_types { read_op = 0, write_op = 1, connect_op = 1, except_op = 
2, max_ops = 3 }; // Per-descriptor queues. class descriptor_state : operation { friend class epoll_reactor; friend class object_pool_access; descriptor_state* next_; descriptor_state* prev_; mutex mutex_; epoll_reactor* reactor_; int descriptor_; uint32_t registered_events_; op_queue op_queue_[max_ops]; bool shutdown_; ASIO_DECL descriptor_state(); void set_ready_events(uint32_t events) { task_result_ = events; } ASIO_DECL operation* perform_io(uint32_t events); ASIO_DECL static void do_complete( io_service_impl* owner, operation* base, const asio::error_code& ec, std::size_t bytes_transferred); }; // Per-descriptor data. typedef descriptor_state* per_descriptor_data; // Constructor. ASIO_DECL epoll_reactor(asio::io_service& io_service); // Destructor. ASIO_DECL ~epoll_reactor(); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown_service(); // Recreate internal descriptors following a fork. ASIO_DECL void fork_service( asio::io_service::fork_event fork_ev); // Initialise the task. ASIO_DECL void init_task(); // Register a socket with the reactor. Returns 0 on success, system error // code on failure. ASIO_DECL int register_descriptor(socket_type descriptor, per_descriptor_data& descriptor_data); // Register a descriptor with an associated single operation. Returns 0 on // success, system error code on failure. ASIO_DECL int register_internal_descriptor( int op_type, socket_type descriptor, per_descriptor_data& descriptor_data, reactor_op* op); // Move descriptor registration from one descriptor_data object to another. ASIO_DECL void move_descriptor(socket_type descriptor, per_descriptor_data& target_descriptor_data, per_descriptor_data& source_descriptor_data); // Post a reactor operation for immediate completion. void post_immediate_completion(reactor_op* op, bool is_continuation) { io_service_.post_immediate_completion(op, is_continuation); } // Start a new operation. 
The reactor operation will be performed when the // given descriptor is flagged as ready, or an error has occurred. ASIO_DECL void start_op(int op_type, socket_type descriptor, per_descriptor_data& descriptor_data, reactor_op* op, bool is_continuation, bool allow_speculative); // Cancel all operations associated with the given descriptor. The // handlers associated with the descriptor will be invoked with the // operation_aborted error. ASIO_DECL void cancel_ops(socket_type descriptor, per_descriptor_data& descriptor_data); // Cancel any operations that are running against the descriptor and remove // its registration from the reactor. ASIO_DECL void deregister_descriptor(socket_type descriptor, per_descriptor_data& descriptor_data, bool closing); // Remote the descriptor's registration from the reactor. ASIO_DECL void deregister_internal_descriptor( socket_type descriptor, per_descriptor_data& descriptor_data); // Add a new timer queue to the reactor. template void add_timer_queue(timer_queue& timer_queue); // Remove a timer queue from the reactor. template void remove_timer_queue(timer_queue& timer_queue); // Schedule a new operation in the given timer queue to expire at the // specified absolute time. template void schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op); // Cancel the timer operations associated with the given token. Returns the // number of operations that have been posted or dispatched. template std::size_t cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled = (std::numeric_limits::max)()); // Run epoll once until interrupted or events are ready to be dispatched. ASIO_DECL void run(bool block, op_queue& ops); // Interrupt the select loop. ASIO_DECL void interrupt(); private: // The hint to pass to epoll_create to size its data structures. enum { epoll_size = 20000 }; // Create the epoll file descriptor. 
Throws an exception if the descriptor // cannot be created. ASIO_DECL static int do_epoll_create(); // Create the timerfd file descriptor. Does not throw. ASIO_DECL static int do_timerfd_create(); // Allocate a new descriptor state object. ASIO_DECL descriptor_state* allocate_descriptor_state(); // Free an existing descriptor state object. ASIO_DECL void free_descriptor_state(descriptor_state* s); // Helper function to add a new timer queue. ASIO_DECL void do_add_timer_queue(timer_queue_base& queue); // Helper function to remove a timer queue. ASIO_DECL void do_remove_timer_queue(timer_queue_base& queue); // Called to recalculate and update the timeout. ASIO_DECL void update_timeout(); // Get the timeout value for the epoll_wait call. The timeout value is // returned as a number of milliseconds. A return value of -1 indicates // that epoll_wait should block indefinitely. ASIO_DECL int get_timeout(); #if defined(ASIO_HAS_TIMERFD) // Get the timeout value for the timer descriptor. The return value is the // flag argument to be used when calling timerfd_settime. ASIO_DECL int get_timeout(itimerspec& ts); #endif // defined(ASIO_HAS_TIMERFD) // The io_service implementation used to post completions. io_service_impl& io_service_; // Mutex to protect access to internal data. mutex mutex_; // The interrupter is used to break a blocking epoll_wait call. select_interrupter interrupter_; // The epoll file descriptor. int epoll_fd_; // The timer file descriptor. int timer_fd_; // The timer queues. timer_queue_set timer_queues_; // Whether the service has been shut down. bool shutdown_; // Mutex to protect access to the registered descriptors. mutex registered_descriptors_mutex_; // Keep track of all registered descriptors. object_pool registered_descriptors_; // Helper class to do post-perform_io cleanup. 
struct perform_io_cleanup_on_block_exit; friend struct perform_io_cleanup_on_block_exit; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/detail/impl/epoll_reactor.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/epoll_reactor.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_EPOLL) #endif // ASIO_DETAIL_EPOLL_REACTOR_HPP galera-26.4.3/asio/asio/ssl.hpp0000664000177500017540000000151613540715002014561 0ustar dbartmy// // ssl.hpp // ~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_HPP #define ASIO_SSL_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/ssl/basic_context.hpp" #include "asio/ssl/context.hpp" #include "asio/ssl/context_base.hpp" #include "asio/ssl/context_service.hpp" #include "asio/ssl/error.hpp" #include "asio/ssl/rfc2818_verification.hpp" #include "asio/ssl/stream.hpp" #include "asio/ssl/stream_base.hpp" #include "asio/ssl/stream_service.hpp" #include "asio/ssl/verify_context.hpp" #include "asio/ssl/verify_mode.hpp" #endif // ASIO_SSL_HPP galera-26.4.3/asio/asio/buffered_write_stream_fwd.hpp0000664000177500017540000000121013540715002021156 0ustar dbartmy// // buffered_write_stream_fwd.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BUFFERED_WRITE_STREAM_FWD_HPP #define ASIO_BUFFERED_WRITE_STREAM_FWD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) namespace asio { template class buffered_write_stream; } // namespace asio #endif // ASIO_BUFFERED_WRITE_STREAM_FWD_HPP galera-26.4.3/asio/asio/raw_socket_service.hpp0000664000177500017540000003215713540715002017646 0ustar dbartmy// // raw_socket_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_RAW_SOCKET_SERVICE_HPP #define ASIO_RAW_SOCKET_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/async_result.hpp" #include "asio/detail/type_traits.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #if defined(ASIO_WINDOWS_RUNTIME) # include "asio/detail/null_socket_service.hpp" #elif defined(ASIO_HAS_IOCP) # include "asio/detail/win_iocp_socket_service.hpp" #else # include "asio/detail/reactive_socket_service.hpp" #endif #include "asio/detail/push_options.hpp" namespace asio { /// Default service implementation for a raw socket. template class raw_socket_service #if defined(GENERATING_DOCUMENTATION) : public asio::io_service::service #else : public asio::detail::service_base > #endif { public: #if defined(GENERATING_DOCUMENTATION) /// The unique service identifier. static asio::io_service::id id; #endif /// The protocol type. typedef Protocol protocol_type; /// The endpoint type. typedef typename Protocol::endpoint endpoint_type; private: // The type of the platform-specific implementation. 
#if defined(ASIO_WINDOWS_RUNTIME) typedef detail::null_socket_service service_impl_type; #elif defined(ASIO_HAS_IOCP) typedef detail::win_iocp_socket_service service_impl_type; #else typedef detail::reactive_socket_service service_impl_type; #endif public: /// The type of a raw socket. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined implementation_type; #else typedef typename service_impl_type::implementation_type implementation_type; #endif /// (Deprecated: Use native_handle_type.) The native socket type. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined native_type; #else typedef typename service_impl_type::native_handle_type native_type; #endif /// The native socket type. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined native_handle_type; #else typedef typename service_impl_type::native_handle_type native_handle_type; #endif /// Construct a new raw socket service for the specified io_service. explicit raw_socket_service(asio::io_service& io_service) : asio::detail::service_base< raw_socket_service >(io_service), service_impl_(io_service) { } /// Construct a new raw socket implementation. void construct(implementation_type& impl) { service_impl_.construct(impl); } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a new raw socket implementation. void move_construct(implementation_type& impl, implementation_type& other_impl) { service_impl_.move_construct(impl, other_impl); } /// Move-assign from another raw socket implementation. void move_assign(implementation_type& impl, raw_socket_service& other_service, implementation_type& other_impl) { service_impl_.move_assign(impl, other_service.service_impl_, other_impl); } /// Move-construct a new raw socket implementation from another protocol /// type. 
template void converting_move_construct(implementation_type& impl, typename raw_socket_service< Protocol1>::implementation_type& other_impl, typename enable_if::value>::type* = 0) { service_impl_.template converting_move_construct( impl, other_impl); } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Destroy a raw socket implementation. void destroy(implementation_type& impl) { service_impl_.destroy(impl); } // Open a new raw socket implementation. asio::error_code open(implementation_type& impl, const protocol_type& protocol, asio::error_code& ec) { if (protocol.type() == ASIO_OS_DEF(SOCK_RAW)) service_impl_.open(impl, protocol, ec); else ec = asio::error::invalid_argument; return ec; } /// Assign an existing native socket to a raw socket. asio::error_code assign(implementation_type& impl, const protocol_type& protocol, const native_handle_type& native_socket, asio::error_code& ec) { return service_impl_.assign(impl, protocol, native_socket, ec); } /// Determine whether the socket is open. bool is_open(const implementation_type& impl) const { return service_impl_.is_open(impl); } /// Close a raw socket implementation. asio::error_code close(implementation_type& impl, asio::error_code& ec) { return service_impl_.close(impl, ec); } /// (Deprecated: Use native_handle().) Get the native socket implementation. native_type native(implementation_type& impl) { return service_impl_.native_handle(impl); } /// Get the native socket implementation. native_handle_type native_handle(implementation_type& impl) { return service_impl_.native_handle(impl); } /// Cancel all asynchronous operations associated with the socket. asio::error_code cancel(implementation_type& impl, asio::error_code& ec) { return service_impl_.cancel(impl, ec); } /// Determine whether the socket is at the out-of-band data mark. 
bool at_mark(const implementation_type& impl, asio::error_code& ec) const { return service_impl_.at_mark(impl, ec); } /// Determine the number of bytes available for reading. std::size_t available(const implementation_type& impl, asio::error_code& ec) const { return service_impl_.available(impl, ec); } // Bind the raw socket to the specified local endpoint. asio::error_code bind(implementation_type& impl, const endpoint_type& endpoint, asio::error_code& ec) { return service_impl_.bind(impl, endpoint, ec); } /// Connect the raw socket to the specified endpoint. asio::error_code connect(implementation_type& impl, const endpoint_type& peer_endpoint, asio::error_code& ec) { return service_impl_.connect(impl, peer_endpoint, ec); } /// Start an asynchronous connect. template ASIO_INITFN_RESULT_TYPE(ConnectHandler, void (asio::error_code)) async_connect(implementation_type& impl, const endpoint_type& peer_endpoint, ASIO_MOVE_ARG(ConnectHandler) handler) { detail::async_result_init< ConnectHandler, void (asio::error_code)> init( ASIO_MOVE_CAST(ConnectHandler)(handler)); service_impl_.async_connect(impl, peer_endpoint, init.handler); return init.result.get(); } /// Set a socket option. template asio::error_code set_option(implementation_type& impl, const SettableSocketOption& option, asio::error_code& ec) { return service_impl_.set_option(impl, option, ec); } /// Get a socket option. template asio::error_code get_option(const implementation_type& impl, GettableSocketOption& option, asio::error_code& ec) const { return service_impl_.get_option(impl, option, ec); } /// Perform an IO control command on the socket. template asio::error_code io_control(implementation_type& impl, IoControlCommand& command, asio::error_code& ec) { return service_impl_.io_control(impl, command, ec); } /// Gets the non-blocking mode of the socket. bool non_blocking(const implementation_type& impl) const { return service_impl_.non_blocking(impl); } /// Sets the non-blocking mode of the socket. 
asio::error_code non_blocking(implementation_type& impl, bool mode, asio::error_code& ec) { return service_impl_.non_blocking(impl, mode, ec); } /// Gets the non-blocking mode of the native socket implementation. bool native_non_blocking(const implementation_type& impl) const { return service_impl_.native_non_blocking(impl); } /// Sets the non-blocking mode of the native socket implementation. asio::error_code native_non_blocking(implementation_type& impl, bool mode, asio::error_code& ec) { return service_impl_.native_non_blocking(impl, mode, ec); } /// Get the local endpoint. endpoint_type local_endpoint(const implementation_type& impl, asio::error_code& ec) const { return service_impl_.local_endpoint(impl, ec); } /// Get the remote endpoint. endpoint_type remote_endpoint(const implementation_type& impl, asio::error_code& ec) const { return service_impl_.remote_endpoint(impl, ec); } /// Disable sends or receives on the socket. asio::error_code shutdown(implementation_type& impl, socket_base::shutdown_type what, asio::error_code& ec) { return service_impl_.shutdown(impl, what, ec); } /// Send the given data to the peer. template std::size_t send(implementation_type& impl, const ConstBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { return service_impl_.send(impl, buffers, flags, ec); } /// Start an asynchronous send. template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_send(implementation_type& impl, const ConstBufferSequence& buffers, socket_base::message_flags flags, ASIO_MOVE_ARG(WriteHandler) handler) { detail::async_result_init< WriteHandler, void (asio::error_code, std::size_t)> init( ASIO_MOVE_CAST(WriteHandler)(handler)); service_impl_.async_send(impl, buffers, flags, init.handler); return init.result.get(); } /// Send raw data to the specified endpoint. 
template std::size_t send_to(implementation_type& impl, const ConstBufferSequence& buffers, const endpoint_type& destination, socket_base::message_flags flags, asio::error_code& ec) { return service_impl_.send_to(impl, buffers, destination, flags, ec); } /// Start an asynchronous send. template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_send_to(implementation_type& impl, const ConstBufferSequence& buffers, const endpoint_type& destination, socket_base::message_flags flags, ASIO_MOVE_ARG(WriteHandler) handler) { detail::async_result_init< WriteHandler, void (asio::error_code, std::size_t)> init( ASIO_MOVE_CAST(WriteHandler)(handler)); service_impl_.async_send_to(impl, buffers, destination, flags, init.handler); return init.result.get(); } /// Receive some data from the peer. template std::size_t receive(implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { return service_impl_.receive(impl, buffers, flags, ec); } /// Start an asynchronous receive. template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive(implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags flags, ASIO_MOVE_ARG(ReadHandler) handler) { detail::async_result_init< ReadHandler, void (asio::error_code, std::size_t)> init( ASIO_MOVE_CAST(ReadHandler)(handler)); service_impl_.async_receive(impl, buffers, flags, init.handler); return init.result.get(); } /// Receive raw data with the endpoint of the sender. template std::size_t receive_from(implementation_type& impl, const MutableBufferSequence& buffers, endpoint_type& sender_endpoint, socket_base::message_flags flags, asio::error_code& ec) { return service_impl_.receive_from(impl, buffers, sender_endpoint, flags, ec); } /// Start an asynchronous receive that will get the endpoint of the sender. 
template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive_from(implementation_type& impl, const MutableBufferSequence& buffers, endpoint_type& sender_endpoint, socket_base::message_flags flags, ASIO_MOVE_ARG(ReadHandler) handler) { detail::async_result_init< ReadHandler, void (asio::error_code, std::size_t)> init( ASIO_MOVE_CAST(ReadHandler)(handler)); service_impl_.async_receive_from(impl, buffers, sender_endpoint, flags, init.handler); return init.result.get(); } private: // Destroy all user-defined handler objects owned by the service. void shutdown_service() { service_impl_.shutdown_service(); } // The platform-specific implementation. service_impl_type service_impl_; }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_RAW_SOCKET_SERVICE_HPP galera-26.4.3/asio/asio/ssl/0000775000177500017540000000000013540715002014045 5ustar dbartmygalera-26.4.3/asio/asio/ssl/error.hpp0000664000177500017540000000447513540715002015721 0ustar dbartmy// // ssl/error.hpp // ~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_ERROR_HPP #define ASIO_SSL_ERROR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/error_code.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace error { enum ssl_errors { // Error numbers are those produced by openssl. 
}; extern ASIO_DECL const asio::error_category& get_ssl_category(); static const asio::error_category& ssl_category = asio::error::get_ssl_category(); } // namespace error namespace ssl { namespace error { enum stream_errors { #if defined(GENERATING_DOCUMENTATION) /// The underlying stream closed before the ssl stream gracefully shut down. stream_truncated #elif (OPENSSL_VERSION_NUMBER < 0x10100000L) && !defined(OPENSSL_IS_BORINGSSL) stream_truncated = ERR_PACK(ERR_LIB_SSL, 0, SSL_R_SHORT_READ) #else stream_truncated = 1 #endif }; extern ASIO_DECL const asio::error_category& get_stream_category(); static const asio::error_category& stream_category = asio::ssl::error::get_stream_category(); } // namespace error } // namespace ssl } // namespace asio #if defined(ASIO_HAS_STD_SYSTEM_ERROR) namespace std { template<> struct is_error_code_enum { static const bool value = true; }; template<> struct is_error_code_enum { static const bool value = true; }; } // namespace std #endif // defined(ASIO_HAS_STD_SYSTEM_ERROR) namespace asio { namespace error { inline asio::error_code make_error_code(ssl_errors e) { return asio::error_code( static_cast(e), get_ssl_category()); } } // namespace error namespace ssl { namespace error { inline asio::error_code make_error_code(stream_errors e) { return asio::error_code( static_cast(e), get_stream_category()); } } // namespace error } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/ssl/impl/error.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_SSL_ERROR_HPP galera-26.4.3/asio/asio/ssl/rfc2818_verification.hpp0000664000177500017540000000545513540715002020426 0ustar dbartmy// // ssl/rfc2818_verification.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_RFC2818_VERIFICATION_HPP #define ASIO_SSL_RFC2818_VERIFICATION_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_ENABLE_OLD_SSL) # include # include "asio/ssl/detail/openssl_types.hpp" # include "asio/ssl/verify_context.hpp" #endif // !defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { #if !defined(ASIO_ENABLE_OLD_SSL) /// Verifies a certificate against a hostname according to the rules described /// in RFC 2818. /** * @par Example * The following example shows how to synchronously open a secure connection to * a given host name: * @code * using asio::ip::tcp; * namespace ssl = asio::ssl; * typedef ssl::stream ssl_socket; * * // Create a context that uses the default paths for finding CA certificates. * ssl::context ctx(ssl::context::sslv23); * ctx.set_default_verify_paths(); * * // Open a socket and connect it to the remote host. * asio::io_service io_service; * ssl_socket sock(io_service, ctx); * tcp::resolver resolver(io_service); * tcp::resolver::query query("host.name", "https"); * asio::connect(sock.lowest_layer(), resolver.resolve(query)); * sock.lowest_layer().set_option(tcp::no_delay(true)); * * // Perform SSL handshake and verify the remote host's certificate. * sock.set_verify_mode(ssl::verify_peer); * sock.set_verify_callback(ssl::rfc2818_verification("host.name")); * sock.handshake(ssl_socket::client); * * // ... read and write as normal ... * @endcode */ class rfc2818_verification { public: /// The type of the function object's result. typedef bool result_type; /// Constructor. explicit rfc2818_verification(const std::string& host) : host_(host) { } /// Perform certificate verification. 
ASIO_DECL bool operator()(bool preverified, verify_context& ctx) const; private: // Helper function to check a host name against a pattern. ASIO_DECL static bool match_pattern(const char* pattern, std::size_t pattern_length, const char* host); // Helper function to check a host name against an IPv4 address // The host name to be checked. std::string host_; }; #endif // defined(ASIO_ENABLE_OLD_SSL) } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/ssl/impl/rfc2818_verification.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_SSL_RFC2818_VERIFICATION_HPP galera-26.4.3/asio/asio/ssl/detail/0000775000177500017540000000000013540715002015307 5ustar dbartmygalera-26.4.3/asio/asio/ssl/detail/engine.hpp0000664000177500017540000001257213540715002017274 0ustar dbartmy// // ssl/detail/engine.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_ENGINE_HPP #define ASIO_SSL_DETAIL_ENGINE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_ENABLE_OLD_SSL) # include "asio/buffer.hpp" # include "asio/detail/static_mutex.hpp" # include "asio/ssl/detail/openssl_types.hpp" # include "asio/ssl/detail/verify_callback.hpp" # include "asio/ssl/stream_base.hpp" # include "asio/ssl/verify_mode.hpp" #endif // !defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { #if !defined(ASIO_ENABLE_OLD_SSL) class engine { public: enum want { // Returned by functions to indicate that the engine wants input. The input // buffer should be updated to point to the data. 
The engine then needs to // be called again to retry the operation. want_input_and_retry = -2, // Returned by functions to indicate that the engine wants to write output. // The output buffer points to the data to be written. The engine then // needs to be called again to retry the operation. want_output_and_retry = -1, // Returned by functions to indicate that the engine doesn't need input or // output. want_nothing = 0, // Returned by functions to indicate that the engine wants to write output. // The output buffer points to the data to be written. After that the // operation is complete, and the engine does not need to be called again. want_output = 1 }; // Construct a new engine for the specified context. ASIO_DECL explicit engine(SSL_CTX* context); // Destructor. ASIO_DECL ~engine(); // Get the underlying implementation in the native type. ASIO_DECL SSL* native_handle(); // Set the peer verification mode. ASIO_DECL asio::error_code set_verify_mode( verify_mode v, asio::error_code& ec); // Set the peer verification depth. ASIO_DECL asio::error_code set_verify_depth( int depth, asio::error_code& ec); // Set a peer certificate verification callback. ASIO_DECL asio::error_code set_verify_callback( verify_callback_base* callback, asio::error_code& ec); // Perform an SSL handshake using either SSL_connect (client-side) or // SSL_accept (server-side). ASIO_DECL want handshake( stream_base::handshake_type type, asio::error_code& ec); // Perform a graceful shutdown of the SSL session. ASIO_DECL want shutdown(asio::error_code& ec); // Write bytes to the SSL session. ASIO_DECL want write(const asio::const_buffer& data, asio::error_code& ec, std::size_t& bytes_transferred); // Read bytes from the SSL session. ASIO_DECL want read(const asio::mutable_buffer& data, asio::error_code& ec, std::size_t& bytes_transferred); // Get output data to be written to the transport. 
ASIO_DECL asio::mutable_buffers_1 get_output( const asio::mutable_buffer& data); // Put input data that was read from the transport. ASIO_DECL asio::const_buffer put_input( const asio::const_buffer& data); // Map an error::eof code returned by the underlying transport according to // the type and state of the SSL session. Returns a const reference to the // error code object, suitable for passing to a completion handler. ASIO_DECL const asio::error_code& map_error_code( asio::error_code& ec) const; private: // Disallow copying and assignment. engine(const engine&); engine& operator=(const engine&); // Callback used when the SSL implementation wants to verify a certificate. ASIO_DECL static int verify_callback_function( int preverified, X509_STORE_CTX* ctx); // The SSL_accept function may not be thread safe. This mutex is used to // protect all calls to the SSL_accept function. ASIO_DECL static asio::detail::static_mutex& accept_mutex(); // Perform one operation. Returns >= 0 on success or error, want_read if the // operation needs more input, or want_write if it needs to write some output // before the operation can complete. ASIO_DECL want perform(int (engine::* op)(void*, std::size_t), void* data, std::size_t length, asio::error_code& ec, std::size_t* bytes_transferred); // Adapt the SSL_accept function to the signature needed for perform(). ASIO_DECL int do_accept(void*, std::size_t); // Adapt the SSL_connect function to the signature needed for perform(). ASIO_DECL int do_connect(void*, std::size_t); // Adapt the SSL_shutdown function to the signature needed for perform(). ASIO_DECL int do_shutdown(void*, std::size_t); // Adapt the SSL_read function to the signature needed for perform(). ASIO_DECL int do_read(void* data, std::size_t length); // Adapt the SSL_write function to the signature needed for perform(). 
ASIO_DECL int do_write(void* data, std::size_t length); SSL* ssl_; BIO* ext_bio_; }; #endif // !defined(ASIO_ENABLE_OLD_SSL) } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/ssl/detail/impl/engine.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_SSL_DETAIL_ENGINE_HPP galera-26.4.3/asio/asio/ssl/detail/handshake_op.hpp0000664000177500017540000000266613540715002020456 0ustar dbartmy// // ssl/detail/handshake_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_HANDSHAKE_OP_HPP #define ASIO_SSL_DETAIL_HANDSHAKE_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_ENABLE_OLD_SSL) # include "asio/ssl/detail/engine.hpp" #endif // !defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { #if !defined(ASIO_ENABLE_OLD_SSL) class handshake_op { public: handshake_op(stream_base::handshake_type type) : type_(type) { } engine::want operator()(engine& eng, asio::error_code& ec, std::size_t& bytes_transferred) const { bytes_transferred = 0; return eng.handshake(type_, ec); } template void call_handler(Handler& handler, const asio::error_code& ec, const std::size_t&) const { handler(ec); } private: stream_base::handshake_type type_; }; #endif // !defined(ASIO_ENABLE_OLD_SSL) } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_HANDSHAKE_OP_HPP galera-26.4.3/asio/asio/ssl/detail/verify_callback.hpp0000664000177500017540000000266613540715002021152 0ustar dbartmy// // 
ssl/detail/verify_callback.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_VERIFY_CALLBACK_HPP #define ASIO_SSL_DETAIL_VERIFY_CALLBACK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_ENABLE_OLD_SSL) # include "asio/ssl/verify_context.hpp" #endif // !defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { #if !defined(ASIO_ENABLE_OLD_SSL) class verify_callback_base { public: virtual ~verify_callback_base() { } virtual bool call(bool preverified, verify_context& ctx) = 0; }; template class verify_callback : public verify_callback_base { public: explicit verify_callback(VerifyCallback callback) : callback_(callback) { } virtual bool call(bool preverified, verify_context& ctx) { return callback_(preverified, ctx); } private: VerifyCallback callback_; }; #endif // !defined(ASIO_ENABLE_OLD_SSL) } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_VERIFY_CALLBACK_HPP galera-26.4.3/asio/asio/ssl/detail/openssl_init.hpp0000664000177500017540000000555613540715002020541 0ustar dbartmy// // ssl/detail/openssl_init.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_OPENSSL_INIT_HPP #define ASIO_SSL_DETAIL_OPENSSL_INIT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/shared_ptr.hpp" #include "asio/ssl/detail/openssl_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { class openssl_init_base : private noncopyable { protected: // Class that performs the actual initialisation. class do_init; // Helper function to manage a do_init singleton. The static instance of the // openssl_init object ensures that this function is always called before // main, and therefore before any other threads can get started. The do_init // instance must be static in this function to ensure that it gets // initialised before any other global objects try to use it. ASIO_DECL static asio::detail::shared_ptr instance(); #if !defined(SSL_OP_NO_COMPRESSION) \ && (OPENSSL_VERSION_NUMBER >= 0x00908000L) // Get an empty stack of compression methods, to be used when disabling // compression. ASIO_DECL static STACK_OF(SSL_COMP)* get_null_compression_methods(); #endif // !defined(SSL_OP_NO_COMPRESSION) // && (OPENSSL_VERSION_NUMBER >= 0x00908000L) }; template class openssl_init : private openssl_init_base { public: // Constructor. openssl_init() : ref_(instance()) { using namespace std; // For memmove. // Ensure openssl_init::instance_ is linked in. openssl_init* tmp = &instance_; memmove(&tmp, &tmp, sizeof(openssl_init*)); } // Destructor. 
~openssl_init() { } #if !defined(SSL_OP_NO_COMPRESSION) \ && (OPENSSL_VERSION_NUMBER >= 0x00908000L) using openssl_init_base::get_null_compression_methods; #endif // !defined(SSL_OP_NO_COMPRESSION) // && (OPENSSL_VERSION_NUMBER >= 0x00908000L) private: // Instance to force initialisation of openssl at global scope. static openssl_init instance_; // Reference to singleton do_init object to ensure that openssl does not get // cleaned up until the last user has finished with it. asio::detail::shared_ptr ref_; }; template openssl_init openssl_init::instance_; } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/ssl/detail/impl/openssl_init.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_SSL_DETAIL_OPENSSL_INIT_HPP galera-26.4.3/asio/asio/ssl/detail/write_op.hpp0000664000177500017540000000324313540715002017652 0ustar dbartmy// // ssl/detail/write_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_WRITE_OP_HPP #define ASIO_SSL_DETAIL_WRITE_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_ENABLE_OLD_SSL) # include "asio/detail/buffer_sequence_adapter.hpp" # include "asio/ssl/detail/engine.hpp" #endif // !defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { #if !defined(ASIO_ENABLE_OLD_SSL) template class write_op { public: write_op(const ConstBufferSequence& buffers) : buffers_(buffers) { } engine::want operator()(engine& eng, asio::error_code& ec, std::size_t& bytes_transferred) const { asio::const_buffer buffer = asio::detail::buffer_sequence_adapter::first(buffers_); return eng.write(buffer, ec, bytes_transferred); } template void call_handler(Handler& handler, const asio::error_code& ec, const std::size_t& bytes_transferred) const { handler(ec, bytes_transferred); } private: ConstBufferSequence buffers_; }; #endif // !defined(ASIO_ENABLE_OLD_SSL) } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_WRITE_OP_HPP galera-26.4.3/asio/asio/ssl/detail/shutdown_op.hpp0000664000177500017540000000245513540715002020377 0ustar dbartmy// // ssl/detail/shutdown_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_SHUTDOWN_OP_HPP #define ASIO_SSL_DETAIL_SHUTDOWN_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_ENABLE_OLD_SSL) # include "asio/ssl/detail/engine.hpp" #endif // !defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { #if !defined(ASIO_ENABLE_OLD_SSL) class shutdown_op { public: engine::want operator()(engine& eng, asio::error_code& ec, std::size_t& bytes_transferred) const { bytes_transferred = 0; return eng.shutdown(ec); } template void call_handler(Handler& handler, const asio::error_code& ec, const std::size_t&) const { handler(ec); } }; #endif // !defined(ASIO_ENABLE_OLD_SSL) } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_SHUTDOWN_OP_HPP galera-26.4.3/asio/asio/ssl/detail/password_callback.hpp0000664000177500017540000000306013540715002021475 0ustar dbartmy// // ssl/detail/password_callback.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_PASSWORD_CALLBACK_HPP #define ASIO_SSL_DETAIL_PASSWORD_CALLBACK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_ENABLE_OLD_SSL) # include # include # include "asio/ssl/context_base.hpp" #endif // !defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { #if !defined(ASIO_ENABLE_OLD_SSL) class password_callback_base { public: virtual ~password_callback_base() { } virtual std::string call(std::size_t size, context_base::password_purpose purpose) = 0; }; template class password_callback : public password_callback_base { public: explicit password_callback(PasswordCallback callback) : callback_(callback) { } virtual std::string call(std::size_t size, context_base::password_purpose purpose) { return callback_(size, purpose); } private: PasswordCallback callback_; }; #endif // !defined(ASIO_ENABLE_OLD_SSL) } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_PASSWORD_CALLBACK_HPP galera-26.4.3/asio/asio/ssl/detail/openssl_types.hpp0000664000177500017540000000155213540715002020732 0ustar dbartmy// // ssl/detail/openssl_types.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_OPENSSL_TYPES_HPP #define ASIO_SSL_DETAIL_OPENSSL_TYPES_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #if !defined(OPENSSL_NO_ENGINE) # include #endif // !defined(OPENSSL_NO_ENGINE) #include #include #include #include #include "asio/detail/socket_types.hpp" #endif // ASIO_SSL_DETAIL_OPENSSL_TYPES_HPP galera-26.4.3/asio/asio/ssl/detail/read_op.hpp0000664000177500017540000000324713540715002017437 0ustar dbartmy// // ssl/detail/read_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_READ_OP_HPP #define ASIO_SSL_DETAIL_READ_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_ENABLE_OLD_SSL) # include "asio/detail/buffer_sequence_adapter.hpp" # include "asio/ssl/detail/engine.hpp" #endif // !defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { #if !defined(ASIO_ENABLE_OLD_SSL) template class read_op { public: read_op(const MutableBufferSequence& buffers) : buffers_(buffers) { } engine::want operator()(engine& eng, asio::error_code& ec, std::size_t& bytes_transferred) const { asio::mutable_buffer buffer = asio::detail::buffer_sequence_adapter::first(buffers_); return eng.read(buffer, ec, bytes_transferred); } template void call_handler(Handler& handler, const asio::error_code& ec, const std::size_t& bytes_transferred) const { handler(ec, bytes_transferred); } private: MutableBufferSequence buffers_; }; #endif // 
!defined(ASIO_ENABLE_OLD_SSL) } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_READ_OP_HPP galera-26.4.3/asio/asio/ssl/detail/buffered_handshake_op.hpp0000664000177500017540000000604513540715002022313 0ustar dbartmy// // ssl/detail/buffered_handshake_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_BUFFERED_HANDSHAKE_OP_HPP #define ASIO_SSL_DETAIL_BUFFERED_HANDSHAKE_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_ENABLE_OLD_SSL) # include "asio/ssl/detail/engine.hpp" #endif // !defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { #if !defined(ASIO_ENABLE_OLD_SSL) template class buffered_handshake_op { public: buffered_handshake_op(stream_base::handshake_type type, const ConstBufferSequence& buffers) : type_(type), buffers_(buffers), total_buffer_size_(asio::buffer_size(buffers_)) { } engine::want operator()(engine& eng, asio::error_code& ec, std::size_t& bytes_transferred) const { typename ConstBufferSequence::const_iterator iter = buffers_.begin(); typename ConstBufferSequence::const_iterator end = buffers_.end(); std::size_t accumulated_size = 0; for (;;) { engine::want want = eng.handshake(type_, ec); if (want != engine::want_input_and_retry || bytes_transferred == total_buffer_size_) return want; // Find the next buffer piece to be fed to the engine. while (iter != end) { const_buffer buffer(*iter); // Skip over any buffers which have already been consumed by the engine. 
if (bytes_transferred >= accumulated_size + buffer_size(buffer)) { accumulated_size += buffer_size(buffer); ++iter; continue; } // The current buffer may have been partially consumed by the engine on // a previous iteration. If so, adjust the buffer to point to the // unused portion. if (bytes_transferred > accumulated_size) buffer = buffer + (bytes_transferred - accumulated_size); // Pass the buffer to the engine, and update the bytes transferred to // reflect the total number of bytes consumed so far. bytes_transferred += buffer_size(buffer); buffer = eng.put_input(buffer); bytes_transferred -= buffer_size(buffer); break; } } } template void call_handler(Handler& handler, const asio::error_code& ec, const std::size_t& bytes_transferred) const { handler(ec, bytes_transferred); } private: stream_base::handshake_type type_; ConstBufferSequence buffers_; std::size_t total_buffer_size_; }; #endif // !defined(ASIO_ENABLE_OLD_SSL) } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_BUFFERED_HANDSHAKE_OP_HPP galera-26.4.3/asio/asio/ssl/detail/io.hpp0000664000177500017540000002460313540715002016434 0ustar dbartmy// // ssl/detail/io.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_IO_HPP #define ASIO_SSL_DETAIL_IO_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_ENABLE_OLD_SSL) # include "asio/ssl/detail/engine.hpp" # include "asio/ssl/detail/stream_core.hpp" # include "asio/write.hpp" #endif // !defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { #if !defined(ASIO_ENABLE_OLD_SSL) template std::size_t io(Stream& next_layer, stream_core& core, const Operation& op, asio::error_code& ec) { std::size_t bytes_transferred = 0; do switch (op(core.engine_, ec, bytes_transferred)) { case engine::want_input_and_retry: // If the input buffer is empty then we need to read some more data from // the underlying transport. if (asio::buffer_size(core.input_) == 0) core.input_ = asio::buffer(core.input_buffer_, next_layer.read_some(core.input_buffer_, ec)); // Pass the new input data to the engine. core.input_ = core.engine_.put_input(core.input_); // Try the operation again. continue; case engine::want_output_and_retry: // Get output data from the engine and write it to the underlying // transport. asio::write(next_layer, core.engine_.get_output(core.output_buffer_), ec); // Try the operation again. continue; case engine::want_output: // Get output data from the engine and write it to the underlying // transport. asio::write(next_layer, core.engine_.get_output(core.output_buffer_), ec); // Operation is complete. Return result to caller. core.engine_.map_error_code(ec); return bytes_transferred; default: // Operation is complete. Return result to caller. core.engine_.map_error_code(ec); return bytes_transferred; } while (!ec); // Operation failed. Return result to caller. 
core.engine_.map_error_code(ec); return 0; } template class io_op { public: io_op(Stream& next_layer, stream_core& core, const Operation& op, Handler& handler) : next_layer_(next_layer), core_(core), op_(op), start_(0), want_(engine::want_nothing), bytes_transferred_(0), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } #if defined(ASIO_HAS_MOVE) io_op(const io_op& other) : next_layer_(other.next_layer_), core_(other.core_), op_(other.op_), start_(other.start_), want_(other.want_), ec_(other.ec_), bytes_transferred_(other.bytes_transferred_), handler_(other.handler_) { } io_op(io_op&& other) : next_layer_(other.next_layer_), core_(other.core_), op_(other.op_), start_(other.start_), want_(other.want_), ec_(other.ec_), bytes_transferred_(other.bytes_transferred_), handler_(ASIO_MOVE_CAST(Handler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()(asio::error_code ec, std::size_t bytes_transferred = ~std::size_t(0), int start = 0) { switch (start_ = start) { case 1: // Called after at least one async operation. do { switch (want_ = op_(core_.engine_, ec_, bytes_transferred_)) { case engine::want_input_and_retry: // If the input buffer already has data in it we can pass it to the // engine and then retry the operation immediately. if (asio::buffer_size(core_.input_) != 0) { core_.input_ = core_.engine_.put_input(core_.input_); continue; } // The engine wants more data to be read from input. However, we // cannot allow more than one read operation at a time on the // underlying transport. The pending_read_ timer's expiry is set to // pos_infin if a read is in progress, and neg_infin otherwise. if (core_.pending_read_.expires_at() == core_.neg_infin()) { // Prevent other read operations from being started. core_.pending_read_.expires_at(core_.pos_infin()); // Start reading some data from the underlying transport. 
next_layer_.async_read_some( asio::buffer(core_.input_buffer_), ASIO_MOVE_CAST(io_op)(*this)); } else { // Wait until the current read operation completes. core_.pending_read_.async_wait(ASIO_MOVE_CAST(io_op)(*this)); } // Yield control until asynchronous operation completes. Control // resumes at the "default:" label below. return; case engine::want_output_and_retry: case engine::want_output: // The engine wants some data to be written to the output. However, we // cannot allow more than one write operation at a time on the // underlying transport. The pending_write_ timer's expiry is set to // pos_infin if a write is in progress, and neg_infin otherwise. if (core_.pending_write_.expires_at() == core_.neg_infin()) { // Prevent other write operations from being started. core_.pending_write_.expires_at(core_.pos_infin()); // Start writing all the data to the underlying transport. asio::async_write(next_layer_, core_.engine_.get_output(core_.output_buffer_), ASIO_MOVE_CAST(io_op)(*this)); } else { // Wait until the current write operation completes. core_.pending_write_.async_wait(ASIO_MOVE_CAST(io_op)(*this)); } // Yield control until asynchronous operation completes. Control // resumes at the "default:" label below. return; default: // The SSL operation is done and we can invoke the handler, but we // have to keep in mind that this function might be being called from // the async operation's initiating function. In this case we're not // allowed to call the handler directly. Instead, issue a zero-sized // read so the handler runs "as-if" posted using io_service::post(). if (start) { next_layer_.async_read_some( asio::buffer(core_.input_buffer_, 0), ASIO_MOVE_CAST(io_op)(*this)); // Yield control until asynchronous operation completes. Control // resumes at the "default:" label below. return; } else { // Continue on to run handler directly. 
break; } } default: if (bytes_transferred == ~std::size_t(0)) bytes_transferred = 0; // Timer cancellation, no data transferred. else if (!ec_) ec_ = ec; switch (want_) { case engine::want_input_and_retry: // Add received data to the engine's input. core_.input_ = asio::buffer( core_.input_buffer_, bytes_transferred); core_.input_ = core_.engine_.put_input(core_.input_); // Release any waiting read operations. core_.pending_read_.expires_at(core_.neg_infin()); // Try the operation again. continue; case engine::want_output_and_retry: // Release any waiting write operations. core_.pending_write_.expires_at(core_.neg_infin()); // Try the operation again. continue; case engine::want_output: // Release any waiting write operations. core_.pending_write_.expires_at(core_.neg_infin()); // Fall through to call handler. default: // Pass the result to the handler. op_.call_handler(handler_, core_.engine_.map_error_code(ec_), ec_ ? 0 : bytes_transferred_); // Our work here is done. return; } } while (!ec_); // Operation failed. Pass the result to the handler. op_.call_handler(handler_, core_.engine_.map_error_code(ec_), 0); } } //private: Stream& next_layer_; stream_core& core_; Operation op_; int start_; engine::want want_; asio::error_code ec_; std::size_t bytes_transferred_; Handler handler_; }; template inline void* asio_handler_allocate(std::size_t size, io_op* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, io_op* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( io_op* this_handler) { return this_handler->start_ == 0 ? 
true : asio_handler_cont_helpers::is_continuation(this_handler->handler_); } template inline void asio_handler_invoke(Function& function, io_op* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, io_op* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void async_io(Stream& next_layer, stream_core& core, const Operation& op, Handler& handler) { io_op( next_layer, core, op, handler)( asio::error_code(), 0, 1); } #endif // !defined(ASIO_ENABLE_OLD_SSL) } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_IO_HPP galera-26.4.3/asio/asio/ssl/detail/impl/0000775000177500017540000000000013540715002016250 5ustar dbartmygalera-26.4.3/asio/asio/ssl/detail/impl/openssl_init.ipp0000664000177500017540000001203713540715002021473 0ustar dbartmy// // ssl/detail/impl/openssl_init.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2005 Voipster / Indrek dot Juhani at voipster dot com // Copyright (c) 2005-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_IMPL_OPENSSL_INIT_IPP #define ASIO_SSL_DETAIL_IMPL_OPENSSL_INIT_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/assert.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/tss_ptr.hpp" #include "asio/ssl/detail/openssl_init.hpp" #include "asio/ssl/detail/openssl_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { class openssl_init_base::do_init { public: do_init() { ::SSL_library_init(); ::SSL_load_error_strings(); ::OpenSSL_add_all_algorithms(); #if (OPENSSL_VERSION_NUMBER < 0x10100000L) mutexes_.resize(::CRYPTO_num_locks()); for (size_t i = 0; i < mutexes_.size(); ++i) mutexes_[i].reset(new asio::detail::mutex); ::CRYPTO_set_locking_callback(&do_init::openssl_locking_func); #endif // (OPENSSL_VERSION_NUMBER < 0x10100000L) #if (OPENSSL_VERSION_NUMBER < 0x10000000L) ::CRYPTO_set_id_callback(&do_init::openssl_id_func); #endif // (OPENSSL_VERSION_NUMBER < 0x10000000L) #if !defined(SSL_OP_NO_COMPRESSION) \ && (OPENSSL_VERSION_NUMBER >= 0x00908000L) null_compression_methods_ = sk_SSL_COMP_new_null(); #endif // !defined(SSL_OP_NO_COMPRESSION) // && (OPENSSL_VERSION_NUMBER >= 0x00908000L) } ~do_init() { #if !defined(SSL_OP_NO_COMPRESSION) \ && (OPENSSL_VERSION_NUMBER >= 0x00908000L) sk_SSL_COMP_free(null_compression_methods_); #endif // !defined(SSL_OP_NO_COMPRESSION) // && (OPENSSL_VERSION_NUMBER >= 0x00908000L) #if (OPENSSL_VERSION_NUMBER < 0x10000000L) ::CRYPTO_set_id_callback(0); #endif // (OPENSSL_VERSION_NUMBER < 0x10000000L) #if (OPENSSL_VERSION_NUMBER < 0x10100000L) ::CRYPTO_set_locking_callback(0); ::ERR_free_strings(); ::EVP_cleanup(); ::CRYPTO_cleanup_all_ex_data(); #endif // (OPENSSL_VERSION_NUMBER < 0x10100000L) #if (OPENSSL_VERSION_NUMBER < 0x10000000L) 
::ERR_remove_state(0); #elif (OPENSSL_VERSION_NUMBER < 0x10100000L) ::ERR_remove_thread_state(NULL); #endif // (OPENSSL_VERSION_NUMBER < 0x10000000L) #if (OPENSSL_VERSION_NUMBER >= 0x10002000L) \ && (OPENSSL_VERSION_NUMBER < 0x10100000L) ::SSL_COMP_free_compression_methods(); #endif // (OPENSSL_VERSION_NUMBER >= 0x10002000L) // && (OPENSSL_VERSION_NUMBER < 0x10100000L) #if !defined(OPENSSL_IS_BORINGSSL) ::CONF_modules_unload(1); #endif // !defined(OPENSSL_IS_BORINGSSL) #if !defined(OPENSSL_NO_ENGINE) \ && (OPENSSL_VERSION_NUMBER < 0x10100000L) ::ENGINE_cleanup(); #endif // !defined(OPENSSL_NO_ENGINE) // && (OPENSSL_VERSION_NUMBER < 0x10100000L) } #if !defined(SSL_OP_NO_COMPRESSION) \ && (OPENSSL_VERSION_NUMBER >= 0x00908000L) STACK_OF(SSL_COMP)* get_null_compression_methods() const { return null_compression_methods_; } #endif // !defined(SSL_OP_NO_COMPRESSION) // && (OPENSSL_VERSION_NUMBER >= 0x00908000L) private: #if (OPENSSL_VERSION_NUMBER < 0x10000000L) static unsigned long openssl_id_func() { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) return ::GetCurrentThreadId(); #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) void* id = &errno; ASIO_ASSERT(sizeof(unsigned long) >= sizeof(void*)); return reinterpret_cast(id); #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) } #endif // (OPENSSL_VERSION_NUMBER < 0x10000000L) #if (OPENSSL_VERSION_NUMBER < 0x10100000L) static void openssl_locking_func(int mode, int n, const char* /*file*/, int /*line*/) { if (mode & CRYPTO_LOCK) instance()->mutexes_[n]->lock(); else instance()->mutexes_[n]->unlock(); } // Mutexes to be used in locking callbacks. 
std::vector > mutexes_; #endif // (OPENSSL_VERSION_NUMBER < 0x10100000L) #if !defined(SSL_OP_NO_COMPRESSION) \ && (OPENSSL_VERSION_NUMBER >= 0x00908000L) STACK_OF(SSL_COMP)* null_compression_methods_; #endif // !defined(SSL_OP_NO_COMPRESSION) // && (OPENSSL_VERSION_NUMBER >= 0x00908000L) }; asio::detail::shared_ptr openssl_init_base::instance() { static asio::detail::shared_ptr init(new do_init); return init; } #if !defined(SSL_OP_NO_COMPRESSION) \ && (OPENSSL_VERSION_NUMBER >= 0x00908000L) STACK_OF(SSL_COMP)* openssl_init_base::get_null_compression_methods() { return instance()->get_null_compression_methods(); } #endif // !defined(SSL_OP_NO_COMPRESSION) // && (OPENSSL_VERSION_NUMBER >= 0x00908000L) } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_IMPL_OPENSSL_INIT_IPP galera-26.4.3/asio/asio/ssl/detail/impl/engine.ipp0000664000177500017540000001747113540715002020241 0ustar dbartmy// // ssl/detail/impl/engine.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_IMPL_ENGINE_IPP #define ASIO_SSL_DETAIL_IMPL_ENGINE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_ENABLE_OLD_SSL) # include "asio/detail/throw_error.hpp" # include "asio/error.hpp" # include "asio/ssl/detail/engine.hpp" # include "asio/ssl/error.hpp" # include "asio/ssl/verify_context.hpp" #endif // !defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { #if !defined(ASIO_ENABLE_OLD_SSL) engine::engine(SSL_CTX* context) : ssl_(::SSL_new(context)) { if (!ssl_) { asio::error_code ec( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); asio::detail::throw_error(ec, "engine"); } accept_mutex().init(); ::SSL_set_mode(ssl_, SSL_MODE_ENABLE_PARTIAL_WRITE); ::SSL_set_mode(ssl_, SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER); #if defined(SSL_MODE_RELEASE_BUFFERS) ::SSL_set_mode(ssl_, SSL_MODE_RELEASE_BUFFERS); #endif // defined(SSL_MODE_RELEASE_BUFFERS) ::BIO* int_bio = 0; ::BIO_new_bio_pair(&int_bio, 0, &ext_bio_, 0); ::SSL_set_bio(ssl_, int_bio, int_bio); } engine::~engine() { if (SSL_get_app_data(ssl_)) { delete static_cast(SSL_get_app_data(ssl_)); SSL_set_app_data(ssl_, 0); } ::BIO_free(ext_bio_); ::SSL_free(ssl_); } SSL* engine::native_handle() { return ssl_; } asio::error_code engine::set_verify_mode( verify_mode v, asio::error_code& ec) { ::SSL_set_verify(ssl_, v, ::SSL_get_verify_callback(ssl_)); ec = asio::error_code(); return ec; } asio::error_code engine::set_verify_depth( int depth, asio::error_code& ec) { ::SSL_set_verify_depth(ssl_, depth); ec = asio::error_code(); return ec; } asio::error_code engine::set_verify_callback( verify_callback_base* callback, asio::error_code& ec) { if (SSL_get_app_data(ssl_)) delete static_cast(SSL_get_app_data(ssl_)); 
SSL_set_app_data(ssl_, callback); ::SSL_set_verify(ssl_, ::SSL_get_verify_mode(ssl_), &engine::verify_callback_function); ec = asio::error_code(); return ec; } int engine::verify_callback_function(int preverified, X509_STORE_CTX* ctx) { if (ctx) { if (SSL* ssl = static_cast( ::X509_STORE_CTX_get_ex_data( ctx, ::SSL_get_ex_data_X509_STORE_CTX_idx()))) { if (SSL_get_app_data(ssl)) { verify_callback_base* callback = static_cast( SSL_get_app_data(ssl)); verify_context verify_ctx(ctx); return callback->call(preverified != 0, verify_ctx) ? 1 : 0; } } } return 0; } engine::want engine::handshake( stream_base::handshake_type type, asio::error_code& ec) { return perform((type == asio::ssl::stream_base::client) ? &engine::do_connect : &engine::do_accept, 0, 0, ec, 0); } engine::want engine::shutdown(asio::error_code& ec) { return perform(&engine::do_shutdown, 0, 0, ec, 0); } engine::want engine::write(const asio::const_buffer& data, asio::error_code& ec, std::size_t& bytes_transferred) { if (asio::buffer_size(data) == 0) { ec = asio::error_code(); return engine::want_nothing; } return perform(&engine::do_write, const_cast(asio::buffer_cast(data)), asio::buffer_size(data), ec, &bytes_transferred); } engine::want engine::read(const asio::mutable_buffer& data, asio::error_code& ec, std::size_t& bytes_transferred) { if (asio::buffer_size(data) == 0) { ec = asio::error_code(); return engine::want_nothing; } return perform(&engine::do_read, asio::buffer_cast(data), asio::buffer_size(data), ec, &bytes_transferred); } asio::mutable_buffers_1 engine::get_output( const asio::mutable_buffer& data) { int length = ::BIO_read(ext_bio_, asio::buffer_cast(data), static_cast(asio::buffer_size(data))); return asio::buffer(data, length > 0 ? static_cast(length) : 0); } asio::const_buffer engine::put_input( const asio::const_buffer& data) { int length = ::BIO_write(ext_bio_, asio::buffer_cast(data), static_cast(asio::buffer_size(data))); return asio::buffer(data + (length > 0 ? 
static_cast(length) : 0)); } const asio::error_code& engine::map_error_code( asio::error_code& ec) const { // We only want to map the error::eof code. if (ec != asio::error::eof) return ec; // If there's data yet to be read, it's an error. if (BIO_wpending(ext_bio_)) { ec = asio::ssl::error::stream_truncated; return ec; } // SSL v2 doesn't provide a protocol-level shutdown, so an eof on the // underlying transport is passed through. #if (OPENSSL_VERSION_NUMBER < 0x10100000L) if (ssl_->version == SSL2_VERSION) return ec; #endif // (OPENSSL_VERSION_NUMBER < 0x10100000L) // Otherwise, the peer should have negotiated a proper shutdown. if ((::SSL_get_shutdown(ssl_) & SSL_RECEIVED_SHUTDOWN) == 0) { ec = asio::ssl::error::stream_truncated; } return ec; } asio::detail::static_mutex& engine::accept_mutex() { static asio::detail::static_mutex mutex = ASIO_STATIC_MUTEX_INIT; return mutex; } engine::want engine::perform(int (engine::* op)(void*, std::size_t), void* data, std::size_t length, asio::error_code& ec, std::size_t* bytes_transferred) { std::size_t pending_output_before = ::BIO_ctrl_pending(ext_bio_); ::ERR_clear_error(); int result = (this->*op)(data, length); int ssl_error = ::SSL_get_error(ssl_, result); int sys_error = static_cast(::ERR_get_error()); std::size_t pending_output_after = ::BIO_ctrl_pending(ext_bio_); if (ssl_error == SSL_ERROR_SSL) { ec = asio::error_code(sys_error, asio::error::get_ssl_category()); return want_nothing; } if (ssl_error == SSL_ERROR_SYSCALL) { ec = asio::error_code(sys_error, asio::error::get_system_category()); return want_nothing; } if (result > 0 && bytes_transferred) *bytes_transferred = static_cast(result); if (ssl_error == SSL_ERROR_WANT_WRITE) { ec = asio::error_code(); return want_output_and_retry; } else if (pending_output_after > pending_output_before) { ec = asio::error_code(); return result > 0 ? 
want_output : want_output_and_retry; } else if (ssl_error == SSL_ERROR_WANT_READ) { ec = asio::error_code(); return want_input_and_retry; } else if (::SSL_get_shutdown(ssl_) & SSL_RECEIVED_SHUTDOWN) { ec = asio::error::eof; return want_nothing; } else { ec = asio::error_code(); return want_nothing; } } int engine::do_accept(void*, std::size_t) { asio::detail::static_mutex::scoped_lock lock(accept_mutex()); return ::SSL_accept(ssl_); } int engine::do_connect(void*, std::size_t) { return ::SSL_connect(ssl_); } int engine::do_shutdown(void*, std::size_t) { int result = ::SSL_shutdown(ssl_); if (result == 0) result = ::SSL_shutdown(ssl_); return result; } int engine::do_read(void* data, std::size_t length) { return ::SSL_read(ssl_, data, length < INT_MAX ? static_cast(length) : INT_MAX); } int engine::do_write(void* data, std::size_t length) { return ::SSL_write(ssl_, data, length < INT_MAX ? static_cast(length) : INT_MAX); } #endif // !defined(ASIO_ENABLE_OLD_SSL) } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_IMPL_ENGINE_IPP galera-26.4.3/asio/asio/ssl/detail/stream_core.hpp0000664000177500017540000000712313540715002020326 0ustar dbartmy// // ssl/detail/stream_core.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_STREAM_CORE_HPP #define ASIO_SSL_DETAIL_STREAM_CORE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_ENABLE_OLD_SSL) # if defined(ASIO_HAS_BOOST_DATE_TIME) # include "asio/deadline_timer.hpp" # else // defined(ASIO_HAS_BOOST_DATE_TIME) # include "asio/steady_timer.hpp" # endif // defined(ASIO_HAS_BOOST_DATE_TIME) # include "asio/ssl/detail/engine.hpp" # include "asio/buffer.hpp" #endif // !defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { #if !defined(ASIO_ENABLE_OLD_SSL) struct stream_core { // According to the OpenSSL documentation, this is the buffer size that is // sufficient to hold the largest possible TLS record. enum { max_tls_record_size = 17 * 1024 }; stream_core(SSL_CTX* context, asio::io_service& io_service) : engine_(context), pending_read_(io_service), pending_write_(io_service), output_buffer_space_(max_tls_record_size), output_buffer_(asio::buffer(output_buffer_space_)), input_buffer_space_(max_tls_record_size), input_buffer_(asio::buffer(input_buffer_space_)) { pending_read_.expires_at(neg_infin()); pending_write_.expires_at(neg_infin()); } ~stream_core() { } // The SSL engine. engine engine_; #if defined(ASIO_HAS_BOOST_DATE_TIME) // Timer used for storing queued read operations. asio::deadline_timer pending_read_; // Timer used for storing queued write operations. asio::deadline_timer pending_write_; // Helper function for obtaining a time value that always fires. static asio::deadline_timer::time_type neg_infin() { return boost::posix_time::neg_infin; } // Helper function for obtaining a time value that never fires. 
static asio::deadline_timer::time_type pos_infin() { return boost::posix_time::pos_infin; } #else // defined(ASIO_HAS_BOOST_DATE_TIME) // Timer used for storing queued read operations. asio::steady_timer pending_read_; // Timer used for storing queued write operations. asio::steady_timer pending_write_; // Helper function for obtaining a time value that always fires. static asio::steady_timer::time_point neg_infin() { return (asio::steady_timer::time_point::min)(); } // Helper function for obtaining a time value that never fires. static asio::steady_timer::time_point pos_infin() { return (asio::steady_timer::time_point::max)(); } #endif // defined(ASIO_HAS_BOOST_DATE_TIME) // Buffer space used to prepare output intended for the transport. std::vector output_buffer_space_; // A buffer that may be used to prepare output intended for the transport. const asio::mutable_buffers_1 output_buffer_; // Buffer space used to read input intended for the engine. std::vector input_buffer_space_; // A buffer that may be used to read input intended for the engine. const asio::mutable_buffers_1 input_buffer_; // The buffer pointing to the engine's unconsumed input. asio::const_buffer input_; }; #endif // !defined(ASIO_ENABLE_OLD_SSL) } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_STREAM_CORE_HPP galera-26.4.3/asio/asio/ssl/stream_service.hpp0000664000177500017540000000165113540715002017574 0ustar dbartmy// // ssl/stream_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_STREAM_SERVICE_HPP #define ASIO_SSL_STREAM_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_ENABLE_OLD_SSL) # include "asio/ssl/old/stream_service.hpp" #endif // defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { #if defined(ASIO_ENABLE_OLD_SSL) using asio::ssl::old::stream_service; #endif // defined(ASIO_ENABLE_OLD_SSL) } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_STREAM_SERVICE_HPP galera-26.4.3/asio/asio/ssl/verify_context.hpp0000664000177500017540000000342113540715002017626 0ustar dbartmy// // ssl/verify_context.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_VERIFY_CONTEXT_HPP #define ASIO_SSL_VERIFY_CONTEXT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_ENABLE_OLD_SSL) # include "asio/detail/noncopyable.hpp" # include "asio/ssl/detail/openssl_types.hpp" #endif // !defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { #if !defined(ASIO_ENABLE_OLD_SSL) /// A simple wrapper around the X509_STORE_CTX type, used during verification of /// a peer certificate. /** * @note The verify_context does not own the underlying X509_STORE_CTX object. */ class verify_context : private noncopyable { public: /// The native handle type of the verification context. typedef X509_STORE_CTX* native_handle_type; /// Constructor. 
explicit verify_context(native_handle_type handle) : handle_(handle) { } /// Get the underlying implementation in the native type. /** * This function may be used to obtain the underlying implementation of the * context. This is intended to allow access to context functionality that is * not otherwise provided. */ native_handle_type native_handle() { return handle_; } private: // The underlying native implementation. native_handle_type handle_; }; #endif // defined(ASIO_ENABLE_OLD_SSL) } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_VERIFY_CONTEXT_HPP galera-26.4.3/asio/asio/ssl/stream_base.hpp0000664000177500017540000000216613540715002017050 0ustar dbartmy// // ssl/stream_base.hpp // ~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_STREAM_BASE_HPP #define ASIO_SSL_STREAM_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { /// The stream_base class is used as a base for the asio::ssl::stream /// class template so that we have a common place to define various enums. class stream_base { public: /// Different handshake types. enum handshake_type { /// Perform handshaking as a client. client, /// Perform handshaking as a server. server }; protected: /// Protected destructor to prevent deletion through this type. 
~stream_base() { } }; } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_STREAM_BASE_HPP galera-26.4.3/asio/asio/ssl/basic_context.hpp0000664000177500017540000000164213540715002017406 0ustar dbartmy// // ssl/basic_context.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_BASIC_CONTEXT_HPP #define ASIO_SSL_BASIC_CONTEXT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_ENABLE_OLD_SSL) # include "asio/ssl/old/basic_context.hpp" #endif // defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { #if defined(ASIO_ENABLE_OLD_SSL) using asio::ssl::old::basic_context; #endif // defined(ASIO_ENABLE_OLD_SSL) } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_BASIC_CONTEXT_HPP galera-26.4.3/asio/asio/ssl/context.hpp0000664000177500017540000006375113540715002016256 0ustar dbartmy// // ssl/context.hpp // ~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_CONTEXT_HPP #define ASIO_SSL_CONTEXT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_ENABLE_OLD_SSL) # include "asio/ssl/basic_context.hpp" # include "asio/ssl/context_service.hpp" #else // defined(ASIO_ENABLE_OLD_SSL) # include # include "asio/buffer.hpp" # include "asio/io_service.hpp" # include "asio/ssl/context_base.hpp" # include "asio/ssl/detail/openssl_types.hpp" # include "asio/ssl/detail/openssl_init.hpp" # include "asio/ssl/detail/password_callback.hpp" # include "asio/ssl/detail/verify_callback.hpp" # include "asio/ssl/verify_mode.hpp" #endif // defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { #if defined(ASIO_ENABLE_OLD_SSL) /// Typedef for the typical usage of context. typedef basic_context context; #else // defined(ASIO_ENABLE_OLD_SSL) class context : public context_base, private noncopyable { public: /// The native handle type of the SSL context. typedef SSL_CTX* native_handle_type; /// (Deprecated: Use native_handle_type.) The native type of the SSL context. typedef SSL_CTX* impl_type; /// Constructor. ASIO_DECL explicit context(method m); /// Deprecated constructor taking a reference to an io_service object. ASIO_DECL context(asio::io_service&, method m); #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a context from another. /** * This constructor moves an SSL context from one object to another. * * @param other The other context object from which the move will occur. * * @note Following the move, the following operations only are valid for the * moved-from object: * @li Destruction. * @li As a target for move-assignment. */ ASIO_DECL context(context&& other); /// Move-assign a context from another. 
/** * This assignment operator moves an SSL context from one object to another. * * @param other The other context object from which the move will occur. * * @note Following the move, the following operations only are valid for the * moved-from object: * @li Destruction. * @li As a target for move-assignment. */ ASIO_DECL context& operator=(context&& other); #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Destructor. ASIO_DECL ~context(); /// Get the underlying implementation in the native type. /** * This function may be used to obtain the underlying implementation of the * context. This is intended to allow access to context functionality that is * not otherwise provided. */ ASIO_DECL native_handle_type native_handle(); /// (Deprecated: Use native_handle().) Get the underlying implementation in /// the native type. /** * This function may be used to obtain the underlying implementation of the * context. This is intended to allow access to context functionality that is * not otherwise provided. */ ASIO_DECL impl_type impl(); /// Clear options on the context. /** * This function may be used to configure the SSL options used by the context. * * @param o A bitmask of options. The available option values are defined in * the context_base class. The specified options, if currently enabled on the * context, are cleared. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_clear_options. */ ASIO_DECL void clear_options(options o); /// Clear options on the context. /** * This function may be used to configure the SSL options used by the context. * * @param o A bitmask of options. The available option values are defined in * the context_base class. The specified options, if currently enabled on the * context, are cleared. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_clear_options. 
*/ ASIO_DECL asio::error_code clear_options(options o, asio::error_code& ec); /// Set options on the context. /** * This function may be used to configure the SSL options used by the context. * * @param o A bitmask of options. The available option values are defined in * the context_base class. The options are bitwise-ored with any existing * value for the options. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_set_options. */ ASIO_DECL void set_options(options o); /// Set options on the context. /** * This function may be used to configure the SSL options used by the context. * * @param o A bitmask of options. The available option values are defined in * the context_base class. The options are bitwise-ored with any existing * value for the options. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_set_options. */ ASIO_DECL asio::error_code set_options(options o, asio::error_code& ec); /// Set the peer verification mode. /** * This function may be used to configure the peer verification mode used by * the context. * * @param v A bitmask of peer verification modes. See @ref verify_mode for * available values. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_set_verify. */ ASIO_DECL void set_verify_mode(verify_mode v); /// Set the peer verification mode. /** * This function may be used to configure the peer verification mode used by * the context. * * @param v A bitmask of peer verification modes. See @ref verify_mode for * available values. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_set_verify. */ ASIO_DECL asio::error_code set_verify_mode( verify_mode v, asio::error_code& ec); /// Set the peer verification depth. /** * This function may be used to configure the maximum verification depth * allowed by the context. * * @param depth Maximum depth for the certificate chain verification that * shall be allowed. 
* * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_set_verify_depth. */ ASIO_DECL void set_verify_depth(int depth); /// Set the peer verification depth. /** * This function may be used to configure the maximum verification depth * allowed by the context. * * @param depth Maximum depth for the certificate chain verification that * shall be allowed. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_set_verify_depth. */ ASIO_DECL asio::error_code set_verify_depth( int depth, asio::error_code& ec); /// Set the callback used to verify peer certificates. /** * This function is used to specify a callback function that will be called * by the implementation when it needs to verify a peer certificate. * * @param callback The function object to be used for verifying a certificate. * The function signature of the handler must be: * @code bool verify_callback( * bool preverified, // True if the certificate passed pre-verification. * verify_context& ctx // The peer certificate and other context. * ); @endcode * The return value of the callback is true if the certificate has passed * verification, false otherwise. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_set_verify. */ template void set_verify_callback(VerifyCallback callback); /// Set the callback used to verify peer certificates. /** * This function is used to specify a callback function that will be called * by the implementation when it needs to verify a peer certificate. * * @param callback The function object to be used for verifying a certificate. * The function signature of the handler must be: * @code bool verify_callback( * bool preverified, // True if the certificate passed pre-verification. * verify_context& ctx // The peer certificate and other context. * ); @endcode * The return value of the callback is true if the certificate has passed * verification, false otherwise. 
* * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_set_verify. */ template asio::error_code set_verify_callback(VerifyCallback callback, asio::error_code& ec); /// Load a certification authority file for performing verification. /** * This function is used to load one or more trusted certification authorities * from a file. * * @param filename The name of a file containing certification authority * certificates in PEM format. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_load_verify_locations. */ ASIO_DECL void load_verify_file(const std::string& filename); /// Load a certification authority file for performing verification. /** * This function is used to load the certificates for one or more trusted * certification authorities from a file. * * @param filename The name of a file containing certification authority * certificates in PEM format. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_load_verify_locations. */ ASIO_DECL asio::error_code load_verify_file( const std::string& filename, asio::error_code& ec); /// Add certification authority for performing verification. /** * This function is used to add one trusted certification authority * from a memory buffer. * * @param ca The buffer containing the certification authority certificate. * The certificate must use the PEM format. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_get_cert_store and @c X509_STORE_add_cert. */ ASIO_DECL void add_certificate_authority(const const_buffer& ca); /// Add certification authority for performing verification. /** * This function is used to add one trusted certification authority * from a memory buffer. * * @param ca The buffer containing the certification authority certificate. * The certificate must use the PEM format. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_get_cert_store and @c X509_STORE_add_cert. 
*/ ASIO_DECL asio::error_code add_certificate_authority( const const_buffer& ca, asio::error_code& ec); /// Configures the context to use the default directories for finding /// certification authority certificates. /** * This function specifies that the context should use the default, * system-dependent directories for locating certification authority * certificates. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_set_default_verify_paths. */ ASIO_DECL void set_default_verify_paths(); /// Configures the context to use the default directories for finding /// certification authority certificates. /** * This function specifies that the context should use the default, * system-dependent directories for locating certification authority * certificates. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_set_default_verify_paths. */ ASIO_DECL asio::error_code set_default_verify_paths( asio::error_code& ec); /// Add a directory containing certificate authority files to be used for /// performing verification. /** * This function is used to specify the name of a directory containing * certification authority certificates. Each file in the directory must * contain a single certificate. The files must be named using the subject * name's hash and an extension of ".0". * * @param path The name of a directory containing the certificates. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_load_verify_locations. */ ASIO_DECL void add_verify_path(const std::string& path); /// Add a directory containing certificate authority files to be used for /// performing verification. /** * This function is used to specify the name of a directory containing * certification authority certificates. Each file in the directory must * contain a single certificate. The files must be named using the subject * name's hash and an extension of ".0". 
* * @param path The name of a directory containing the certificates. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_load_verify_locations. */ ASIO_DECL asio::error_code add_verify_path( const std::string& path, asio::error_code& ec); /// Use a certificate from a memory buffer. /** * This function is used to load a certificate into the context from a buffer. * * @param certificate The buffer containing the certificate. * * @param format The certificate format (ASN.1 or PEM). * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_use_certificate or SSL_CTX_use_certificate_ASN1. */ ASIO_DECL void use_certificate( const const_buffer& certificate, file_format format); /// Use a certificate from a memory buffer. /** * This function is used to load a certificate into the context from a buffer. * * @param certificate The buffer containing the certificate. * * @param format The certificate format (ASN.1 or PEM). * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_use_certificate or SSL_CTX_use_certificate_ASN1. */ ASIO_DECL asio::error_code use_certificate( const const_buffer& certificate, file_format format, asio::error_code& ec); /// Use a certificate from a file. /** * This function is used to load a certificate into the context from a file. * * @param filename The name of the file containing the certificate. * * @param format The file format (ASN.1 or PEM). * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_use_certificate_file. */ ASIO_DECL void use_certificate_file( const std::string& filename, file_format format); /// Use a certificate from a file. /** * This function is used to load a certificate into the context from a file. * * @param filename The name of the file containing the certificate. * * @param format The file format (ASN.1 or PEM). * * @param ec Set to indicate what error occurred, if any. 
* * @note Calls @c SSL_CTX_use_certificate_file. */ ASIO_DECL asio::error_code use_certificate_file( const std::string& filename, file_format format, asio::error_code& ec); /// Use a certificate chain from a memory buffer. /** * This function is used to load a certificate chain into the context from a * buffer. * * @param chain The buffer containing the certificate chain. The certificate * chain must use the PEM format. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_use_certificate and SSL_CTX_add_extra_chain_cert. */ ASIO_DECL void use_certificate_chain(const const_buffer& chain); /// Use a certificate chain from a memory buffer. /** * This function is used to load a certificate chain into the context from a * buffer. * * @param chain The buffer containing the certificate chain. The certificate * chain must use the PEM format. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_use_certificate and SSL_CTX_add_extra_chain_cert. */ ASIO_DECL asio::error_code use_certificate_chain( const const_buffer& chain, asio::error_code& ec); /// Use a certificate chain from a file. /** * This function is used to load a certificate chain into the context from a * file. * * @param filename The name of the file containing the certificate. The file * must use the PEM format. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_use_certificate_chain_file. */ ASIO_DECL void use_certificate_chain_file(const std::string& filename); /// Use a certificate chain from a file. /** * This function is used to load a certificate chain into the context from a * file. * * @param filename The name of the file containing the certificate. The file * must use the PEM format. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_use_certificate_chain_file. 
*/ ASIO_DECL asio::error_code use_certificate_chain_file( const std::string& filename, asio::error_code& ec); /// Use a private key from a memory buffer. /** * This function is used to load a private key into the context from a buffer. * * @param private_key The buffer containing the private key. * * @param format The private key format (ASN.1 or PEM). * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_use_PrivateKey or SSL_CTX_use_PrivateKey_ASN1. */ ASIO_DECL void use_private_key( const const_buffer& private_key, file_format format); /// Use a private key from a memory buffer. /** * This function is used to load a private key into the context from a buffer. * * @param private_key The buffer containing the private key. * * @param format The private key format (ASN.1 or PEM). * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_use_PrivateKey or SSL_CTX_use_PrivateKey_ASN1. */ ASIO_DECL asio::error_code use_private_key( const const_buffer& private_key, file_format format, asio::error_code& ec); /// Use a private key from a file. /** * This function is used to load a private key into the context from a file. * * @param filename The name of the file containing the private key. * * @param format The file format (ASN.1 or PEM). * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_use_PrivateKey_file. */ ASIO_DECL void use_private_key_file( const std::string& filename, file_format format); /// Use a private key from a file. /** * This function is used to load a private key into the context from a file. * * @param filename The name of the file containing the private key. * * @param format The file format (ASN.1 or PEM). * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_use_PrivateKey_file. 
*/ ASIO_DECL asio::error_code use_private_key_file( const std::string& filename, file_format format, asio::error_code& ec); /// Use an RSA private key from a memory buffer. /** * This function is used to load an RSA private key into the context from a * buffer. * * @param private_key The buffer containing the RSA private key. * * @param format The private key format (ASN.1 or PEM). * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_use_RSAPrivateKey or SSL_CTX_use_RSAPrivateKey_ASN1. */ ASIO_DECL void use_rsa_private_key( const const_buffer& private_key, file_format format); /// Use an RSA private key from a memory buffer. /** * This function is used to load an RSA private key into the context from a * buffer. * * @param private_key The buffer containing the RSA private key. * * @param format The private key format (ASN.1 or PEM). * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_use_RSAPrivateKey or SSL_CTX_use_RSAPrivateKey_ASN1. */ ASIO_DECL asio::error_code use_rsa_private_key( const const_buffer& private_key, file_format format, asio::error_code& ec); /// Use an RSA private key from a file. /** * This function is used to load an RSA private key into the context from a * file. * * @param filename The name of the file containing the RSA private key. * * @param format The file format (ASN.1 or PEM). * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_use_RSAPrivateKey_file. */ ASIO_DECL void use_rsa_private_key_file( const std::string& filename, file_format format); /// Use an RSA private key from a file. /** * This function is used to load an RSA private key into the context from a * file. * * @param filename The name of the file containing the RSA private key. * * @param format The file format (ASN.1 or PEM). * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_use_RSAPrivateKey_file. 
*/ ASIO_DECL asio::error_code use_rsa_private_key_file( const std::string& filename, file_format format, asio::error_code& ec); /// Use the specified memory buffer to obtain the temporary Diffie-Hellman /// parameters. /** * This function is used to load Diffie-Hellman parameters into the context * from a buffer. * * @param dh The memory buffer containing the Diffie-Hellman parameters. The * buffer must use the PEM format. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_set_tmp_dh. */ ASIO_DECL void use_tmp_dh(const const_buffer& dh); /// Use the specified memory buffer to obtain the temporary Diffie-Hellman /// parameters. /** * This function is used to load Diffie-Hellman parameters into the context * from a buffer. * * @param dh The memory buffer containing the Diffie-Hellman parameters. The * buffer must use the PEM format. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_set_tmp_dh. */ ASIO_DECL asio::error_code use_tmp_dh( const const_buffer& dh, asio::error_code& ec); /// Use the specified file to obtain the temporary Diffie-Hellman parameters. /** * This function is used to load Diffie-Hellman parameters into the context * from a file. * * @param filename The name of the file containing the Diffie-Hellman * parameters. The file must use the PEM format. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_set_tmp_dh. */ ASIO_DECL void use_tmp_dh_file(const std::string& filename); /// Use the specified file to obtain the temporary Diffie-Hellman parameters. /** * This function is used to load Diffie-Hellman parameters into the context * from a file. * * @param filename The name of the file containing the Diffie-Hellman * parameters. The file must use the PEM format. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_set_tmp_dh. 
*/ ASIO_DECL asio::error_code use_tmp_dh_file( const std::string& filename, asio::error_code& ec); /// Set the password callback. /** * This function is used to specify a callback function to obtain password * information about an encrypted key in PEM format. * * @param callback The function object to be used for obtaining the password. * The function signature of the handler must be: * @code std::string password_callback( * std::size_t max_length, // The maximum size for a password. * password_purpose purpose // Whether password is for reading or writing. * ); @endcode * The return value of the callback is a string containing the password. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_set_default_passwd_cb. */ template void set_password_callback(PasswordCallback callback); /// Set the password callback. /** * This function is used to specify a callback function to obtain password * information about an encrypted key in PEM format. * * @param callback The function object to be used for obtaining the password. * The function signature of the handler must be: * @code std::string password_callback( * std::size_t max_length, // The maximum size for a password. * password_purpose purpose // Whether password is for reading or writing. * ); @endcode * The return value of the callback is a string containing the password. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_set_default_passwd_cb. */ template asio::error_code set_password_callback(PasswordCallback callback, asio::error_code& ec); private: struct bio_cleanup; struct x509_cleanup; struct evp_pkey_cleanup; struct rsa_cleanup; struct dh_cleanup; // Helper function used to set a peer certificate verification callback. ASIO_DECL asio::error_code do_set_verify_callback( detail::verify_callback_base* callback, asio::error_code& ec); // Callback used when the SSL implementation wants to verify a certificate. 
ASIO_DECL static int verify_callback_function( int preverified, X509_STORE_CTX* ctx); // Helper function used to set a password callback. ASIO_DECL asio::error_code do_set_password_callback( detail::password_callback_base* callback, asio::error_code& ec); // Callback used when the SSL implementation wants a password. ASIO_DECL static int password_callback_function( char* buf, int size, int purpose, void* data); // Helper function to set the temporary Diffie-Hellman parameters from a BIO. ASIO_DECL asio::error_code do_use_tmp_dh( BIO* bio, asio::error_code& ec); // Helper function to make a BIO from a memory buffer. ASIO_DECL BIO* make_buffer_bio(const const_buffer& b); // The underlying native implementation. native_handle_type handle_; // Ensure openssl is initialised. asio::ssl::detail::openssl_init<> init_; }; #endif // defined(ASIO_ENABLE_OLD_SSL) } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/ssl/impl/context.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/ssl/impl/context.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_SSL_CONTEXT_HPP galera-26.4.3/asio/asio/ssl/verify_mode.hpp0000664000177500017540000000321513540715002017067 0ustar dbartmy// // ssl/verify_mode.hpp // ~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_VERIFY_MODE_HPP #define ASIO_SSL_VERIFY_MODE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/ssl/detail/openssl_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { /// Bitmask type for peer verification. 
/** * Possible values are: * * @li @ref verify_none * @li @ref verify_peer * @li @ref verify_fail_if_no_peer_cert * @li @ref verify_client_once */ typedef int verify_mode; #if defined(GENERATING_DOCUMENTATION) /// No verification. const int verify_none = implementation_defined; /// Verify the peer. const int verify_peer = implementation_defined; /// Fail verification if the peer has no certificate. Ignored unless /// @ref verify_peer is set. const int verify_fail_if_no_peer_cert = implementation_defined; /// Do not request client certificate on renegotiation. Ignored unless /// @ref verify_peer is set. const int verify_client_once = implementation_defined; #else const int verify_none = SSL_VERIFY_NONE; const int verify_peer = SSL_VERIFY_PEER; const int verify_fail_if_no_peer_cert = SSL_VERIFY_FAIL_IF_NO_PEER_CERT; const int verify_client_once = SSL_VERIFY_CLIENT_ONCE; #endif } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_VERIFY_MODE_HPP galera-26.4.3/asio/asio/ssl/context_service.hpp0000664000177500017540000000166213540715002017767 0ustar dbartmy// // ssl/context_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_CONTEXT_SERVICE_HPP #define ASIO_SSL_CONTEXT_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_ENABLE_OLD_SSL) # include "asio/ssl/old/context_service.hpp" #endif // defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { #if defined(ASIO_ENABLE_OLD_SSL) using asio::ssl::old::context_service; #endif // defined(ASIO_ENABLE_OLD_SSL) } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_CONTEXT_SERVICE_HPP galera-26.4.3/asio/asio/ssl/impl/0000775000177500017540000000000013540715002015006 5ustar dbartmygalera-26.4.3/asio/asio/ssl/impl/context.ipp0000664000177500017540000006325013540715002017212 0ustar dbartmy// // ssl/impl/context.ipp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2005 Voipster / Indrek dot Juhani at voipster dot com // Copyright (c) 2005-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_IMPL_CONTEXT_IPP #define ASIO_SSL_IMPL_CONTEXT_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_ENABLE_OLD_SSL) # include # include "asio/detail/throw_error.hpp" # include "asio/error.hpp" # include "asio/ssl/context.hpp" # include "asio/ssl/error.hpp" #endif // !defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { #if !defined(ASIO_ENABLE_OLD_SSL) struct context::bio_cleanup { BIO* p; ~bio_cleanup() { if (p) ::BIO_free(p); } }; struct context::x509_cleanup { X509* p; ~x509_cleanup() { if (p) ::X509_free(p); } }; struct context::evp_pkey_cleanup { EVP_PKEY* p; ~evp_pkey_cleanup() { if (p) ::EVP_PKEY_free(p); } }; struct context::rsa_cleanup { RSA* p; ~rsa_cleanup() { if (p) ::RSA_free(p); } }; struct context::dh_cleanup { DH* p; ~dh_cleanup() { if (p) ::DH_free(p); } }; context::context(context::method m) : handle_(0) { ::ERR_clear_error(); switch (m) { #if defined(OPENSSL_NO_SSL2) \ || (OPENSSL_VERSION_NUMBER >= 0x10100000L) case context::sslv2: case context::sslv2_client: case context::sslv2_server: asio::detail::throw_error( asio::error::invalid_argument, "context"); break; #else // defined(OPENSSL_NO_SSL2) // || (OPENSSL_VERSION_NUMBER >= 0x10100000L) case context::sslv2: handle_ = ::SSL_CTX_new(::SSLv2_method()); break; case context::sslv2_client: handle_ = ::SSL_CTX_new(::SSLv2_client_method()); break; case context::sslv2_server: handle_ = ::SSL_CTX_new(::SSLv2_server_method()); break; #endif // defined(OPENSSL_NO_SSL2) // || (OPENSSL_VERSION_NUMBER >= 0x10100000L) #if defined(OPENSSL_NO_SSL3) case context::sslv3: case context::sslv3_client: case context::sslv3_server: asio::detail::throw_error( asio::error::invalid_argument, "context"); break; #else // defined(OPENSSL_NO_SSL3) case 
context::sslv3: handle_ = ::SSL_CTX_new(::SSLv3_method()); break; case context::sslv3_client: handle_ = ::SSL_CTX_new(::SSLv3_client_method()); break; case context::sslv3_server: handle_ = ::SSL_CTX_new(::SSLv3_server_method()); break; #endif // defined(OPENSSL_NO_SSL3) #if (OPENSSL_VERSION_NUMBER < 0x10100000L) case context::tlsv1: handle_ = ::SSL_CTX_new(::TLSv1_method()); break; case context::tlsv1_client: handle_ = ::SSL_CTX_new(::TLSv1_client_method()); break; case context::tlsv1_server: handle_ = ::SSL_CTX_new(::TLSv1_server_method()); break; #endif // (OPENSSL_VERSION_NUMBER < 0x10100000L) case context::sslv23: handle_ = ::SSL_CTX_new(::SSLv23_method()); break; case context::sslv23_client: handle_ = ::SSL_CTX_new(::SSLv23_client_method()); break; case context::sslv23_server: handle_ = ::SSL_CTX_new(::SSLv23_server_method()); break; #if (OPENSSL_VERSION_NUMBER < 0x10100000L) #if defined(SSL_TXT_TLSV1_1) case context::tlsv11: handle_ = ::SSL_CTX_new(::TLSv1_1_method()); break; case context::tlsv11_client: handle_ = ::SSL_CTX_new(::TLSv1_1_client_method()); break; case context::tlsv11_server: handle_ = ::SSL_CTX_new(::TLSv1_1_server_method()); break; #else // defined(SSL_TXT_TLSV1_1) case context::tlsv11: case context::tlsv11_client: case context::tlsv11_server: asio::detail::throw_error( asio::error::invalid_argument, "context"); break; #endif // defined(SSL_TXT_TLSV1_1) #if defined(SSL_TXT_TLSV1_2) case context::tlsv12: handle_ = ::SSL_CTX_new(::TLSv1_2_method()); break; case context::tlsv12_client: handle_ = ::SSL_CTX_new(::TLSv1_2_client_method()); break; case context::tlsv12_server: handle_ = ::SSL_CTX_new(::TLSv1_2_server_method()); break; #else // defined(SSL_TXT_TLSV1_2) case context::tlsv12: case context::tlsv12_client: case context::tlsv12_server: asio::detail::throw_error( asio::error::invalid_argument, "context"); break; #endif // defined(SSL_TXT_TLSV1_2) #else // (OPENSSL_VERSION_NUMBER < 0x10100000L) case context::tlsv1: case context::tlsv11: case 
context::tlsv12: handle_ = ::SSL_CTX_new(::TLS_method()); break; case context::tlsv1_client: case context::tlsv11_client: case context::tlsv12_client: handle_ = ::SSL_CTX_new(::TLS_client_method()); break; case context::tlsv1_server: case context::tlsv11_server: case context::tlsv12_server: handle_ = ::SSL_CTX_new(::TLS_server_method()); break; #endif // (OPENSSL_VERSION_NUMBER < 0x10100000L) default: handle_ = ::SSL_CTX_new(0); break; } if (handle_ == 0) { asio::error_code ec( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); asio::detail::throw_error(ec, "context"); } set_options(no_compression); } context::context(asio::io_service&, context::method m) : handle_(0) { context tmp(m); handle_ = tmp.handle_; tmp.handle_ = 0; } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) context::context(context&& other) { handle_ = other.handle_; other.handle_ = 0; } context& context::operator=(context&& other) { context tmp(ASIO_MOVE_CAST(context)(*this)); handle_ = other.handle_; other.handle_ = 0; return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) context::~context() { if (handle_) { #if (OPENSSL_VERSION_NUMBER >= 0x10100000L) void* cb_userdata = ::SSL_CTX_get_default_passwd_cb_userdata(handle_); #else // (OPENSSL_VERSION_NUMBER >= 0x10100000L) void* cb_userdata = handle_->default_passwd_callback_userdata; #endif // (OPENSSL_VERSION_NUMBER >= 0x10100000L) if (cb_userdata) { detail::password_callback_base* callback = static_cast( cb_userdata); delete callback; #if (OPENSSL_VERSION_NUMBER >= 0x10100000L) ::SSL_CTX_set_default_passwd_cb_userdata(handle_, 0); #else // (OPENSSL_VERSION_NUMBER >= 0x10100000L) handle_->default_passwd_callback_userdata = 0; #endif // (OPENSSL_VERSION_NUMBER >= 0x10100000L) } if (SSL_CTX_get_app_data(handle_)) { detail::verify_callback_base* callback = static_cast( SSL_CTX_get_app_data(handle_)); delete callback; SSL_CTX_set_app_data(handle_, 0); } ::SSL_CTX_free(handle_); } } 
context::native_handle_type context::native_handle() { return handle_; } context::impl_type context::impl() { return handle_; } void context::clear_options(context::options o) { asio::error_code ec; clear_options(o, ec); asio::detail::throw_error(ec, "clear_options"); } asio::error_code context::clear_options( context::options o, asio::error_code& ec) { #if (OPENSSL_VERSION_NUMBER >= 0x009080DFL) \ && (OPENSSL_VERSION_NUMBER != 0x00909000L) # if !defined(SSL_OP_NO_COMPRESSION) if ((o & context::no_compression) != 0) { # if (OPENSSL_VERSION_NUMBER >= 0x00908000L) handle_->comp_methods = SSL_COMP_get_compression_methods(); # endif // (OPENSSL_VERSION_NUMBER >= 0x00908000L) o ^= context::no_compression; } # endif // !defined(SSL_OP_NO_COMPRESSION) ::SSL_CTX_clear_options(handle_, o); ec = asio::error_code(); #else // (OPENSSL_VERSION_NUMBER >= 0x009080DFL) // && (OPENSSL_VERSION_NUMBER != 0x00909000L) (void)o; ec = asio::error::operation_not_supported; #endif // (OPENSSL_VERSION_NUMBER >= 0x009080DFL) // && (OPENSSL_VERSION_NUMBER != 0x00909000L) return ec; } void context::set_options(context::options o) { asio::error_code ec; set_options(o, ec); asio::detail::throw_error(ec, "set_options"); } asio::error_code context::set_options( context::options o, asio::error_code& ec) { #if !defined(SSL_OP_NO_COMPRESSION) if ((o & context::no_compression) != 0) { #if (OPENSSL_VERSION_NUMBER >= 0x00908000L) handle_->comp_methods = asio::ssl::detail::openssl_init<>::get_null_compression_methods(); #endif // (OPENSSL_VERSION_NUMBER >= 0x00908000L) o ^= context::no_compression; } #endif // !defined(SSL_OP_NO_COMPRESSION) ::SSL_CTX_set_options(handle_, o); ec = asio::error_code(); return ec; } void context::set_verify_mode(verify_mode v) { asio::error_code ec; set_verify_mode(v, ec); asio::detail::throw_error(ec, "set_verify_mode"); } asio::error_code context::set_verify_mode( verify_mode v, asio::error_code& ec) { ::SSL_CTX_set_verify(handle_, v, 
::SSL_CTX_get_verify_callback(handle_)); ec = asio::error_code(); return ec; } void context::set_verify_depth(int depth) { asio::error_code ec; set_verify_depth(depth, ec); asio::detail::throw_error(ec, "set_verify_depth"); } asio::error_code context::set_verify_depth( int depth, asio::error_code& ec) { ::SSL_CTX_set_verify_depth(handle_, depth); ec = asio::error_code(); return ec; } void context::load_verify_file(const std::string& filename) { asio::error_code ec; load_verify_file(filename, ec); asio::detail::throw_error(ec, "load_verify_file"); } asio::error_code context::load_verify_file( const std::string& filename, asio::error_code& ec) { ::ERR_clear_error(); if (::SSL_CTX_load_verify_locations(handle_, filename.c_str(), 0) != 1) { ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } ec = asio::error_code(); return ec; } void context::add_certificate_authority(const const_buffer& ca) { asio::error_code ec; add_certificate_authority(ca, ec); asio::detail::throw_error(ec, "add_certificate_authority"); } asio::error_code context::add_certificate_authority( const const_buffer& ca, asio::error_code& ec) { ::ERR_clear_error(); bio_cleanup bio = { make_buffer_bio(ca) }; if (bio.p) { x509_cleanup cert = { ::PEM_read_bio_X509(bio.p, 0, 0, 0) }; if (cert.p) { if (X509_STORE* store = ::SSL_CTX_get_cert_store(handle_)) { if (::X509_STORE_add_cert(store, cert.p) == 1) { ec = asio::error_code(); return ec; } } } } ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } void context::set_default_verify_paths() { asio::error_code ec; set_default_verify_paths(ec); asio::detail::throw_error(ec, "set_default_verify_paths"); } asio::error_code context::set_default_verify_paths( asio::error_code& ec) { ::ERR_clear_error(); if (::SSL_CTX_set_default_verify_paths(handle_) != 1) { ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } ec = 
asio::error_code(); return ec; } void context::add_verify_path(const std::string& path) { asio::error_code ec; add_verify_path(path, ec); asio::detail::throw_error(ec, "add_verify_path"); } asio::error_code context::add_verify_path( const std::string& path, asio::error_code& ec) { ::ERR_clear_error(); if (::SSL_CTX_load_verify_locations(handle_, 0, path.c_str()) != 1) { ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } ec = asio::error_code(); return ec; } void context::use_certificate( const const_buffer& certificate, file_format format) { asio::error_code ec; use_certificate(certificate, format, ec); asio::detail::throw_error(ec, "use_certificate"); } asio::error_code context::use_certificate( const const_buffer& certificate, file_format format, asio::error_code& ec) { ::ERR_clear_error(); if (format == context_base::asn1) { if (::SSL_CTX_use_certificate_ASN1(handle_, static_cast(buffer_size(certificate)), buffer_cast(certificate)) == 1) { ec = asio::error_code(); return ec; } } else if (format == context_base::pem) { bio_cleanup bio = { make_buffer_bio(certificate) }; if (bio.p) { x509_cleanup cert = { ::PEM_read_bio_X509(bio.p, 0, 0, 0) }; if (cert.p) { if (::SSL_CTX_use_certificate(handle_, cert.p) == 1) { ec = asio::error_code(); return ec; } } } } else { ec = asio::error::invalid_argument; return ec; } ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } void context::use_certificate_file( const std::string& filename, file_format format) { asio::error_code ec; use_certificate_file(filename, format, ec); asio::detail::throw_error(ec, "use_certificate_file"); } asio::error_code context::use_certificate_file( const std::string& filename, file_format format, asio::error_code& ec) { int file_type; switch (format) { case context_base::asn1: file_type = SSL_FILETYPE_ASN1; break; case context_base::pem: file_type = SSL_FILETYPE_PEM; break; default: { ec = 
asio::error::invalid_argument; return ec; } } ::ERR_clear_error(); if (::SSL_CTX_use_certificate_file(handle_, filename.c_str(), file_type) != 1) { ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } ec = asio::error_code(); return ec; } void context::use_certificate_chain(const const_buffer& chain) { asio::error_code ec; use_certificate_chain(chain, ec); asio::detail::throw_error(ec, "use_certificate_chain"); } asio::error_code context::use_certificate_chain( const const_buffer& chain, asio::error_code& ec) { ::ERR_clear_error(); bio_cleanup bio = { make_buffer_bio(chain) }; if (bio.p) { #if (OPENSSL_VERSION_NUMBER >= 0x10100000L) pem_password_cb* callback = ::SSL_CTX_get_default_passwd_cb(handle_); void* cb_userdata = ::SSL_CTX_get_default_passwd_cb_userdata(handle_); #else // (OPENSSL_VERSION_NUMBER >= 0x10100000L) pem_password_cb* callback = handle_->default_passwd_callback; void* cb_userdata = handle_->default_passwd_callback_userdata; #endif // (OPENSSL_VERSION_NUMBER >= 0x10100000L) x509_cleanup cert = { ::PEM_read_bio_X509_AUX(bio.p, 0, callback, cb_userdata) }; if (!cert.p) { ec = asio::error_code(ERR_R_PEM_LIB, asio::error::get_ssl_category()); return ec; } int result = ::SSL_CTX_use_certificate(handle_, cert.p); if (result == 0 || ::ERR_peek_error() != 0) { ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } #if (OPENSSL_VERSION_NUMBER >= 0x10002000L) && !defined(LIBRESSL_VERSION_NUMBER) ::SSL_CTX_clear_chain_certs(handle_); #else if (handle_->extra_certs) { ::sk_X509_pop_free(handle_->extra_certs, X509_free); handle_->extra_certs = 0; } #endif // (OPENSSL_VERSION_NUMBER >= 0x10002000L) while (X509* cacert = ::PEM_read_bio_X509(bio.p, 0, callback, cb_userdata)) { if (!::SSL_CTX_add_extra_chain_cert(handle_, cacert)) { ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } } result = ::ERR_peek_last_error(); if 
((ERR_GET_LIB(result) == ERR_LIB_PEM) && (ERR_GET_REASON(result) == PEM_R_NO_START_LINE)) { ::ERR_clear_error(); ec = asio::error_code(); return ec; } } ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } void context::use_certificate_chain_file(const std::string& filename) { asio::error_code ec; use_certificate_chain_file(filename, ec); asio::detail::throw_error(ec, "use_certificate_chain_file"); } asio::error_code context::use_certificate_chain_file( const std::string& filename, asio::error_code& ec) { ::ERR_clear_error(); if (::SSL_CTX_use_certificate_chain_file(handle_, filename.c_str()) != 1) { ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } ec = asio::error_code(); return ec; } void context::use_private_key( const const_buffer& private_key, context::file_format format) { asio::error_code ec; use_private_key(private_key, format, ec); asio::detail::throw_error(ec, "use_private_key"); } asio::error_code context::use_private_key( const const_buffer& private_key, context::file_format format, asio::error_code& ec) { ::ERR_clear_error(); #if (OPENSSL_VERSION_NUMBER >= 0x10100000L) pem_password_cb* callback = ::SSL_CTX_get_default_passwd_cb(handle_); void* cb_userdata = ::SSL_CTX_get_default_passwd_cb_userdata(handle_); #else // (OPENSSL_VERSION_NUMBER >= 0x10100000L) pem_password_cb* callback = handle_->default_passwd_callback; void* cb_userdata = handle_->default_passwd_callback_userdata; #endif // (OPENSSL_VERSION_NUMBER >= 0x10100000L) bio_cleanup bio = { make_buffer_bio(private_key) }; if (bio.p) { evp_pkey_cleanup evp_private_key = { 0 }; switch (format) { case context_base::asn1: evp_private_key.p = ::d2i_PrivateKey_bio(bio.p, 0); break; case context_base::pem: evp_private_key.p = ::PEM_read_bio_PrivateKey( bio.p, 0, callback, cb_userdata); break; default: { ec = asio::error::invalid_argument; return ec; } } if (evp_private_key.p) { if 
(::SSL_CTX_use_PrivateKey(handle_, evp_private_key.p) == 1) { ec = asio::error_code(); return ec; } } } ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } void context::use_private_key_file( const std::string& filename, context::file_format format) { asio::error_code ec; use_private_key_file(filename, format, ec); asio::detail::throw_error(ec, "use_private_key_file"); } void context::use_rsa_private_key( const const_buffer& private_key, context::file_format format) { asio::error_code ec; use_rsa_private_key(private_key, format, ec); asio::detail::throw_error(ec, "use_rsa_private_key"); } asio::error_code context::use_rsa_private_key( const const_buffer& private_key, context::file_format format, asio::error_code& ec) { ::ERR_clear_error(); #if (OPENSSL_VERSION_NUMBER >= 0x10100000L) pem_password_cb* callback = ::SSL_CTX_get_default_passwd_cb(handle_); void* cb_userdata = ::SSL_CTX_get_default_passwd_cb_userdata(handle_); #else // (OPENSSL_VERSION_NUMBER >= 0x10100000L) pem_password_cb* callback = handle_->default_passwd_callback; void* cb_userdata = handle_->default_passwd_callback_userdata; #endif // (OPENSSL_VERSION_NUMBER >= 0x10100000L) bio_cleanup bio = { make_buffer_bio(private_key) }; if (bio.p) { rsa_cleanup rsa_private_key = { 0 }; switch (format) { case context_base::asn1: rsa_private_key.p = ::d2i_RSAPrivateKey_bio(bio.p, 0); break; case context_base::pem: rsa_private_key.p = ::PEM_read_bio_RSAPrivateKey( bio.p, 0, callback, cb_userdata); break; default: { ec = asio::error::invalid_argument; return ec; } } if (rsa_private_key.p) { if (::SSL_CTX_use_RSAPrivateKey(handle_, rsa_private_key.p) == 1) { ec = asio::error_code(); return ec; } } } ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } asio::error_code context::use_private_key_file( const std::string& filename, context::file_format format, asio::error_code& ec) { int file_type; switch (format) { case 
context_base::asn1: file_type = SSL_FILETYPE_ASN1; break; case context_base::pem: file_type = SSL_FILETYPE_PEM; break; default: { ec = asio::error::invalid_argument; return ec; } } ::ERR_clear_error(); if (::SSL_CTX_use_PrivateKey_file(handle_, filename.c_str(), file_type) != 1) { ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } ec = asio::error_code(); return ec; } void context::use_rsa_private_key_file( const std::string& filename, context::file_format format) { asio::error_code ec; use_rsa_private_key_file(filename, format, ec); asio::detail::throw_error(ec, "use_rsa_private_key_file"); } asio::error_code context::use_rsa_private_key_file( const std::string& filename, context::file_format format, asio::error_code& ec) { int file_type; switch (format) { case context_base::asn1: file_type = SSL_FILETYPE_ASN1; break; case context_base::pem: file_type = SSL_FILETYPE_PEM; break; default: { ec = asio::error::invalid_argument; return ec; } } ::ERR_clear_error(); if (::SSL_CTX_use_RSAPrivateKey_file( handle_, filename.c_str(), file_type) != 1) { ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } ec = asio::error_code(); return ec; } void context::use_tmp_dh(const const_buffer& dh) { asio::error_code ec; use_tmp_dh(dh, ec); asio::detail::throw_error(ec, "use_tmp_dh"); } asio::error_code context::use_tmp_dh( const const_buffer& dh, asio::error_code& ec) { ::ERR_clear_error(); bio_cleanup bio = { make_buffer_bio(dh) }; if (bio.p) { return do_use_tmp_dh(bio.p, ec); } ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } void context::use_tmp_dh_file(const std::string& filename) { asio::error_code ec; use_tmp_dh_file(filename, ec); asio::detail::throw_error(ec, "use_tmp_dh_file"); } asio::error_code context::use_tmp_dh_file( const std::string& filename, asio::error_code& ec) { ::ERR_clear_error(); bio_cleanup bio = { 
::BIO_new_file(filename.c_str(), "r") }; if (bio.p) { return do_use_tmp_dh(bio.p, ec); } ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } asio::error_code context::do_use_tmp_dh( BIO* bio, asio::error_code& ec) { ::ERR_clear_error(); dh_cleanup dh = { ::PEM_read_bio_DHparams(bio, 0, 0, 0) }; if (dh.p) { if (::SSL_CTX_set_tmp_dh(handle_, dh.p) == 1) { ec = asio::error_code(); return ec; } } ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); return ec; } asio::error_code context::do_set_verify_callback( detail::verify_callback_base* callback, asio::error_code& ec) { if (SSL_CTX_get_app_data(handle_)) { delete static_cast( SSL_CTX_get_app_data(handle_)); } SSL_CTX_set_app_data(handle_, callback); ::SSL_CTX_set_verify(handle_, ::SSL_CTX_get_verify_mode(handle_), &context::verify_callback_function); ec = asio::error_code(); return ec; } int context::verify_callback_function(int preverified, X509_STORE_CTX* ctx) { if (ctx) { if (SSL* ssl = static_cast( ::X509_STORE_CTX_get_ex_data( ctx, ::SSL_get_ex_data_X509_STORE_CTX_idx()))) { if (SSL_CTX* handle = ::SSL_get_SSL_CTX(ssl)) { if (SSL_CTX_get_app_data(handle)) { detail::verify_callback_base* callback = static_cast( SSL_CTX_get_app_data(handle)); verify_context verify_ctx(ctx); return callback->call(preverified != 0, verify_ctx) ? 
1 : 0; } } } } return 0; } asio::error_code context::do_set_password_callback( detail::password_callback_base* callback, asio::error_code& ec) { #if (OPENSSL_VERSION_NUMBER >= 0x10100000L) void* old_callback = ::SSL_CTX_get_default_passwd_cb_userdata(handle_); ::SSL_CTX_set_default_passwd_cb_userdata(handle_, callback); #else // (OPENSSL_VERSION_NUMBER >= 0x10100000L) void* old_callback = handle_->default_passwd_callback_userdata; handle_->default_passwd_callback_userdata = callback; #endif // (OPENSSL_VERSION_NUMBER >= 0x10100000L) if (old_callback) delete static_cast( old_callback); SSL_CTX_set_default_passwd_cb(handle_, &context::password_callback_function); ec = asio::error_code(); return ec; } int context::password_callback_function( char* buf, int size, int purpose, void* data) { using namespace std; // For strncat and strlen. if (data) { detail::password_callback_base* callback = static_cast(data); std::string passwd = callback->call(static_cast(size), purpose ? context_base::for_writing : context_base::for_reading); #if defined(ASIO_HAS_SECURE_RTL) strcpy_s(buf, size, passwd.c_str()); #else // defined(ASIO_HAS_SECURE_RTL) *buf = '\0'; if (size > 0) strncat(buf, passwd.c_str(), size - 1); #endif // defined(ASIO_HAS_SECURE_RTL) return static_cast(strlen(buf)); } return 0; } BIO* context::make_buffer_bio(const const_buffer& b) { return ::BIO_new_mem_buf( const_cast(buffer_cast(b)), static_cast(buffer_size(b))); } #endif // !defined(ASIO_ENABLE_OLD_SSL) } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_IMPL_CONTEXT_IPP galera-26.4.3/asio/asio/ssl/impl/error.ipp0000664000177500017540000000366213540715002016660 0ustar dbartmy// // ssl/impl/error.ipp // ~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_IMPL_ERROR_IPP #define ASIO_SSL_IMPL_ERROR_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/ssl/error.hpp" #include "asio/ssl/detail/openssl_init.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace error { namespace detail { class ssl_category : public asio::error_category { public: const char* name() const ASIO_ERROR_CATEGORY_NOEXCEPT { return "asio.ssl"; } std::string message(int value) const { const char* s = ::ERR_reason_error_string(value); return s ? s : "asio.ssl error"; } }; } // namespace detail const asio::error_category& get_ssl_category() { static detail::ssl_category instance; return instance; } } // namespace error namespace ssl { namespace error { #if (OPENSSL_VERSION_NUMBER < 0x10100000L) && !defined(OPENSSL_IS_BORINGSSL) const asio::error_category& get_stream_category() { return asio::error::get_ssl_category(); } #else namespace detail { class stream_category : public asio::error_category { public: const char* name() const ASIO_ERROR_CATEGORY_NOEXCEPT { return "asio.ssl.stream"; } std::string message(int value) const { switch (value) { case stream_truncated: return "stream truncated"; default: return "asio.ssl.stream error"; } } }; } // namespace detail const asio::error_category& get_stream_category() { static detail::stream_category instance; return instance; } #endif } // namespace error } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_IMPL_ERROR_IPP galera-26.4.3/asio/asio/ssl/impl/rfc2818_verification.ipp0000664000177500017540000001131613540715002021361 0ustar dbartmy// // ssl/impl/rfc2818_verification.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_IMPL_RFC2818_VERIFICATION_IPP #define ASIO_SSL_IMPL_RFC2818_VERIFICATION_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_ENABLE_OLD_SSL) # include # include # include "asio/ip/address.hpp" # include "asio/ssl/rfc2818_verification.hpp" # include "asio/ssl/detail/openssl_types.hpp" #endif // !defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { #if !defined(ASIO_ENABLE_OLD_SSL) bool rfc2818_verification::operator()( bool preverified, verify_context& ctx) const { using namespace std; // For memcmp. // Don't bother looking at certificates that have failed pre-verification. if (!preverified) return false; // We're only interested in checking the certificate at the end of the chain. int depth = X509_STORE_CTX_get_error_depth(ctx.native_handle()); if (depth > 0) return true; // Try converting the host name to an address. If it is an address then we // need to look for an IP address in the certificate rather than a host name. asio::error_code ec; ip::address address = ip::address::from_string(host_, ec); bool is_address = !ec; X509* cert = X509_STORE_CTX_get_current_cert(ctx.native_handle()); // Go through the alternate names in the certificate looking for matching DNS // or IP address entries. 
GENERAL_NAMES* gens = static_cast( X509_get_ext_d2i(cert, NID_subject_alt_name, 0, 0)); for (int i = 0; i < sk_GENERAL_NAME_num(gens); ++i) { GENERAL_NAME* gen = sk_GENERAL_NAME_value(gens, i); if (gen->type == GEN_DNS && !is_address) { ASN1_IA5STRING* domain = gen->d.dNSName; if (domain->type == V_ASN1_IA5STRING && domain->data && domain->length) { const char* pattern = reinterpret_cast(domain->data); std::size_t pattern_length = domain->length; if (match_pattern(pattern, pattern_length, host_.c_str())) { GENERAL_NAMES_free(gens); return true; } } } else if (gen->type == GEN_IPADD && is_address) { ASN1_OCTET_STRING* ip_address = gen->d.iPAddress; if (ip_address->type == V_ASN1_OCTET_STRING && ip_address->data) { if (address.is_v4() && ip_address->length == 4) { ip::address_v4::bytes_type bytes = address.to_v4().to_bytes(); if (memcmp(bytes.data(), ip_address->data, 4) == 0) { GENERAL_NAMES_free(gens); return true; } } else if (address.is_v6() && ip_address->length == 16) { ip::address_v6::bytes_type bytes = address.to_v6().to_bytes(); if (memcmp(bytes.data(), ip_address->data, 16) == 0) { GENERAL_NAMES_free(gens); return true; } } } } } GENERAL_NAMES_free(gens); // No match in the alternate names, so try the common names. We should only // use the "most specific" common name, which is the last one in the list. 
X509_NAME* name = X509_get_subject_name(cert); int i = -1; ASN1_STRING* common_name = 0; while ((i = X509_NAME_get_index_by_NID(name, NID_commonName, i)) >= 0) { X509_NAME_ENTRY* name_entry = X509_NAME_get_entry(name, i); common_name = X509_NAME_ENTRY_get_data(name_entry); } if (common_name && common_name->data && common_name->length) { const char* pattern = reinterpret_cast(common_name->data); std::size_t pattern_length = common_name->length; if (match_pattern(pattern, pattern_length, host_.c_str())) return true; } return false; } bool rfc2818_verification::match_pattern(const char* pattern, std::size_t pattern_length, const char* host) { using namespace std; // For tolower. const char* p = pattern; const char* p_end = p + pattern_length; const char* h = host; while (p != p_end && *h) { if (*p == '*') { ++p; while (*h && *h != '.') if (match_pattern(p, p_end - p, h++)) return true; } else if (tolower(*p) == tolower(*h)) { ++p; ++h; } else { return false; } } return p == p_end && !*h; } #endif // !defined(ASIO_ENABLE_OLD_SSL) } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_IMPL_RFC2818_VERIFICATION_IPP galera-26.4.3/asio/asio/ssl/impl/context.hpp0000664000177500017540000000355113540715002017207 0ustar dbartmy// // ssl/impl/context.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2005 Voipster / Indrek dot Juhani at voipster dot com // Copyright (c) 2005-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_IMPL_CONTEXT_HPP #define ASIO_SSL_IMPL_CONTEXT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_ENABLE_OLD_SSL) # include "asio/detail/throw_error.hpp" #endif // !defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { #if !defined(ASIO_ENABLE_OLD_SSL) template void context::set_verify_callback(VerifyCallback callback) { asio::error_code ec; this->set_verify_callback(callback, ec); asio::detail::throw_error(ec, "set_verify_callback"); } template asio::error_code context::set_verify_callback( VerifyCallback callback, asio::error_code& ec) { return do_set_verify_callback( new detail::verify_callback(callback), ec); } template void context::set_password_callback(PasswordCallback callback) { asio::error_code ec; this->set_password_callback(callback, ec); asio::detail::throw_error(ec, "set_password_callback"); } template asio::error_code context::set_password_callback( PasswordCallback callback, asio::error_code& ec) { return do_set_password_callback( new detail::password_callback(callback), ec); } #endif // !defined(ASIO_ENABLE_OLD_SSL) } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_IMPL_CONTEXT_HPP galera-26.4.3/asio/asio/ssl/impl/src.hpp0000664000177500017540000000137013540715002016307 0ustar dbartmy// // impl/ssl/src.hpp // ~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_IMPL_SRC_HPP #define ASIO_SSL_IMPL_SRC_HPP #define ASIO_SOURCE #include "asio/detail/config.hpp" #if defined(ASIO_HEADER_ONLY) # error Do not compile Asio library source with ASIO_HEADER_ONLY defined #endif #include "asio/ssl/impl/context.ipp" #include "asio/ssl/impl/error.ipp" #include "asio/ssl/detail/impl/engine.ipp" #include "asio/ssl/detail/impl/openssl_init.ipp" #include "asio/ssl/impl/rfc2818_verification.ipp" #endif // ASIO_SSL_IMPL_SRC_HPP galera-26.4.3/asio/asio/ssl/stream.hpp0000664000177500017540000006214413540715002016060 0ustar dbartmy// // ssl/stream.hpp // ~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_STREAM_HPP #define ASIO_SSL_STREAM_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_ENABLE_OLD_SSL) # include "asio/ssl/old/stream.hpp" #else // defined(ASIO_ENABLE_OLD_SSL) # include "asio/async_result.hpp" # include "asio/detail/buffer_sequence_adapter.hpp" # include "asio/detail/handler_type_requirements.hpp" # include "asio/detail/noncopyable.hpp" # include "asio/detail/type_traits.hpp" # include "asio/ssl/context.hpp" # include "asio/ssl/detail/buffered_handshake_op.hpp" # include "asio/ssl/detail/handshake_op.hpp" # include "asio/ssl/detail/io.hpp" # include "asio/ssl/detail/read_op.hpp" # include "asio/ssl/detail/shutdown_op.hpp" # include "asio/ssl/detail/stream_core.hpp" # include "asio/ssl/detail/write_op.hpp" # include "asio/ssl/stream_base.hpp" #endif // defined(ASIO_ENABLE_OLD_SSL) #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { #if defined(ASIO_ENABLE_OLD_SSL) using 
asio::ssl::old::stream; #else // defined(ASIO_ENABLE_OLD_SSL) /// Provides stream-oriented functionality using SSL. /** * The stream class template provides asynchronous and blocking stream-oriented * functionality using SSL. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. The application must also ensure that all * asynchronous operations are performed within the same implicit or explicit * strand. * * @par Example * To use the SSL stream template with an ip::tcp::socket, you would write: * @code * asio::io_service io_service; * asio::ssl::context ctx(asio::ssl::context::sslv23); * asio::ssl::stream sock(io_service, ctx); * @endcode * * @par Concepts: * AsyncReadStream, AsyncWriteStream, Stream, SyncReadStream, SyncWriteStream. */ template class stream : public stream_base, private noncopyable { public: /// The native handle type of the SSL stream. typedef SSL* native_handle_type; /// Structure for use with deprecated impl_type. struct impl_struct { SSL* ssl; }; /// (Deprecated: Use native_handle_type.) The underlying implementation type. typedef impl_struct* impl_type; /// The type of the next layer. typedef typename remove_reference::type next_layer_type; /// The type of the lowest layer. typedef typename next_layer_type::lowest_layer_type lowest_layer_type; /// Construct a stream. /** * This constructor creates a stream and initialises the underlying stream * object. * * @param arg The argument to be passed to initialise the underlying stream. * * @param ctx The SSL context to be used for the stream. */ template stream(Arg& arg, context& ctx) : next_layer_(arg), core_(ctx.native_handle(), next_layer_.lowest_layer().get_io_service()) { backwards_compatible_impl_.ssl = core_.engine_.native_handle(); } /// Destructor. ~stream() { } /// Get the io_service associated with the object. /** * This function may be used to obtain the io_service object that the stream * uses to dispatch handlers for asynchronous operations. 
* * @return A reference to the io_service object that stream will use to * dispatch handlers. Ownership is not transferred to the caller. */ asio::io_service& get_io_service() { return next_layer_.lowest_layer().get_io_service(); } /// Get the underlying implementation in the native type. /** * This function may be used to obtain the underlying implementation of the * context. This is intended to allow access to context functionality that is * not otherwise provided. * * @par Example * The native_handle() function returns a pointer of type @c SSL* that is * suitable for passing to functions such as @c SSL_get_verify_result and * @c SSL_get_peer_certificate: * @code * asio::ssl::stream sock(io_service, ctx); * * // ... establish connection and perform handshake ... * * if (X509* cert = SSL_get_peer_certificate(sock.native_handle())) * { * if (SSL_get_verify_result(sock.native_handle()) == X509_V_OK) * { * // ... * } * } * @endcode */ native_handle_type native_handle() { return core_.engine_.native_handle(); } /// (Deprecated: Use native_handle().) Get the underlying implementation in /// the native type. /** * This function may be used to obtain the underlying implementation of the * context. This is intended to allow access to stream functionality that is * not otherwise provided. */ impl_type impl() { return &backwards_compatible_impl_; } /// Get a reference to the next layer. /** * This function returns a reference to the next layer in a stack of stream * layers. * * @return A reference to the next layer in the stack of stream layers. * Ownership is not transferred to the caller. */ const next_layer_type& next_layer() const { return next_layer_; } /// Get a reference to the next layer. /** * This function returns a reference to the next layer in a stack of stream * layers. * * @return A reference to the next layer in the stack of stream layers. * Ownership is not transferred to the caller. 
*/ next_layer_type& next_layer() { return next_layer_; } /// Get a reference to the lowest layer. /** * This function returns a reference to the lowest layer in a stack of * stream layers. * * @return A reference to the lowest layer in the stack of stream layers. * Ownership is not transferred to the caller. */ lowest_layer_type& lowest_layer() { return next_layer_.lowest_layer(); } /// Get a reference to the lowest layer. /** * This function returns a reference to the lowest layer in a stack of * stream layers. * * @return A reference to the lowest layer in the stack of stream layers. * Ownership is not transferred to the caller. */ const lowest_layer_type& lowest_layer() const { return next_layer_.lowest_layer(); } /// Set the peer verification mode. /** * This function may be used to configure the peer verification mode used by * the stream. The new mode will override the mode inherited from the context. * * @param v A bitmask of peer verification modes. See @ref verify_mode for * available values. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_set_verify. */ void set_verify_mode(verify_mode v) { asio::error_code ec; set_verify_mode(v, ec); asio::detail::throw_error(ec, "set_verify_mode"); } /// Set the peer verification mode. /** * This function may be used to configure the peer verification mode used by * the stream. The new mode will override the mode inherited from the context. * * @param v A bitmask of peer verification modes. See @ref verify_mode for * available values. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_set_verify. */ asio::error_code set_verify_mode( verify_mode v, asio::error_code& ec) { return core_.engine_.set_verify_mode(v, ec); } /// Set the peer verification depth. /** * This function may be used to configure the maximum verification depth * allowed by the stream. * * @param depth Maximum depth for the certificate chain verification that * shall be allowed. 
* * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_set_verify_depth. */ void set_verify_depth(int depth) { asio::error_code ec; set_verify_depth(depth, ec); asio::detail::throw_error(ec, "set_verify_depth"); } /// Set the peer verification depth. /** * This function may be used to configure the maximum verification depth * allowed by the stream. * * @param depth Maximum depth for the certificate chain verification that * shall be allowed. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_set_verify_depth. */ asio::error_code set_verify_depth( int depth, asio::error_code& ec) { return core_.engine_.set_verify_depth(depth, ec); } /// Set the callback used to verify peer certificates. /** * This function is used to specify a callback function that will be called * by the implementation when it needs to verify a peer certificate. * * @param callback The function object to be used for verifying a certificate. * The function signature of the handler must be: * @code bool verify_callback( * bool preverified, // True if the certificate passed pre-verification. * verify_context& ctx // The peer certificate and other context. * ); @endcode * The return value of the callback is true if the certificate has passed * verification, false otherwise. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_set_verify. */ template void set_verify_callback(VerifyCallback callback) { asio::error_code ec; this->set_verify_callback(callback, ec); asio::detail::throw_error(ec, "set_verify_callback"); } /// Set the callback used to verify peer certificates. /** * This function is used to specify a callback function that will be called * by the implementation when it needs to verify a peer certificate. * * @param callback The function object to be used for verifying a certificate. 
* The function signature of the handler must be: * @code bool verify_callback( * bool preverified, // True if the certificate passed pre-verification. * verify_context& ctx // The peer certificate and other context. * ); @endcode * The return value of the callback is true if the certificate has passed * verification, false otherwise. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_set_verify. */ template asio::error_code set_verify_callback(VerifyCallback callback, asio::error_code& ec) { return core_.engine_.set_verify_callback( new detail::verify_callback(callback), ec); } /// Perform SSL handshaking. /** * This function is used to perform SSL handshaking on the stream. The * function call will block until handshaking is complete or an error occurs. * * @param type The type of handshaking to be performed, i.e. as a client or as * a server. * * @throws asio::system_error Thrown on failure. */ void handshake(handshake_type type) { asio::error_code ec; handshake(type, ec); asio::detail::throw_error(ec, "handshake"); } /// Perform SSL handshaking. /** * This function is used to perform SSL handshaking on the stream. The * function call will block until handshaking is complete or an error occurs. * * @param type The type of handshaking to be performed, i.e. as a client or as * a server. * * @param ec Set to indicate what error occurred, if any. */ asio::error_code handshake(handshake_type type, asio::error_code& ec) { detail::io(next_layer_, core_, detail::handshake_op(type), ec); return ec; } /// Perform SSL handshaking. /** * This function is used to perform SSL handshaking on the stream. The * function call will block until handshaking is complete or an error occurs. * * @param type The type of handshaking to be performed, i.e. as a client or as * a server. * * @param buffers The buffered data to be reused for the handshake. * * @throws asio::system_error Thrown on failure. 
*/ template void handshake(handshake_type type, const ConstBufferSequence& buffers) { asio::error_code ec; handshake(type, buffers, ec); asio::detail::throw_error(ec, "handshake"); } /// Perform SSL handshaking. /** * This function is used to perform SSL handshaking on the stream. The * function call will block until handshaking is complete or an error occurs. * * @param type The type of handshaking to be performed, i.e. as a client or as * a server. * * @param buffers The buffered data to be reused for the handshake. * * @param ec Set to indicate what error occurred, if any. */ template asio::error_code handshake(handshake_type type, const ConstBufferSequence& buffers, asio::error_code& ec) { detail::io(next_layer_, core_, detail::buffered_handshake_op(type, buffers), ec); return ec; } /// Start an asynchronous SSL handshake. /** * This function is used to asynchronously perform an SSL handshake on the * stream. This function call always returns immediately. * * @param type The type of handshaking to be performed, i.e. as a client or as * a server. * * @param handler The handler to be called when the handshake operation * completes. Copies will be made of the handler as required. The equivalent * function signature of the handler must be: * @code void handler( * const asio::error_code& error // Result of operation. * ); @endcode */ template ASIO_INITFN_RESULT_TYPE(HandshakeHandler, void (asio::error_code)) async_handshake(handshake_type type, ASIO_MOVE_ARG(HandshakeHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a HandshakeHandler. ASIO_HANDSHAKE_HANDLER_CHECK(HandshakeHandler, handler) type_check; asio::detail::async_result_init< HandshakeHandler, void (asio::error_code)> init( ASIO_MOVE_CAST(HandshakeHandler)(handler)); detail::async_io(next_layer_, core_, detail::handshake_op(type), init.handler); return init.result.get(); } /// Start an asynchronous SSL handshake. 
/** * This function is used to asynchronously perform an SSL handshake on the * stream. This function call always returns immediately. * * @param type The type of handshaking to be performed, i.e. as a client or as * a server. * * @param buffers The buffered data to be reused for the handshake. Although * the buffers object may be copied as necessary, ownership of the underlying * buffers is retained by the caller, which must guarantee that they remain * valid until the handler is called. * * @param handler The handler to be called when the handshake operation * completes. Copies will be made of the handler as required. The equivalent * function signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Amount of buffers used in handshake. * ); @endcode */ template ASIO_INITFN_RESULT_TYPE(BufferedHandshakeHandler, void (asio::error_code, std::size_t)) async_handshake(handshake_type type, const ConstBufferSequence& buffers, ASIO_MOVE_ARG(BufferedHandshakeHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a BufferedHandshakeHandler. ASIO_BUFFERED_HANDSHAKE_HANDLER_CHECK( BufferedHandshakeHandler, handler) type_check; asio::detail::async_result_init init( ASIO_MOVE_CAST(BufferedHandshakeHandler)(handler)); detail::async_io(next_layer_, core_, detail::buffered_handshake_op(type, buffers), init.handler); return init.result.get(); } /// Shut down SSL on the stream. /** * This function is used to shut down SSL on the stream. The function call * will block until SSL has been shut down or an error occurs. * * @throws asio::system_error Thrown on failure. */ void shutdown() { asio::error_code ec; shutdown(ec); asio::detail::throw_error(ec, "shutdown"); } /// Shut down SSL on the stream. /** * This function is used to shut down SSL on the stream. 
The function call * will block until SSL has been shut down or an error occurs. * * @param ec Set to indicate what error occurred, if any. */ asio::error_code shutdown(asio::error_code& ec) { detail::io(next_layer_, core_, detail::shutdown_op(), ec); return ec; } /// Asynchronously shut down SSL on the stream. /** * This function is used to asynchronously shut down SSL on the stream. This * function call always returns immediately. * * @param handler The handler to be called when the handshake operation * completes. Copies will be made of the handler as required. The equivalent * function signature of the handler must be: * @code void handler( * const asio::error_code& error // Result of operation. * ); @endcode */ template ASIO_INITFN_RESULT_TYPE(ShutdownHandler, void (asio::error_code)) async_shutdown(ASIO_MOVE_ARG(ShutdownHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ShutdownHandler. ASIO_SHUTDOWN_HANDLER_CHECK(ShutdownHandler, handler) type_check; asio::detail::async_result_init< ShutdownHandler, void (asio::error_code)> init( ASIO_MOVE_CAST(ShutdownHandler)(handler)); detail::async_io(next_layer_, core_, detail::shutdown_op(), init.handler); return init.result.get(); } /// Write some data to the stream. /** * This function is used to write data on the stream. The function call will * block until one or more bytes of data has been written successfully, or * until an error occurs. * * @param buffers The data to be written. * * @returns The number of bytes written. * * @throws asio::system_error Thrown on failure. * * @note The write_some operation may not transmit all of the data to the * peer. Consider using the @ref write function if you need to ensure that all * data is written before the blocking operation completes. 
*/ template std::size_t write_some(const ConstBufferSequence& buffers) { asio::error_code ec; std::size_t n = write_some(buffers, ec); asio::detail::throw_error(ec, "write_some"); return n; } /// Write some data to the stream. /** * This function is used to write data on the stream. The function call will * block until one or more bytes of data has been written successfully, or * until an error occurs. * * @param buffers The data to be written to the stream. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes written. Returns 0 if an error occurred. * * @note The write_some operation may not transmit all of the data to the * peer. Consider using the @ref write function if you need to ensure that all * data is written before the blocking operation completes. */ template std::size_t write_some(const ConstBufferSequence& buffers, asio::error_code& ec) { return detail::io(next_layer_, core_, detail::write_op(buffers), ec); } /// Start an asynchronous write. /** * This function is used to asynchronously write one or more bytes of data to * the stream. The function call always returns immediately. * * @param buffers The data to be written to the stream. Although the buffers * object may be copied as necessary, ownership of the underlying buffers is * retained by the caller, which must guarantee that they remain valid until * the handler is called. * * @param handler The handler to be called when the write operation completes. * Copies will be made of the handler as required. The equivalent function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes written. * ); @endcode * * @note The async_write_some operation may not transmit all of the data to * the peer. Consider using the @ref async_write function if you need to * ensure that all data is written before the blocking operation completes. 
*/ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write_some(const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a WriteHandler. ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; asio::detail::async_result_init< WriteHandler, void (asio::error_code, std::size_t)> init( ASIO_MOVE_CAST(WriteHandler)(handler)); detail::async_io(next_layer_, core_, detail::write_op(buffers), init.handler); return init.result.get(); } /// Read some data from the stream. /** * This function is used to read data from the stream. The function call will * block until one or more bytes of data has been read successfully, or until * an error occurs. * * @param buffers The buffers into which the data will be read. * * @returns The number of bytes read. * * @throws asio::system_error Thrown on failure. * * @note The read_some operation may not read all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that the * requested amount of data is read before the blocking operation completes. */ template std::size_t read_some(const MutableBufferSequence& buffers) { asio::error_code ec; std::size_t n = read_some(buffers, ec); asio::detail::throw_error(ec, "read_some"); return n; } /// Read some data from the stream. /** * This function is used to read data from the stream. The function call will * block until one or more bytes of data has been read successfully, or until * an error occurs. * * @param buffers The buffers into which the data will be read. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes read. Returns 0 if an error occurred. * * @note The read_some operation may not read all of the requested number of * bytes. 
Consider using the @ref read function if you need to ensure that the * requested amount of data is read before the blocking operation completes. */ template std::size_t read_some(const MutableBufferSequence& buffers, asio::error_code& ec) { return detail::io(next_layer_, core_, detail::read_op(buffers), ec); } /// Start an asynchronous read. /** * This function is used to asynchronously read one or more bytes of data from * the stream. The function call always returns immediately. * * @param buffers The buffers into which the data will be read. Although the * buffers object may be copied as necessary, ownership of the underlying * buffers is retained by the caller, which must guarantee that they remain * valid until the handler is called. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The equivalent function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes read. * ); @endcode * * @note The async_read_some operation may not read all of the requested * number of bytes. Consider using the @ref async_read function if you need to * ensure that the requested amount of data is read before the asynchronous * operation completes. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_some(const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ReadHandler. 
ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; asio::detail::async_result_init< ReadHandler, void (asio::error_code, std::size_t)> init( ASIO_MOVE_CAST(ReadHandler)(handler)); detail::async_io(next_layer_, core_, detail::read_op(buffers), init.handler); return init.result.get(); } private: Stream next_layer_; detail::stream_core core_; impl_struct backwards_compatible_impl_; }; #endif // defined(ASIO_ENABLE_OLD_SSL) } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_STREAM_HPP galera-26.4.3/asio/asio/ssl/context_base.hpp0000664000177500017540000001111513540715002017233 0ustar dbartmy// // ssl/context_base.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_CONTEXT_BASE_HPP #define ASIO_SSL_CONTEXT_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/ssl/detail/openssl_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { /// The context_base class is used as a base for the basic_context class /// template so that we have a common place to define various enums. class context_base { public: /// Different methods supported by a context. enum method { /// Generic SSL version 2. sslv2, /// SSL version 2 client. sslv2_client, /// SSL version 2 server. sslv2_server, /// Generic SSL version 3. sslv3, /// SSL version 3 client. sslv3_client, /// SSL version 3 server. sslv3_server, /// Generic TLS version 1. tlsv1, /// TLS version 1 client. tlsv1_client, /// TLS version 1 server. tlsv1_server, /// Generic SSL/TLS. sslv23, /// SSL/TLS client. sslv23_client, /// SSL/TLS server. sslv23_server, /// Generic TLS version 1.1. 
tlsv11, /// TLS version 1.1 client. tlsv11_client, /// TLS version 1.1 server. tlsv11_server, /// Generic TLS version 1.2. tlsv12, /// TLS version 1.2 client. tlsv12_client, /// TLS version 1.2 server. tlsv12_server }; /// Bitmask type for SSL options. typedef long options; #if defined(GENERATING_DOCUMENTATION) /// Implement various bug workarounds. static const long default_workarounds = implementation_defined; /// Always create a new key when using tmp_dh parameters. static const long single_dh_use = implementation_defined; /// Disable SSL v2. static const long no_sslv2 = implementation_defined; /// Disable SSL v3. static const long no_sslv3 = implementation_defined; /// Disable TLS v1. static const long no_tlsv1 = implementation_defined; /// Disable TLS v1.1. static const long no_tlsv1_1 = implementation_defined; /// Disable TLS v1.2. static const long no_tlsv1_2 = implementation_defined; /// Disable compression. Compression is disabled by default. static const long no_compression = implementation_defined; #else ASIO_STATIC_CONSTANT(long, default_workarounds = SSL_OP_ALL); ASIO_STATIC_CONSTANT(long, single_dh_use = SSL_OP_SINGLE_DH_USE); ASIO_STATIC_CONSTANT(long, no_sslv2 = SSL_OP_NO_SSLv2); ASIO_STATIC_CONSTANT(long, no_sslv3 = SSL_OP_NO_SSLv3); ASIO_STATIC_CONSTANT(long, no_tlsv1 = SSL_OP_NO_TLSv1); # if defined(SSL_OP_NO_TLSv1_1) ASIO_STATIC_CONSTANT(long, no_tlsv1_1 = SSL_OP_NO_TLSv1_1); # else // defined(SSL_OP_NO_TLSv1_1) ASIO_STATIC_CONSTANT(long, no_tlsv1_1 = 0x10000000L); # endif // defined(SSL_OP_NO_TLSv1_1) # if defined(SSL_OP_NO_TLSv1_2) ASIO_STATIC_CONSTANT(long, no_tlsv1_2 = SSL_OP_NO_TLSv1_2); # else // defined(SSL_OP_NO_TLSv1_2) ASIO_STATIC_CONSTANT(long, no_tlsv1_2 = 0x08000000L); # endif // defined(SSL_OP_NO_TLSv1_2) # if defined(SSL_OP_NO_COMPRESSION) ASIO_STATIC_CONSTANT(long, no_compression = SSL_OP_NO_COMPRESSION); # else // defined(SSL_OP_NO_COMPRESSION) ASIO_STATIC_CONSTANT(long, no_compression = 0x20000L); # endif // 
defined(SSL_OP_NO_COMPRESSION) #endif /// File format types. enum file_format { /// ASN.1 file. asn1, /// PEM file. pem }; #if !defined(GENERATING_DOCUMENTATION) // The following types and constants are preserved for backward compatibility. // New programs should use the equivalents of the same names that are defined // in the asio::ssl namespace. typedef int verify_mode; ASIO_STATIC_CONSTANT(int, verify_none = SSL_VERIFY_NONE); ASIO_STATIC_CONSTANT(int, verify_peer = SSL_VERIFY_PEER); ASIO_STATIC_CONSTANT(int, verify_fail_if_no_peer_cert = SSL_VERIFY_FAIL_IF_NO_PEER_CERT); ASIO_STATIC_CONSTANT(int, verify_client_once = SSL_VERIFY_CLIENT_ONCE); #endif /// Purpose of PEM password. enum password_purpose { /// The password is needed for reading/decryption. for_reading, /// The password is needed for writing/encryption. for_writing }; protected: /// Protected destructor to prevent deletion through this type. ~context_base() { } }; } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_CONTEXT_BASE_HPP galera-26.4.3/asio/asio/ssl/old/0000775000177500017540000000000013540715002014623 5ustar dbartmygalera-26.4.3/asio/asio/ssl/old/detail/0000775000177500017540000000000013540715002016065 5ustar dbartmygalera-26.4.3/asio/asio/ssl/old/detail/openssl_operation.hpp0000664000177500017540000003247413540715002022353 0ustar dbartmy// // ssl/old/detail/openssl_operation.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2005 Voipster / Indrek dot Juhani at voipster dot com // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_OLD_DETAIL_OPENSSL_OPERATION_HPP #define ASIO_SSL_OLD_DETAIL_OPENSSL_OPERATION_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/buffer.hpp" #include "asio/detail/assert.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/placeholders.hpp" #include "asio/ssl/detail/openssl_types.hpp" #include "asio/ssl/error.hpp" #include "asio/strand.hpp" #include "asio/system_error.hpp" #include "asio/write.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace old { namespace detail { typedef boost::function ssl_primitive_func; typedef boost::function user_handler_func; // Network send_/recv buffer implementation // // class net_buffer { static const int NET_BUF_SIZE = 16*1024 + 256; // SSL record size + spare unsigned char buf_[NET_BUF_SIZE]; unsigned char* data_start_; unsigned char* data_end_; public: net_buffer() { data_start_ = data_end_ = buf_; } unsigned char* get_unused_start() { return data_end_; } unsigned char* get_data_start() { return data_start_; } size_t get_unused_len() { return (NET_BUF_SIZE - (data_end_ - buf_)); } size_t get_data_len() { return (data_end_ - data_start_); } void data_added(size_t count) { data_end_ += count; data_end_ = data_end_ > (buf_ + NET_BUF_SIZE)? 
(buf_ + NET_BUF_SIZE): data_end_; } void data_removed(size_t count) { data_start_ += count; if (data_start_ >= data_end_) reset(); } void reset() { data_start_ = buf_; data_end_ = buf_; } bool has_data() { return (data_start_ < data_end_); } }; // class net_buffer // // Operation class // // template class openssl_operation { public: // Constructor for asynchronous operations openssl_operation(ssl_primitive_func primitive, Stream& socket, net_buffer& recv_buf, SSL* session, BIO* ssl_bio, user_handler_func handler, asio::io_service::strand& strand ) : primitive_(primitive) , user_handler_(handler) , strand_(&strand) , recv_buf_(recv_buf) , socket_(socket) , ssl_bio_(ssl_bio) , session_(session) { write_ = boost::bind( &openssl_operation::do_async_write, this, boost::arg<1>(), boost::arg<2>() ); read_ = boost::bind( &openssl_operation::do_async_read, this ); handler_= boost::bind( &openssl_operation::async_user_handler, this, boost::arg<1>(), boost::arg<2>() ); } // Constructor for synchronous operations openssl_operation(ssl_primitive_func primitive, Stream& socket, net_buffer& recv_buf, SSL* session, BIO* ssl_bio) : primitive_(primitive) , strand_(0) , recv_buf_(recv_buf) , socket_(socket) , ssl_bio_(ssl_bio) , session_(session) { write_ = boost::bind( &openssl_operation::do_sync_write, this, boost::arg<1>(), boost::arg<2>() ); read_ = boost::bind( &openssl_operation::do_sync_read, this ); handler_ = boost::bind( &openssl_operation::sync_user_handler, this, boost::arg<1>(), boost::arg<2>() ); } // Start operation // In case of asynchronous it returns 0, in sync mode returns success code // or throws an error... int start() { int rc = primitive_( session_ ); bool is_operation_done = (rc > 0); // For connect/accept/shutdown, the operation // is done, when return code is 1 // for write, it is done, when is retcode > 0 // for read, it is done when retcode > 0 int error_code = !is_operation_done ? 
::SSL_get_error( session_, rc ) : 0; int sys_error_code = ERR_get_error(); if (error_code == SSL_ERROR_SSL) return handler_(asio::error_code( sys_error_code, asio::error::get_ssl_category()), rc); bool is_read_needed = (error_code == SSL_ERROR_WANT_READ); bool is_write_needed = (error_code == SSL_ERROR_WANT_WRITE || ::BIO_ctrl_pending( ssl_bio_ )); bool is_shut_down_received = ((::SSL_get_shutdown( session_ ) & SSL_RECEIVED_SHUTDOWN) == SSL_RECEIVED_SHUTDOWN); bool is_shut_down_sent = ((::SSL_get_shutdown( session_ ) & SSL_SENT_SHUTDOWN) == SSL_SENT_SHUTDOWN); if (is_shut_down_sent && is_shut_down_received && is_operation_done && !is_write_needed) // SSL connection is shut down cleanly return handler_(asio::error_code(), 1); if (is_shut_down_received && !is_operation_done) // Shutdown has been requested, while we were reading or writing... // abort our action... return handler_(asio::error::shut_down, 0); if (!is_operation_done && !is_read_needed && !is_write_needed && !is_shut_down_sent) { // The operation has failed... It is not completed and does // not want network communication nor does want to send shutdown out... if (error_code == SSL_ERROR_SYSCALL) { return handler_(asio::error_code( sys_error_code, asio::error::system_category), rc); } else { return handler_(asio::error_code( sys_error_code, asio::error::get_ssl_category()), rc); } } if (!is_operation_done && !is_write_needed) { // We may have left over data that we can pass to SSL immediately if (recv_buf_.get_data_len() > 0) { // Pass the buffered data to SSL int written = ::BIO_write ( ssl_bio_, recv_buf_.get_data_start(), recv_buf_.get_data_len() ); if (written > 0) { recv_buf_.data_removed(written); } else if (written < 0) { if (!BIO_should_retry(ssl_bio_)) { // Some serios error with BIO.... 
return handler_(asio::error::no_recovery, 0); } } return start(); } else if (is_read_needed || (is_shut_down_sent && !is_shut_down_received)) { return read_(); } } // Continue with operation, flush any SSL data out to network... return write_(is_operation_done, rc); } // Private implementation private: typedef boost::function int_handler_func; typedef boost::function write_func; typedef boost::function read_func; ssl_primitive_func primitive_; user_handler_func user_handler_; asio::io_service::strand* strand_; write_func write_; read_func read_; int_handler_func handler_; net_buffer send_buf_; // buffers for network IO // The recv buffer is owned by the stream, not the operation, since there can // be left over bytes after passing the data up to the application, and these // bytes need to be kept around for the next read operation issued by the // application. net_buffer& recv_buf_; Stream& socket_; BIO* ssl_bio_; SSL* session_; // int sync_user_handler(const asio::error_code& error, int rc) { if (!error) return rc; throw asio::system_error(error); } int async_user_handler(asio::error_code error, int rc) { if (rc < 0) { if (!error) error = asio::error::no_recovery; rc = 0; } user_handler_(error, rc); return 0; } // Writes bytes asynchronously from SSL to NET int do_async_write(bool is_operation_done, int rc) { int len = ::BIO_ctrl_pending( ssl_bio_ ); if ( len ) { // There is something to write into net, do it... len = (int)send_buf_.get_unused_len() > len? len: send_buf_.get_unused_len(); if (len == 0) { // In case our send buffer is full, we have just to wait until // previous send to complete... 
return 0; } // Read outgoing data from bio len = ::BIO_read( ssl_bio_, send_buf_.get_unused_start(), len); if (len > 0) { unsigned char *data_start = send_buf_.get_unused_start(); send_buf_.data_added(len); ASIO_ASSERT(strand_); asio::async_write ( socket_, asio::buffer(data_start, len), strand_->wrap ( boost::bind ( &openssl_operation::async_write_handler, this, is_operation_done, rc, asio::placeholders::error, asio::placeholders::bytes_transferred ) ) ); return 0; } else if (!BIO_should_retry(ssl_bio_)) { // Seems like fatal error // reading from SSL BIO has failed... handler_(asio::error::no_recovery, 0); return 0; } } if (is_operation_done) { // Finish the operation, with success handler_(asio::error_code(), rc); return 0; } // OPeration is not done and writing to net has been made... // start operation again start(); return 0; } void async_write_handler(bool is_operation_done, int rc, const asio::error_code& error, size_t bytes_sent) { if (!error) { // Remove data from send buffer send_buf_.data_removed(bytes_sent); if (is_operation_done) handler_(asio::error_code(), rc); else // Since the operation was not completed, try it again... start(); } else handler_(error, rc); } int do_async_read() { // Wait for new data ASIO_ASSERT(strand_); socket_.async_read_some ( asio::buffer(recv_buf_.get_unused_start(), recv_buf_.get_unused_len()), strand_->wrap ( boost::bind ( &openssl_operation::async_read_handler, this, asio::placeholders::error, asio::placeholders::bytes_transferred ) ) ); return 0; } void async_read_handler(const asio::error_code& error, size_t bytes_recvd) { if (!error) { recv_buf_.data_added(bytes_recvd); // Pass the received data to SSL int written = ::BIO_write ( ssl_bio_, recv_buf_.get_data_start(), recv_buf_.get_data_len() ); if (written > 0) { recv_buf_.data_removed(written); } else if (written < 0) { if (!BIO_should_retry(ssl_bio_)) { // Some serios error with BIO.... 
handler_(asio::error::no_recovery, 0); return; } } // and try the SSL primitive again start(); } else { // Error in network level... // SSL can't continue either... handler_(error, 0); } } // Syncronous functions... int do_sync_write(bool is_operation_done, int rc) { int len = ::BIO_ctrl_pending( ssl_bio_ ); if ( len ) { // There is something to write into net, do it... len = (int)send_buf_.get_unused_len() > len? len: send_buf_.get_unused_len(); // Read outgoing data from bio len = ::BIO_read( ssl_bio_, send_buf_.get_unused_start(), len); if (len > 0) { size_t sent_len = asio::write( socket_, asio::buffer(send_buf_.get_unused_start(), len) ); send_buf_.data_added(len); send_buf_.data_removed(sent_len); } else if (!BIO_should_retry(ssl_bio_)) { // Seems like fatal error // reading from SSL BIO has failed... throw asio::system_error(asio::error::no_recovery); } } if (is_operation_done) // Finish the operation, with success return rc; // Operation is not finished, start again. return start(); } int do_sync_read() { size_t len = socket_.read_some ( asio::buffer(recv_buf_.get_unused_start(), recv_buf_.get_unused_len()) ); // Write data to ssl recv_buf_.data_added(len); // Pass the received data to SSL int written = ::BIO_write ( ssl_bio_, recv_buf_.get_data_start(), recv_buf_.get_data_len() ); if (written > 0) { recv_buf_.data_removed(written); } else if (written < 0) { if (!BIO_should_retry(ssl_bio_)) { // Some serios error with BIO.... 
throw asio::system_error(asio::error::no_recovery); } } // Try the operation again return start(); } }; // class openssl_operation } // namespace detail } // namespace old } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_OLD_DETAIL_OPENSSL_OPERATION_HPP galera-26.4.3/asio/asio/ssl/old/detail/openssl_context_service.hpp0000664000177500017540000002376013540715002023555 0ustar dbartmy// // ssl/old/detail/openssl_context_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2005 Voipster / Indrek dot Juhani at voipster dot com // Copyright (c) 2005-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_OLD_DETAIL_OPENSSL_CONTEXT_SERVICE_HPP #define ASIO_SSL_OLD_DETAIL_OPENSSL_CONTEXT_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/ssl/context_base.hpp" #include "asio/ssl/detail/openssl_init.hpp" #include "asio/ssl/detail/openssl_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace old { namespace detail { class openssl_context_service : public asio::detail::service_base { public: // The native type of the context. typedef ::SSL_CTX* impl_type; // The type for the password callback function object. typedef boost::function password_callback_type; // Constructor. openssl_context_service(asio::io_service& io_service) : asio::detail::service_base(io_service) { } // Destroy all user-defined handler objects owned by the service. void shutdown_service() { } // Return a null context implementation. 
static impl_type null() { return 0; } // Create a new context implementation. void create(impl_type& impl, context_base::method m) { switch (m) { #if defined(OPENSSL_NO_SSL2) case context_base::sslv2: case context_base::sslv2_client: case context_base::sslv2_server: asio::detail::throw_error(asio::error::invalid_argument); break; #else // defined(OPENSSL_NO_SSL2) case context_base::sslv2: impl = ::SSL_CTX_new(::SSLv2_method()); break; case context_base::sslv2_client: impl = ::SSL_CTX_new(::SSLv2_client_method()); break; case context_base::sslv2_server: impl = ::SSL_CTX_new(::SSLv2_server_method()); break; #endif // defined(OPENSSL_NO_SSL2) #if defined(OPENSSL_NO_SSL3) case context_base::sslv3: case context_base::sslv3_client: case context_base::sslv3_server: asio::detail::throw_error(asio::error::invalid_argument); break; #else // defined(OPENSSL_NO_SSL3) case context_base::sslv3: impl = ::SSL_CTX_new(::SSLv3_method()); break; case context_base::sslv3_client: impl = ::SSL_CTX_new(::SSLv3_client_method()); break; case context_base::sslv3_server: impl = ::SSL_CTX_new(::SSLv3_server_method()); break; #endif // defined(OPENSSL_NO_SSL3) case context_base::tlsv1: impl = ::SSL_CTX_new(::TLSv1_method()); break; case context_base::tlsv1_client: impl = ::SSL_CTX_new(::TLSv1_client_method()); break; case context_base::tlsv1_server: impl = ::SSL_CTX_new(::TLSv1_server_method()); break; case context_base::sslv23: impl = ::SSL_CTX_new(::SSLv23_method()); break; case context_base::sslv23_client: impl = ::SSL_CTX_new(::SSLv23_client_method()); break; case context_base::sslv23_server: impl = ::SSL_CTX_new(::SSLv23_server_method()); break; default: impl = ::SSL_CTX_new(0); break; } } // Destroy a context implementation. 
void destroy(impl_type& impl) { if (impl != null()) { if (impl->default_passwd_callback_userdata) { password_callback_type* callback = static_cast( impl->default_passwd_callback_userdata); delete callback; impl->default_passwd_callback_userdata = 0; } ::SSL_CTX_free(impl); impl = null(); } } // Set options on the context. asio::error_code set_options(impl_type& impl, context_base::options o, asio::error_code& ec) { ::SSL_CTX_set_options(impl, o); ec = asio::error_code(); return ec; } // Set peer verification mode. asio::error_code set_verify_mode(impl_type& impl, context_base::verify_mode v, asio::error_code& ec) { ::SSL_CTX_set_verify(impl, v, 0); ec = asio::error_code(); return ec; } // Load a certification authority file for performing verification. asio::error_code load_verify_file(impl_type& impl, const std::string& filename, asio::error_code& ec) { if (::SSL_CTX_load_verify_locations(impl, filename.c_str(), 0) != 1) { ec = asio::error::invalid_argument; return ec; } ec = asio::error_code(); return ec; } // Add a directory containing certification authority files to be used for // performing verification. asio::error_code add_verify_path(impl_type& impl, const std::string& path, asio::error_code& ec) { if (::SSL_CTX_load_verify_locations(impl, 0, path.c_str()) != 1) { ec = asio::error::invalid_argument; return ec; } ec = asio::error_code(); return ec; } // Use a certificate from a file. asio::error_code use_certificate_file(impl_type& impl, const std::string& filename, context_base::file_format format, asio::error_code& ec) { int file_type; switch (format) { case context_base::asn1: file_type = SSL_FILETYPE_ASN1; break; case context_base::pem: file_type = SSL_FILETYPE_PEM; break; default: { ec = asio::error::invalid_argument; return ec; } } if (::SSL_CTX_use_certificate_file(impl, filename.c_str(), file_type) != 1) { ec = asio::error::invalid_argument; return ec; } ec = asio::error_code(); return ec; } // Use a certificate chain from a file. 
asio::error_code use_certificate_chain_file(impl_type& impl, const std::string& filename, asio::error_code& ec) { if (::SSL_CTX_use_certificate_chain_file(impl, filename.c_str()) != 1) { ec = asio::error::invalid_argument; return ec; } ec = asio::error_code(); return ec; } // Use a private key from a file. asio::error_code use_private_key_file(impl_type& impl, const std::string& filename, context_base::file_format format, asio::error_code& ec) { int file_type; switch (format) { case context_base::asn1: file_type = SSL_FILETYPE_ASN1; break; case context_base::pem: file_type = SSL_FILETYPE_PEM; break; default: { ec = asio::error::invalid_argument; return ec; } } if (::SSL_CTX_use_PrivateKey_file(impl, filename.c_str(), file_type) != 1) { ec = asio::error::invalid_argument; return ec; } ec = asio::error_code(); return ec; } // Use an RSA private key from a file. asio::error_code use_rsa_private_key_file(impl_type& impl, const std::string& filename, context_base::file_format format, asio::error_code& ec) { int file_type; switch (format) { case context_base::asn1: file_type = SSL_FILETYPE_ASN1; break; case context_base::pem: file_type = SSL_FILETYPE_PEM; break; default: { ec = asio::error::invalid_argument; return ec; } } if (::SSL_CTX_use_RSAPrivateKey_file( impl, filename.c_str(), file_type) != 1) { ec = asio::error::invalid_argument; return ec; } ec = asio::error_code(); return ec; } // Use the specified file to obtain the temporary Diffie-Hellman parameters. 
asio::error_code use_tmp_dh_file(impl_type& impl, const std::string& filename, asio::error_code& ec) { ::BIO* bio = ::BIO_new_file(filename.c_str(), "r"); if (!bio) { ec = asio::error::invalid_argument; return ec; } ::DH* dh = ::PEM_read_bio_DHparams(bio, 0, 0, 0); if (!dh) { ::BIO_free(bio); ec = asio::error::invalid_argument; return ec; } ::BIO_free(bio); int result = ::SSL_CTX_set_tmp_dh(impl, dh); ::DH_free(dh); if (result != 1) { ec = asio::error::invalid_argument; return ec; } ec = asio::error_code(); return ec; } static int password_callback(char* buf, int size, int purpose, void* data) { using namespace std; // For strncat and strlen. if (data) { password_callback_type* callback = static_cast(data); std::string passwd = (*callback)(static_cast(size), purpose ? context_base::for_writing : context_base::for_reading); *buf = '\0'; strncat(buf, passwd.c_str(), size); return strlen(buf); } return 0; } // Set the password callback. template asio::error_code set_password_callback(impl_type& impl, Password_Callback callback, asio::error_code& ec) { // Allocate callback function object if not already present. if (impl->default_passwd_callback_userdata) { password_callback_type* callback_function = static_cast( impl->default_passwd_callback_userdata); *callback_function = callback; } else { password_callback_type* callback_function = new password_callback_type(callback); impl->default_passwd_callback_userdata = callback_function; } // Set the password callback. SSL_CTX_set_default_passwd_cb(impl, &openssl_context_service::password_callback); ec = asio::error_code(); return ec; } private: // Ensure openssl is initialised. 
asio::ssl::detail::openssl_init<> init_; }; } // namespace detail } // namespace old } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_OLD_DETAIL_OPENSSL_CONTEXT_SERVICE_HPP galera-26.4.3/asio/asio/ssl/old/detail/openssl_stream_service.hpp0000664000177500017540000003565613540715002023373 0ustar dbartmy// // ssl/old/detail/stream_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2005 Voipster / Indrek dot Juhani at voipster dot com // Copyright (c) 2005-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_OLD_DETAIL_OPENSSL_STREAM_SERVICE_HPP #define ASIO_SSL_OLD_DETAIL_OPENSSL_STREAM_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include #include #include #include #include #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/ssl/basic_context.hpp" #include "asio/ssl/stream_base.hpp" #include "asio/ssl/old/detail/openssl_operation.hpp" #include "asio/ssl/detail/openssl_types.hpp" #include "asio/strand.hpp" #include "asio/system_error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace old { namespace detail { class openssl_stream_service : public asio::detail::service_base { private: enum { max_buffer_size = INT_MAX }; //Base handler for asyncrhonous operations template class base_handler { public: typedef boost::function< void (const asio::error_code&, size_t)> func_t; base_handler(asio::io_service& io_service) : op_(NULL) , io_service_(io_service) , work_(io_service) {} void do_func(const asio::error_code& error, size_t size) { func_(error, size); } void set_operation(openssl_operation* op) { 
op_ = op; } void set_func(func_t func) { func_ = func; } ~base_handler() { delete op_; } private: func_t func_; openssl_operation* op_; asio::io_service& io_service_; asio::io_service::work work_; }; // class base_handler // Handler for asynchronous IO (write/read) operations template class io_handler : public base_handler { public: io_handler(Handler handler, asio::io_service& io_service) : base_handler(io_service) , handler_(handler) { this->set_func(boost::bind( &io_handler::handler_impl, this, boost::arg<1>(), boost::arg<2>() )); } private: Handler handler_; void handler_impl(const asio::error_code& error, size_t size) { std::auto_ptr > this_ptr(this); handler_(error, size); } }; // class io_handler // Handler for asyncrhonous handshake (connect, accept) functions template class handshake_handler : public base_handler { public: handshake_handler(Handler handler, asio::io_service& io_service) : base_handler(io_service) , handler_(handler) { this->set_func(boost::bind( &handshake_handler::handler_impl, this, boost::arg<1>(), boost::arg<2>() )); } private: Handler handler_; void handler_impl(const asio::error_code& error, size_t) { std::auto_ptr > this_ptr(this); handler_(error); } }; // class handshake_handler // Handler for asyncrhonous shutdown template class shutdown_handler : public base_handler { public: shutdown_handler(Handler handler, asio::io_service& io_service) : base_handler(io_service), handler_(handler) { this->set_func(boost::bind( &shutdown_handler::handler_impl, this, boost::arg<1>(), boost::arg<2>() )); } private: Handler handler_; void handler_impl(const asio::error_code& error, size_t) { std::auto_ptr > this_ptr(this); handler_(error); } }; // class shutdown_handler public: // The implementation type. typedef struct impl_struct { ::SSL* ssl; ::BIO* ext_bio; net_buffer recv_buf; } * impl_type; // Construct a new stream socket service for the specified io_service. 
explicit openssl_stream_service(asio::io_service& io_service) : asio::detail::service_base(io_service), strand_(io_service) { } // Destroy all user-defined handler objects owned by the service. void shutdown_service() { } // Return a null stream implementation. impl_type null() const { return 0; } // Create a new stream implementation. template void create(impl_type& impl, Stream& /*next_layer*/, basic_context& context) { impl = new impl_struct; impl->ssl = ::SSL_new(context.impl()); ::SSL_set_mode(impl->ssl, SSL_MODE_ENABLE_PARTIAL_WRITE); ::SSL_set_mode(impl->ssl, SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER); ::BIO* int_bio = 0; impl->ext_bio = 0; ::BIO_new_bio_pair(&int_bio, 8192, &impl->ext_bio, 8192); ::SSL_set_bio(impl->ssl, int_bio, int_bio); } // Destroy a stream implementation. template void destroy(impl_type& impl, Stream& /*next_layer*/) { if (impl != 0) { ::BIO_free(impl->ext_bio); ::SSL_free(impl->ssl); delete impl; impl = 0; } } // Perform SSL handshaking. template asio::error_code handshake(impl_type& impl, Stream& next_layer, stream_base::handshake_type type, asio::error_code& ec) { try { openssl_operation op( type == stream_base::client ? &ssl_wrap::SSL_connect: &ssl_wrap::SSL_accept, next_layer, impl->recv_buf, impl->ssl, impl->ext_bio); op.start(); } catch (asio::system_error& e) { ec = e.code(); return ec; } ec = asio::error_code(); return ec; } // Start an asynchronous SSL handshake. template void async_handshake(impl_type& impl, Stream& next_layer, stream_base::handshake_type type, Handler handler) { typedef handshake_handler connect_handler; connect_handler* local_handler = new connect_handler(handler, get_io_service()); openssl_operation* op = new openssl_operation ( type == stream_base::client ? 
&ssl_wrap::SSL_connect: &ssl_wrap::SSL_accept, next_layer, impl->recv_buf, impl->ssl, impl->ext_bio, boost::bind ( &base_handler::do_func, local_handler, boost::arg<1>(), boost::arg<2>() ), strand_ ); local_handler->set_operation(op); strand_.post(boost::bind(&openssl_operation::start, op)); } // Shut down SSL on the stream. template asio::error_code shutdown(impl_type& impl, Stream& next_layer, asio::error_code& ec) { try { openssl_operation op( &ssl_wrap::SSL_shutdown, next_layer, impl->recv_buf, impl->ssl, impl->ext_bio); op.start(); } catch (asio::system_error& e) { ec = e.code(); return ec; } ec = asio::error_code(); return ec; } // Asynchronously shut down SSL on the stream. template void async_shutdown(impl_type& impl, Stream& next_layer, Handler handler) { typedef shutdown_handler disconnect_handler; disconnect_handler* local_handler = new disconnect_handler(handler, get_io_service()); openssl_operation* op = new openssl_operation ( &ssl_wrap::SSL_shutdown, next_layer, impl->recv_buf, impl->ssl, impl->ext_bio, boost::bind ( &base_handler::do_func, local_handler, boost::arg<1>(), boost::arg<2>() ), strand_ ); local_handler->set_operation(op); strand_.post(boost::bind(&openssl_operation::start, op)); } // Write some data to the stream. 
template std::size_t write_some(impl_type& impl, Stream& next_layer, const Const_Buffers& buffers, asio::error_code& ec) { size_t bytes_transferred = 0; try { asio::const_buffer buffer = asio::detail::buffer_sequence_adapter< asio::const_buffer, Const_Buffers>::first(buffers); std::size_t buffer_size = asio::buffer_size(buffer); if (buffer_size > max_buffer_size) buffer_size = max_buffer_size; else if (buffer_size == 0) { ec = asio::error_code(); return 0; } boost::function send_func = boost::bind(boost::type(), &::SSL_write, boost::arg<1>(), asio::buffer_cast(buffer), static_cast(buffer_size)); openssl_operation op( send_func, next_layer, impl->recv_buf, impl->ssl, impl->ext_bio ); bytes_transferred = static_cast(op.start()); } catch (asio::system_error& e) { ec = e.code(); return 0; } ec = asio::error_code(); return bytes_transferred; } // Start an asynchronous write. template void async_write_some(impl_type& impl, Stream& next_layer, const Const_Buffers& buffers, Handler handler) { typedef io_handler send_handler; asio::const_buffer buffer = asio::detail::buffer_sequence_adapter< asio::const_buffer, Const_Buffers>::first(buffers); std::size_t buffer_size = asio::buffer_size(buffer); if (buffer_size > max_buffer_size) buffer_size = max_buffer_size; else if (buffer_size == 0) { get_io_service().post(asio::detail::bind_handler( handler, asio::error_code(), 0)); return; } send_handler* local_handler = new send_handler(handler, get_io_service()); boost::function send_func = boost::bind(boost::type(), &::SSL_write, boost::arg<1>(), asio::buffer_cast(buffer), static_cast(buffer_size)); openssl_operation* op = new openssl_operation ( send_func, next_layer, impl->recv_buf, impl->ssl, impl->ext_bio, boost::bind ( &base_handler::do_func, local_handler, boost::arg<1>(), boost::arg<2>() ), strand_ ); local_handler->set_operation(op); strand_.post(boost::bind(&openssl_operation::start, op)); } // Read some data from the stream. 
template std::size_t read_some(impl_type& impl, Stream& next_layer, const Mutable_Buffers& buffers, asio::error_code& ec) { size_t bytes_transferred = 0; try { asio::mutable_buffer buffer = asio::detail::buffer_sequence_adapter< asio::mutable_buffer, Mutable_Buffers>::first(buffers); std::size_t buffer_size = asio::buffer_size(buffer); if (buffer_size > max_buffer_size) buffer_size = max_buffer_size; else if (buffer_size == 0) { ec = asio::error_code(); return 0; } boost::function recv_func = boost::bind(boost::type(), &::SSL_read, boost::arg<1>(), asio::buffer_cast(buffer), static_cast(buffer_size)); openssl_operation op(recv_func, next_layer, impl->recv_buf, impl->ssl, impl->ext_bio ); bytes_transferred = static_cast(op.start()); } catch (asio::system_error& e) { ec = e.code(); return 0; } ec = asio::error_code(); return bytes_transferred; } // Start an asynchronous read. template void async_read_some(impl_type& impl, Stream& next_layer, const Mutable_Buffers& buffers, Handler handler) { typedef io_handler recv_handler; asio::mutable_buffer buffer = asio::detail::buffer_sequence_adapter< asio::mutable_buffer, Mutable_Buffers>::first(buffers); std::size_t buffer_size = asio::buffer_size(buffer); if (buffer_size > max_buffer_size) buffer_size = max_buffer_size; else if (buffer_size == 0) { get_io_service().post(asio::detail::bind_handler( handler, asio::error_code(), 0)); return; } recv_handler* local_handler = new recv_handler(handler, get_io_service()); boost::function recv_func = boost::bind(boost::type(), &::SSL_read, boost::arg<1>(), asio::buffer_cast(buffer), static_cast(buffer_size)); openssl_operation* op = new openssl_operation ( recv_func, next_layer, impl->recv_buf, impl->ssl, impl->ext_bio, boost::bind ( &base_handler::do_func, local_handler, boost::arg<1>(), boost::arg<2>() ), strand_ ); local_handler->set_operation(op); strand_.post(boost::bind(&openssl_operation::start, op)); } // Peek at the incoming data on the stream. 
template std::size_t peek(impl_type& /*impl*/, Stream& /*next_layer*/, const Mutable_Buffers& /*buffers*/, asio::error_code& ec) { ec = asio::error_code(); return 0; } // Determine the amount of data that may be read without blocking. template std::size_t in_avail(impl_type& /*impl*/, Stream& /*next_layer*/, asio::error_code& ec) { ec = asio::error_code(); return 0; } private: asio::io_service::strand strand_; typedef asio::detail::mutex mutex_type; template struct ssl_wrap { static Mutex ssl_mutex_; static int SSL_accept(SSL *ssl) { typename Mutex::scoped_lock lock(ssl_mutex_); return ::SSL_accept(ssl); } static int SSL_connect(SSL *ssl) { typename Mutex::scoped_lock lock(ssl_mutex_); return ::SSL_connect(ssl); } static int SSL_shutdown(SSL *ssl) { typename Mutex::scoped_lock lock(ssl_mutex_); return ::SSL_shutdown(ssl); } }; }; template Mutex openssl_stream_service::ssl_wrap::ssl_mutex_; } // namespace detail } // namespace old } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_OLD_DETAIL_OPENSSL_STREAM_SERVICE_HPP galera-26.4.3/asio/asio/ssl/old/stream_service.hpp0000664000177500017540000001274013540715002020353 0ustar dbartmy// // ssl/old/stream_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2005 Voipster / Indrek dot Juhani at voipster dot com // Copyright (c) 2005-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_OLD_STREAM_SERVICE_HPP #define ASIO_SSL_OLD_STREAM_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/io_service.hpp" #include "asio/ssl/basic_context.hpp" #include "asio/ssl/old/detail/openssl_stream_service.hpp" #include "asio/ssl/stream_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace old { /// Default service implementation for an SSL stream. class stream_service #if defined(GENERATING_DOCUMENTATION) : public asio::io_service::service #else : public asio::detail::service_base #endif { private: // The type of the platform-specific implementation. typedef old::detail::openssl_stream_service service_impl_type; public: #if defined(GENERATING_DOCUMENTATION) /// The unique service identifier. static asio::io_service::id id; #endif /// The type of a stream implementation. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined impl_type; #else typedef service_impl_type::impl_type impl_type; #endif /// Construct a new stream service for the specified io_service. explicit stream_service(asio::io_service& io_service) : asio::detail::service_base(io_service), service_impl_(asio::use_service(io_service)) { } /// Return a null stream implementation. impl_type null() const { return service_impl_.null(); } /// Create a new stream implementation. template void create(impl_type& impl, Stream& next_layer, basic_context& context) { service_impl_.create(impl, next_layer, context); } /// Destroy a stream implementation. template void destroy(impl_type& impl, Stream& next_layer) { service_impl_.destroy(impl, next_layer); } /// Perform SSL handshaking. 
template asio::error_code handshake(impl_type& impl, Stream& next_layer, stream_base::handshake_type type, asio::error_code& ec) { return service_impl_.handshake(impl, next_layer, type, ec); } /// Start an asynchronous SSL handshake. template void async_handshake(impl_type& impl, Stream& next_layer, stream_base::handshake_type type, HandshakeHandler handler) { service_impl_.async_handshake(impl, next_layer, type, handler); } /// Shut down SSL on the stream. template asio::error_code shutdown(impl_type& impl, Stream& next_layer, asio::error_code& ec) { return service_impl_.shutdown(impl, next_layer, ec); } /// Asynchronously shut down SSL on the stream. template void async_shutdown(impl_type& impl, Stream& next_layer, ShutdownHandler handler) { service_impl_.async_shutdown(impl, next_layer, handler); } /// Write some data to the stream. template std::size_t write_some(impl_type& impl, Stream& next_layer, const ConstBufferSequence& buffers, asio::error_code& ec) { return service_impl_.write_some(impl, next_layer, buffers, ec); } /// Start an asynchronous write. template void async_write_some(impl_type& impl, Stream& next_layer, const ConstBufferSequence& buffers, WriteHandler handler) { service_impl_.async_write_some(impl, next_layer, buffers, handler); } /// Read some data from the stream. template std::size_t read_some(impl_type& impl, Stream& next_layer, const MutableBufferSequence& buffers, asio::error_code& ec) { return service_impl_.read_some(impl, next_layer, buffers, ec); } /// Start an asynchronous read. template void async_read_some(impl_type& impl, Stream& next_layer, const MutableBufferSequence& buffers, ReadHandler handler) { service_impl_.async_read_some(impl, next_layer, buffers, handler); } /// Peek at the incoming data on the stream. 
template std::size_t peek(impl_type& impl, Stream& next_layer, const MutableBufferSequence& buffers, asio::error_code& ec) { return service_impl_.peek(impl, next_layer, buffers, ec); } /// Determine the amount of data that may be read without blocking. template std::size_t in_avail(impl_type& impl, Stream& next_layer, asio::error_code& ec) { return service_impl_.in_avail(impl, next_layer, ec); } private: // Destroy all user-defined handler objects owned by the service. void shutdown_service() { } // The service that provides the platform-specific implementation. service_impl_type& service_impl_; }; } // namespace old } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_OLD_STREAM_SERVICE_HPP galera-26.4.3/asio/asio/ssl/old/basic_context.hpp0000664000177500017540000003235113540715002020165 0ustar dbartmy// // ssl/old/basic_context.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2005 Voipster / Indrek dot Juhani at voipster dot com // Copyright (c) 2005-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_OLD_BASIC_CONTEXT_HPP #define ASIO_SSL_OLD_BASIC_CONTEXT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/ssl/context_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace old { /// SSL context. template class basic_context : public context_base, private boost::noncopyable { public: /// The type of the service that will be used to provide context operations. typedef Service service_type; /// The native implementation type of the SSL context. 
typedef typename service_type::impl_type impl_type; /// Constructor. basic_context(asio::io_service& io_service, method m) : service_(asio::use_service(io_service)), impl_(service_.null()) { service_.create(impl_, m); } /// Destructor. ~basic_context() { service_.destroy(impl_); } /// Get the underlying implementation in the native type. /** * This function may be used to obtain the underlying implementation of the * context. This is intended to allow access to context functionality that is * not otherwise provided. */ impl_type impl() { return impl_; } /// Set options on the context. /** * This function may be used to configure the SSL options used by the context. * * @param o A bitmask of options. The available option values are defined in * the context_base class. The options are bitwise-ored with any existing * value for the options. * * @throws asio::system_error Thrown on failure. */ void set_options(options o) { asio::error_code ec; service_.set_options(impl_, o, ec); asio::detail::throw_error(ec); } /// Set options on the context. /** * This function may be used to configure the SSL options used by the context. * * @param o A bitmask of options. The available option values are defined in * the context_base class. The options are bitwise-ored with any existing * value for the options. * * @param ec Set to indicate what error occurred, if any. */ asio::error_code set_options(options o, asio::error_code& ec) { return service_.set_options(impl_, o, ec); } /// Set the peer verification mode. /** * This function may be used to configure the peer verification mode used by * the context. * * @param v A bitmask of peer verification modes. The available verify_mode * values are defined in the context_base class. * * @throws asio::system_error Thrown on failure. */ void set_verify_mode(verify_mode v) { asio::error_code ec; service_.set_verify_mode(impl_, v, ec); asio::detail::throw_error(ec); } /// Set the peer verification mode. 
/** * This function may be used to configure the peer verification mode used by * the context. * * @param v A bitmask of peer verification modes. The available verify_mode * values are defined in the context_base class. * * @param ec Set to indicate what error occurred, if any. */ asio::error_code set_verify_mode(verify_mode v, asio::error_code& ec) { return service_.set_verify_mode(impl_, v, ec); } /// Load a certification authority file for performing verification. /** * This function is used to load one or more trusted certification authorities * from a file. * * @param filename The name of a file containing certification authority * certificates in PEM format. * * @throws asio::system_error Thrown on failure. */ void load_verify_file(const std::string& filename) { asio::error_code ec; service_.load_verify_file(impl_, filename, ec); asio::detail::throw_error(ec); } /// Load a certification authority file for performing verification. /** * This function is used to load the certificates for one or more trusted * certification authorities from a file. * * @param filename The name of a file containing certification authority * certificates in PEM format. * * @param ec Set to indicate what error occurred, if any. */ asio::error_code load_verify_file(const std::string& filename, asio::error_code& ec) { return service_.load_verify_file(impl_, filename, ec); } /// Add a directory containing certificate authority files to be used for /// performing verification. /** * This function is used to specify the name of a directory containing * certification authority certificates. Each file in the directory must * contain a single certificate. The files must be named using the subject * name's hash and an extension of ".0". * * @param path The name of a directory containing the certificates. * * @throws asio::system_error Thrown on failure. 
*/ void add_verify_path(const std::string& path) { asio::error_code ec; service_.add_verify_path(impl_, path, ec); asio::detail::throw_error(ec); } /// Add a directory containing certificate authority files to be used for /// performing verification. /** * This function is used to specify the name of a directory containing * certification authority certificates. Each file in the directory must * contain a single certificate. The files must be named using the subject * name's hash and an extension of ".0". * * @param path The name of a directory containing the certificates. * * @param ec Set to indicate what error occurred, if any. */ asio::error_code add_verify_path(const std::string& path, asio::error_code& ec) { return service_.add_verify_path(impl_, path, ec); } /// Use a certificate from a file. /** * This function is used to load a certificate into the context from a file. * * @param filename The name of the file containing the certificate. * * @param format The file format (ASN.1 or PEM). * * @throws asio::system_error Thrown on failure. */ void use_certificate_file(const std::string& filename, file_format format) { asio::error_code ec; service_.use_certificate_file(impl_, filename, format, ec); asio::detail::throw_error(ec); } /// Use a certificate from a file. /** * This function is used to load a certificate into the context from a file. * * @param filename The name of the file containing the certificate. * * @param format The file format (ASN.1 or PEM). * * @param ec Set to indicate what error occurred, if any. */ asio::error_code use_certificate_file(const std::string& filename, file_format format, asio::error_code& ec) { return service_.use_certificate_file(impl_, filename, format, ec); } /// Use a certificate chain from a file. /** * This function is used to load a certificate chain into the context from a * file. * * @param filename The name of the file containing the certificate. The file * must use the PEM format. 
* * @throws asio::system_error Thrown on failure. */ void use_certificate_chain_file(const std::string& filename) { asio::error_code ec; service_.use_certificate_chain_file(impl_, filename, ec); asio::detail::throw_error(ec); } /// Use a certificate chain from a file. /** * This function is used to load a certificate chain into the context from a * file. * * @param filename The name of the file containing the certificate. The file * must use the PEM format. * * @param ec Set to indicate what error occurred, if any. */ asio::error_code use_certificate_chain_file( const std::string& filename, asio::error_code& ec) { return service_.use_certificate_chain_file(impl_, filename, ec); } /// Use a private key from a file. /** * This function is used to load a private key into the context from a file. * * @param filename The name of the file containing the private key. * * @param format The file format (ASN.1 or PEM). * * @throws asio::system_error Thrown on failure. */ void use_private_key_file(const std::string& filename, file_format format) { asio::error_code ec; service_.use_private_key_file(impl_, filename, format, ec); asio::detail::throw_error(ec); } /// Use a private key from a file. /** * This function is used to load a private key into the context from a file. * * @param filename The name of the file containing the private key. * * @param format The file format (ASN.1 or PEM). * * @param ec Set to indicate what error occurred, if any. */ asio::error_code use_private_key_file(const std::string& filename, file_format format, asio::error_code& ec) { return service_.use_private_key_file(impl_, filename, format, ec); } /// Use an RSA private key from a file. /** * This function is used to load an RSA private key into the context from a * file. * * @param filename The name of the file containing the RSA private key. * * @param format The file format (ASN.1 or PEM). * * @throws asio::system_error Thrown on failure. 
*/ void use_rsa_private_key_file(const std::string& filename, file_format format) { asio::error_code ec; service_.use_rsa_private_key_file(impl_, filename, format, ec); asio::detail::throw_error(ec); } /// Use an RSA private key from a file. /** * This function is used to load an RSA private key into the context from a * file. * * @param filename The name of the file containing the RSA private key. * * @param format The file format (ASN.1 or PEM). * * @param ec Set to indicate what error occurred, if any. */ asio::error_code use_rsa_private_key_file( const std::string& filename, file_format format, asio::error_code& ec) { return service_.use_rsa_private_key_file(impl_, filename, format, ec); } /// Use the specified file to obtain the temporary Diffie-Hellman parameters. /** * This function is used to load Diffie-Hellman parameters into the context * from a file. * * @param filename The name of the file containing the Diffie-Hellman * parameters. The file must use the PEM format. * * @throws asio::system_error Thrown on failure. */ void use_tmp_dh_file(const std::string& filename) { asio::error_code ec; service_.use_tmp_dh_file(impl_, filename, ec); asio::detail::throw_error(ec); } /// Use the specified file to obtain the temporary Diffie-Hellman parameters. /** * This function is used to load Diffie-Hellman parameters into the context * from a file. * * @param filename The name of the file containing the Diffie-Hellman * parameters. The file must use the PEM format. * * @param ec Set to indicate what error occurred, if any. */ asio::error_code use_tmp_dh_file(const std::string& filename, asio::error_code& ec) { return service_.use_tmp_dh_file(impl_, filename, ec); } /// Set the password callback. /** * This function is used to specify a callback function to obtain password * information about an encrypted key in PEM format. * * @param callback The function object to be used for obtaining the password. 
* The function signature of the handler must be: * @code std::string password_callback( * std::size_t max_length, // The maximum size for a password. * password_purpose purpose // Whether password is for reading or writing. * ); @endcode * The return value of the callback is a string containing the password. * * @throws asio::system_error Thrown on failure. */ template void set_password_callback(PasswordCallback callback) { asio::error_code ec; service_.set_password_callback(impl_, callback, ec); asio::detail::throw_error(ec); } /// Set the password callback. /** * This function is used to specify a callback function to obtain password * information about an encrypted key in PEM format. * * @param callback The function object to be used for obtaining the password. * The function signature of the handler must be: * @code std::string password_callback( * std::size_t max_length, // The maximum size for a password. * password_purpose purpose // Whether password is for reading or writing. * ); @endcode * The return value of the callback is a string containing the password. * * @param ec Set to indicate what error occurred, if any. */ template asio::error_code set_password_callback(PasswordCallback callback, asio::error_code& ec) { return service_.set_password_callback(impl_, callback, ec); } private: /// The backend service implementation. service_type& service_; /// The underlying native implementation. impl_type impl_; }; } // namespace old } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_OLD_BASIC_CONTEXT_HPP galera-26.4.3/asio/asio/ssl/old/context_service.hpp0000664000177500017540000001165713540715002020552 0ustar dbartmy// // ssl/old/context_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2005 Voipster / Indrek dot Juhani at voipster dot com // Copyright (c) 2005-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_OLD_CONTEXT_SERVICE_HPP #define ASIO_SSL_OLD_CONTEXT_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/ssl/context_base.hpp" #include "asio/ssl/old/detail/openssl_context_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace old { /// Default service implementation for a context. class context_service #if defined(GENERATING_DOCUMENTATION) : public asio::io_service::service #else : public asio::detail::service_base #endif { private: // The type of the platform-specific implementation. typedef old::detail::openssl_context_service service_impl_type; public: #if defined(GENERATING_DOCUMENTATION) /// The unique service identifier. static asio::io_service::id id; #endif /// The type of the context. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined impl_type; #else typedef service_impl_type::impl_type impl_type; #endif /// Constructor. explicit context_service(asio::io_service& io_service) : asio::detail::service_base(io_service), service_impl_(asio::use_service(io_service)) { } /// Return a null context implementation. impl_type null() const { return service_impl_.null(); } /// Create a new context implementation. void create(impl_type& impl, context_base::method m) { service_impl_.create(impl, m); } /// Destroy a context implementation. void destroy(impl_type& impl) { service_impl_.destroy(impl); } /// Set options on the context. asio::error_code set_options(impl_type& impl, context_base::options o, asio::error_code& ec) { return service_impl_.set_options(impl, o, ec); } /// Set peer verification mode. 
asio::error_code set_verify_mode(impl_type& impl, context_base::verify_mode v, asio::error_code& ec) { return service_impl_.set_verify_mode(impl, v, ec); } /// Load a certification authority file for performing verification. asio::error_code load_verify_file(impl_type& impl, const std::string& filename, asio::error_code& ec) { return service_impl_.load_verify_file(impl, filename, ec); } /// Add a directory containing certification authority files to be used for /// performing verification. asio::error_code add_verify_path(impl_type& impl, const std::string& path, asio::error_code& ec) { return service_impl_.add_verify_path(impl, path, ec); } /// Use a certificate from a file. asio::error_code use_certificate_file(impl_type& impl, const std::string& filename, context_base::file_format format, asio::error_code& ec) { return service_impl_.use_certificate_file(impl, filename, format, ec); } /// Use a certificate chain from a file. asio::error_code use_certificate_chain_file(impl_type& impl, const std::string& filename, asio::error_code& ec) { return service_impl_.use_certificate_chain_file(impl, filename, ec); } /// Use a private key from a file. asio::error_code use_private_key_file(impl_type& impl, const std::string& filename, context_base::file_format format, asio::error_code& ec) { return service_impl_.use_private_key_file(impl, filename, format, ec); } /// Use an RSA private key from a file. asio::error_code use_rsa_private_key_file(impl_type& impl, const std::string& filename, context_base::file_format format, asio::error_code& ec) { return service_impl_.use_rsa_private_key_file(impl, filename, format, ec); } /// Use the specified file to obtain the temporary Diffie-Hellman parameters. asio::error_code use_tmp_dh_file(impl_type& impl, const std::string& filename, asio::error_code& ec) { return service_impl_.use_tmp_dh_file(impl, filename, ec); } /// Set the password callback. 
template asio::error_code set_password_callback(impl_type& impl, PasswordCallback callback, asio::error_code& ec) { return service_impl_.set_password_callback(impl, callback, ec); } private: // Destroy all user-defined handler objects owned by the service. void shutdown_service() { } // The service that provides the platform-specific implementation. service_impl_type& service_impl_; }; } // namespace old } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_OLD_CONTEXT_SERVICE_HPP galera-26.4.3/asio/asio/ssl/old/stream.hpp0000664000177500017540000004053713540715002016640 0ustar dbartmy// // ssl/old/stream.hpp // ~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2005 Voipster / Indrek dot Juhani at voipster dot com // Copyright (c) 2005-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_OLD_STREAM_HPP #define ASIO_SSL_OLD_STREAM_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/detail/throw_error.hpp" #include "asio/detail/type_traits.hpp" #include "asio/error.hpp" #include "asio/ssl/basic_context.hpp" #include "asio/ssl/stream_base.hpp" #include "asio/ssl/stream_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace old { /// Provides stream-oriented functionality using SSL. /** * The stream class template provides asynchronous and blocking stream-oriented * functionality using SSL. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. 
* * @par Example * To use the SSL stream template with an ip::tcp::socket, you would write: * @code * asio::io_service io_service; * asio::ssl::context context(io_service, asio::ssl::context::sslv23); * asio::ssl::stream sock(io_service, context); * @endcode * * @par Concepts: * AsyncReadStream, AsyncWriteStream, Stream, SyncRead_Stream, SyncWriteStream. */ template class stream : public stream_base, private boost::noncopyable { public: /// The type of the next layer. typedef typename remove_reference::type next_layer_type; /// The type of the lowest layer. typedef typename next_layer_type::lowest_layer_type lowest_layer_type; /// The type of the service that will be used to provide stream operations. typedef Service service_type; /// The native implementation type of the stream. typedef typename service_type::impl_type impl_type; /// Construct a stream. /** * This constructor creates a stream and initialises the underlying stream * object. * * @param arg The argument to be passed to initialise the underlying stream. * * @param context The SSL context to be used for the stream. */ template explicit stream(Arg& arg, basic_context& context) : next_layer_(arg), service_(asio::use_service(next_layer_.get_io_service())), impl_(service_.null()) { service_.create(impl_, next_layer_, context); } /// Destructor. ~stream() { service_.destroy(impl_, next_layer_); } /// Get the io_service associated with the object. /** * This function may be used to obtain the io_service object that the stream * uses to dispatch handlers for asynchronous operations. * * @return A reference to the io_service object that stream will use to * dispatch handlers. Ownership is not transferred to the caller. */ asio::io_service& get_io_service() { return next_layer_.get_io_service(); } /// Get a reference to the next layer. /** * This function returns a reference to the next layer in a stack of stream * layers. * * @return A reference to the next layer in the stack of stream layers. 
* Ownership is not transferred to the caller. */ next_layer_type& next_layer() { return next_layer_; } /// Get a reference to the lowest layer. /** * This function returns a reference to the lowest layer in a stack of * stream layers. * * @return A reference to the lowest layer in the stack of stream layers. * Ownership is not transferred to the caller. */ lowest_layer_type& lowest_layer() { return next_layer_.lowest_layer(); } /// Get a const reference to the lowest layer. /** * This function returns a const reference to the lowest layer in a stack of * stream layers. * * @return A const reference to the lowest layer in the stack of stream * layers. Ownership is not transferred to the caller. */ const lowest_layer_type& lowest_layer() const { return next_layer_.lowest_layer(); } /// Get the underlying implementation in the native type. /** * This function may be used to obtain the underlying implementation of the * context. This is intended to allow access to stream functionality that is * not otherwise provided. */ impl_type impl() { return impl_; } /// Perform SSL handshaking. /** * This function is used to perform SSL handshaking on the stream. The * function call will block until handshaking is complete or an error occurs. * * @param type The type of handshaking to be performed, i.e. as a client or as * a server. * * @throws asio::system_error Thrown on failure. */ void handshake(handshake_type type) { asio::error_code ec; service_.handshake(impl_, next_layer_, type, ec); asio::detail::throw_error(ec); } /// Perform SSL handshaking. /** * This function is used to perform SSL handshaking on the stream. The * function call will block until handshaking is complete or an error occurs. * * @param type The type of handshaking to be performed, i.e. as a client or as * a server. * * @param ec Set to indicate what error occurred, if any. 
*/ asio::error_code handshake(handshake_type type, asio::error_code& ec) { return service_.handshake(impl_, next_layer_, type, ec); } /// Start an asynchronous SSL handshake. /** * This function is used to asynchronously perform an SSL handshake on the * stream. This function call always returns immediately. * * @param type The type of handshaking to be performed, i.e. as a client or as * a server. * * @param handler The handler to be called when the handshake operation * completes. Copies will be made of the handler as required. The equivalent * function signature of the handler must be: * @code void handler( * const asio::error_code& error // Result of operation. * ); @endcode */ template void async_handshake(handshake_type type, HandshakeHandler handler) { service_.async_handshake(impl_, next_layer_, type, handler); } /// Shut down SSL on the stream. /** * This function is used to shut down SSL on the stream. The function call * will block until SSL has been shut down or an error occurs. * * @throws asio::system_error Thrown on failure. */ void shutdown() { asio::error_code ec; service_.shutdown(impl_, next_layer_, ec); asio::detail::throw_error(ec); } /// Shut down SSL on the stream. /** * This function is used to shut down SSL on the stream. The function call * will block until SSL has been shut down or an error occurs. * * @param ec Set to indicate what error occurred, if any. */ asio::error_code shutdown(asio::error_code& ec) { return service_.shutdown(impl_, next_layer_, ec); } /// Asynchronously shut down SSL on the stream. /** * This function is used to asynchronously shut down SSL on the stream. This * function call always returns immediately. * * @param handler The handler to be called when the handshake operation * completes. Copies will be made of the handler as required. The equivalent * function signature of the handler must be: * @code void handler( * const asio::error_code& error // Result of operation. 
* ); @endcode */ template void async_shutdown(ShutdownHandler handler) { service_.async_shutdown(impl_, next_layer_, handler); } /// Write some data to the stream. /** * This function is used to write data on the stream. The function call will * block until one or more bytes of data has been written successfully, or * until an error occurs. * * @param buffers The data to be written. * * @returns The number of bytes written. * * @throws asio::system_error Thrown on failure. * * @note The write_some operation may not transmit all of the data to the * peer. Consider using the @ref write function if you need to ensure that all * data is written before the blocking operation completes. */ template std::size_t write_some(const ConstBufferSequence& buffers) { asio::error_code ec; std::size_t s = service_.write_some(impl_, next_layer_, buffers, ec); asio::detail::throw_error(ec); return s; } /// Write some data to the stream. /** * This function is used to write data on the stream. The function call will * block until one or more bytes of data has been written successfully, or * until an error occurs. * * @param buffers The data to be written to the stream. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes written. Returns 0 if an error occurred. * * @note The write_some operation may not transmit all of the data to the * peer. Consider using the @ref write function if you need to ensure that all * data is written before the blocking operation completes. */ template std::size_t write_some(const ConstBufferSequence& buffers, asio::error_code& ec) { return service_.write_some(impl_, next_layer_, buffers, ec); } /// Start an asynchronous write. /** * This function is used to asynchronously write one or more bytes of data to * the stream. The function call always returns immediately. * * @param buffers The data to be written to the stream. 
Although the buffers * object may be copied as necessary, ownership of the underlying buffers is * retained by the caller, which must guarantee that they remain valid until * the handler is called. * * @param handler The handler to be called when the write operation completes. * Copies will be made of the handler as required. The equivalent function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes written. * ); @endcode * * @note The async_write_some operation may not transmit all of the data to * the peer. Consider using the @ref async_write function if you need to * ensure that all data is written before the blocking operation completes. */ template void async_write_some(const ConstBufferSequence& buffers, WriteHandler handler) { service_.async_write_some(impl_, next_layer_, buffers, handler); } /// Read some data from the stream. /** * This function is used to read data from the stream. The function call will * block until one or more bytes of data has been read successfully, or until * an error occurs. * * @param buffers The buffers into which the data will be read. * * @returns The number of bytes read. * * @throws asio::system_error Thrown on failure. * * @note The read_some operation may not read all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that the * requested amount of data is read before the blocking operation completes. */ template std::size_t read_some(const MutableBufferSequence& buffers) { asio::error_code ec; std::size_t s = service_.read_some(impl_, next_layer_, buffers, ec); asio::detail::throw_error(ec); return s; } /// Read some data from the stream. /** * This function is used to read data from the stream. The function call will * block until one or more bytes of data has been read successfully, or until * an error occurs. 
* * @param buffers The buffers into which the data will be read. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes read. Returns 0 if an error occurred. * * @note The read_some operation may not read all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that the * requested amount of data is read before the blocking operation completes. */ template std::size_t read_some(const MutableBufferSequence& buffers, asio::error_code& ec) { return service_.read_some(impl_, next_layer_, buffers, ec); } /// Start an asynchronous read. /** * This function is used to asynchronously read one or more bytes of data from * the stream. The function call always returns immediately. * * @param buffers The buffers into which the data will be read. Although the * buffers object may be copied as necessary, ownership of the underlying * buffers is retained by the caller, which must guarantee that they remain * valid until the handler is called. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The equivalent function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes read. * ); @endcode * * @note The async_read_some operation may not read all of the requested * number of bytes. Consider using the @ref async_read function if you need to * ensure that the requested amount of data is read before the asynchronous * operation completes. */ template void async_read_some(const MutableBufferSequence& buffers, ReadHandler handler) { service_.async_read_some(impl_, next_layer_, buffers, handler); } /// Peek at the incoming data on the stream. /** * This function is used to peek at the incoming data on the stream, without * removing it from the input queue. 
The function call will block until data * has been read successfully or an error occurs. * * @param buffers The buffers into which the data will be read. * * @returns The number of bytes read. * * @throws asio::system_error Thrown on failure. */ template std::size_t peek(const MutableBufferSequence& buffers) { asio::error_code ec; std::size_t s = service_.peek(impl_, next_layer_, buffers, ec); asio::detail::throw_error(ec); return s; } /// Peek at the incoming data on the stream. /** * This function is used to peek at the incoming data on the stream, withoutxi * removing it from the input queue. The function call will block until data * has been read successfully or an error occurs. * * @param buffers The buffers into which the data will be read. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes read. Returns 0 if an error occurred. */ template std::size_t peek(const MutableBufferSequence& buffers, asio::error_code& ec) { return service_.peek(impl_, next_layer_, buffers, ec); } /// Determine the amount of data that may be read without blocking. /** * This function is used to determine the amount of data, in bytes, that may * be read from the stream without blocking. * * @returns The number of bytes of data that can be read without blocking. * * @throws asio::system_error Thrown on failure. */ std::size_t in_avail() { asio::error_code ec; std::size_t s = service_.in_avail(impl_, next_layer_, ec); asio::detail::throw_error(ec); return s; } /// Determine the amount of data that may be read without blocking. /** * This function is used to determine the amount of data, in bytes, that may * be read from the stream without blocking. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes of data that can be read without blocking. */ std::size_t in_avail(asio::error_code& ec) { return service_.in_avail(impl_, next_layer_, ec); } private: /// The next layer. 
Stream next_layer_; /// The backend service implementation. service_type& service_; /// The underlying native implementation. impl_type impl_; }; } // namespace old } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_OLD_STREAM_HPP galera-26.4.3/asio/asio/version.hpp0000664000177500017540000000120413540715002015437 0ustar dbartmy// // version.hpp // ~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_VERSION_HPP #define ASIO_VERSION_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) // ASIO_VERSION % 100 is the sub-minor version // ASIO_VERSION / 100 % 1000 is the minor version // ASIO_VERSION / 100000 is the major version #define ASIO_VERSION 101008 // 1.10.8 #endif // ASIO_VERSION_HPP galera-26.4.3/asio/asio/basic_raw_socket.hpp0000664000177500017540000011100113540715002017251 0ustar dbartmy// // basic_raw_socket.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BASIC_RAW_SOCKET_HPP #define ASIO_BASIC_RAW_SOCKET_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/basic_socket.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/type_traits.hpp" #include "asio/error.hpp" #include "asio/raw_socket_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Provides raw-oriented socket functionality. 
/** * The basic_raw_socket class template provides asynchronous and blocking * raw-oriented socket functionality. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ template > class basic_raw_socket : public basic_socket { public: /// (Deprecated: Use native_handle_type.) The native representation of a /// socket. typedef typename RawSocketService::native_handle_type native_type; /// The native representation of a socket. typedef typename RawSocketService::native_handle_type native_handle_type; /// The protocol type. typedef Protocol protocol_type; /// The endpoint type. typedef typename Protocol::endpoint endpoint_type; /// Construct a basic_raw_socket without opening it. /** * This constructor creates a raw socket without opening it. The open() * function must be called before data can be sent or received on the socket. * * @param io_service The io_service object that the raw socket will use * to dispatch handlers for any asynchronous operations performed on the * socket. */ explicit basic_raw_socket(asio::io_service& io_service) : basic_socket(io_service) { } /// Construct and open a basic_raw_socket. /** * This constructor creates and opens a raw socket. * * @param io_service The io_service object that the raw socket will use * to dispatch handlers for any asynchronous operations performed on the * socket. * * @param protocol An object specifying protocol parameters to be used. * * @throws asio::system_error Thrown on failure. */ basic_raw_socket(asio::io_service& io_service, const protocol_type& protocol) : basic_socket(io_service, protocol) { } /// Construct a basic_raw_socket, opening it and binding it to the given /// local endpoint. /** * This constructor creates a raw socket and automatically opens it bound * to the specified endpoint on the local machine. The protocol used is the * protocol associated with the given endpoint. 
* * @param io_service The io_service object that the raw socket will use * to dispatch handlers for any asynchronous operations performed on the * socket. * * @param endpoint An endpoint on the local machine to which the raw * socket will be bound. * * @throws asio::system_error Thrown on failure. */ basic_raw_socket(asio::io_service& io_service, const endpoint_type& endpoint) : basic_socket(io_service, endpoint) { } /// Construct a basic_raw_socket on an existing native socket. /** * This constructor creates a raw socket object to hold an existing * native socket. * * @param io_service The io_service object that the raw socket will use * to dispatch handlers for any asynchronous operations performed on the * socket. * * @param protocol An object specifying protocol parameters to be used. * * @param native_socket The new underlying socket implementation. * * @throws asio::system_error Thrown on failure. */ basic_raw_socket(asio::io_service& io_service, const protocol_type& protocol, const native_handle_type& native_socket) : basic_socket( io_service, protocol, native_socket) { } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a basic_raw_socket from another. /** * This constructor moves a raw socket from one object to another. * * @param other The other basic_raw_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_raw_socket(io_service&) constructor. */ basic_raw_socket(basic_raw_socket&& other) : basic_socket( ASIO_MOVE_CAST(basic_raw_socket)(other)) { } /// Move-assign a basic_raw_socket from another. /** * This assignment operator moves a raw socket from one object to another. * * @param other The other basic_raw_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_raw_socket(io_service&) constructor. 
*/ basic_raw_socket& operator=(basic_raw_socket&& other) { basic_socket::operator=( ASIO_MOVE_CAST(basic_raw_socket)(other)); return *this; } /// Move-construct a basic_raw_socket from a socket of another protocol type. /** * This constructor moves a raw socket from one object to another. * * @param other The other basic_raw_socket object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_raw_socket(io_service&) constructor. */ template basic_raw_socket(basic_raw_socket&& other, typename enable_if::value>::type* = 0) : basic_socket( ASIO_MOVE_CAST2(basic_raw_socket< Protocol1, RawSocketService1>)(other)) { } /// Move-assign a basic_raw_socket from a socket of another protocol type. /** * This assignment operator moves a raw socket from one object to another. * * @param other The other basic_raw_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_raw_socket(io_service&) constructor. */ template typename enable_if::value, basic_raw_socket>::type& operator=( basic_raw_socket&& other) { basic_socket::operator=( ASIO_MOVE_CAST2(basic_raw_socket< Protocol1, RawSocketService1>)(other)); return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Send some data on a connected socket. /** * This function is used to send data on the raw socket. The function call * will block until the data has been sent successfully or an error occurs. * * @param buffers One ore more data buffers to be sent on the socket. * * @returns The number of bytes sent. * * @throws asio::system_error Thrown on failure. * * @note The send operation can only be used with a connected socket. Use * the send_to function to send data on an unconnected raw socket. 
* * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code socket.send(asio::buffer(data, size)); @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t send(const ConstBufferSequence& buffers) { asio::error_code ec; std::size_t s = this->get_service().send( this->get_implementation(), buffers, 0, ec); asio::detail::throw_error(ec, "send"); return s; } /// Send some data on a connected socket. /** * This function is used to send data on the raw socket. The function call * will block until the data has been sent successfully or an error occurs. * * @param buffers One ore more data buffers to be sent on the socket. * * @param flags Flags specifying how the send call is to be made. * * @returns The number of bytes sent. * * @throws asio::system_error Thrown on failure. * * @note The send operation can only be used with a connected socket. Use * the send_to function to send data on an unconnected raw socket. */ template std::size_t send(const ConstBufferSequence& buffers, socket_base::message_flags flags) { asio::error_code ec; std::size_t s = this->get_service().send( this->get_implementation(), buffers, flags, ec); asio::detail::throw_error(ec, "send"); return s; } /// Send some data on a connected socket. /** * This function is used to send data on the raw socket. The function call * will block until the data has been sent successfully or an error occurs. * * @param buffers One or more data buffers to be sent on the socket. * * @param flags Flags specifying how the send call is to be made. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes sent. * * @note The send operation can only be used with a connected socket. Use * the send_to function to send data on an unconnected raw socket. 
*/ template std::size_t send(const ConstBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { return this->get_service().send( this->get_implementation(), buffers, flags, ec); } /// Start an asynchronous send on a connected socket. /** * This function is used to send data on the raw socket. The function call * will block until the data has been sent successfully or an error occurs. * * @param buffers One or more data buffers to be sent on the socket. Although * the buffers object may be copied as necessary, ownership of the underlying * memory blocks is retained by the caller, which must guarantee that they * remain valid until the handler is called. * * @param handler The handler to be called when the send operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes sent. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @note The async_send operation can only be used with a connected socket. * Use the async_send_to function to send data on an unconnected raw * socket. * * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code * socket.async_send(asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. 
*/ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_send(const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a WriteHandler. ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; return this->get_service().async_send(this->get_implementation(), buffers, 0, ASIO_MOVE_CAST(WriteHandler)(handler)); } /// Start an asynchronous send on a connected socket. /** * This function is used to send data on the raw socket. The function call * will block until the data has been sent successfully or an error occurs. * * @param buffers One or more data buffers to be sent on the socket. Although * the buffers object may be copied as necessary, ownership of the underlying * memory blocks is retained by the caller, which must guarantee that they * remain valid until the handler is called. * * @param flags Flags specifying how the send call is to be made. * * @param handler The handler to be called when the send operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes sent. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @note The async_send operation can only be used with a connected socket. * Use the async_send_to function to send data on an unconnected raw * socket. 
*/ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_send(const ConstBufferSequence& buffers, socket_base::message_flags flags, ASIO_MOVE_ARG(WriteHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a WriteHandler. ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; return this->get_service().async_send(this->get_implementation(), buffers, flags, ASIO_MOVE_CAST(WriteHandler)(handler)); } /// Send raw data to the specified endpoint. /** * This function is used to send raw data to the specified remote endpoint. * The function call will block until the data has been sent successfully or * an error occurs. * * @param buffers One or more data buffers to be sent to the remote endpoint. * * @param destination The remote endpoint to which the data will be sent. * * @returns The number of bytes sent. * * @throws asio::system_error Thrown on failure. * * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code * asio::ip::udp::endpoint destination( * asio::ip::address::from_string("1.2.3.4"), 12345); * socket.send_to(asio::buffer(data, size), destination); * @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t send_to(const ConstBufferSequence& buffers, const endpoint_type& destination) { asio::error_code ec; std::size_t s = this->get_service().send_to( this->get_implementation(), buffers, destination, 0, ec); asio::detail::throw_error(ec, "send_to"); return s; } /// Send raw data to the specified endpoint. /** * This function is used to send raw data to the specified remote endpoint. * The function call will block until the data has been sent successfully or * an error occurs. * * @param buffers One or more data buffers to be sent to the remote endpoint. 
* * @param destination The remote endpoint to which the data will be sent. * * @param flags Flags specifying how the send call is to be made. * * @returns The number of bytes sent. * * @throws asio::system_error Thrown on failure. */ template std::size_t send_to(const ConstBufferSequence& buffers, const endpoint_type& destination, socket_base::message_flags flags) { asio::error_code ec; std::size_t s = this->get_service().send_to( this->get_implementation(), buffers, destination, flags, ec); asio::detail::throw_error(ec, "send_to"); return s; } /// Send raw data to the specified endpoint. /** * This function is used to send raw data to the specified remote endpoint. * The function call will block until the data has been sent successfully or * an error occurs. * * @param buffers One or more data buffers to be sent to the remote endpoint. * * @param destination The remote endpoint to which the data will be sent. * * @param flags Flags specifying how the send call is to be made. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes sent. */ template std::size_t send_to(const ConstBufferSequence& buffers, const endpoint_type& destination, socket_base::message_flags flags, asio::error_code& ec) { return this->get_service().send_to(this->get_implementation(), buffers, destination, flags, ec); } /// Start an asynchronous send. /** * This function is used to asynchronously send raw data to the specified * remote endpoint. The function call always returns immediately. * * @param buffers One or more data buffers to be sent to the remote endpoint. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param destination The remote endpoint to which the data will be sent. * Copies will be made of the endpoint as required. 
* * @param handler The handler to be called when the send operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes sent. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code * asio::ip::udp::endpoint destination( * asio::ip::address::from_string("1.2.3.4"), 12345); * socket.async_send_to( * asio::buffer(data, size), destination, handler); * @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_send_to(const ConstBufferSequence& buffers, const endpoint_type& destination, ASIO_MOVE_ARG(WriteHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a WriteHandler. ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; return this->get_service().async_send_to(this->get_implementation(), buffers, destination, 0, ASIO_MOVE_CAST(WriteHandler)(handler)); } /// Start an asynchronous send. /** * This function is used to asynchronously send raw data to the specified * remote endpoint. The function call always returns immediately. * * @param buffers One or more data buffers to be sent to the remote endpoint. 
* Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param flags Flags specifying how the send call is to be made. * * @param destination The remote endpoint to which the data will be sent. * Copies will be made of the endpoint as required. * * @param handler The handler to be called when the send operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes sent. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_send_to(const ConstBufferSequence& buffers, const endpoint_type& destination, socket_base::message_flags flags, ASIO_MOVE_ARG(WriteHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a WriteHandler. ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; return this->get_service().async_send_to( this->get_implementation(), buffers, destination, flags, ASIO_MOVE_CAST(WriteHandler)(handler)); } /// Receive some data on a connected socket. /** * This function is used to receive data on the raw socket. The function * call will block until data has been received successfully or an error * occurs. * * @param buffers One or more buffers into which the data will be received. * * @returns The number of bytes received. * * @throws asio::system_error Thrown on failure. 
* * @note The receive operation can only be used with a connected socket. Use * the receive_from function to receive data on an unconnected raw * socket. * * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code socket.receive(asio::buffer(data, size)); @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t receive(const MutableBufferSequence& buffers) { asio::error_code ec; std::size_t s = this->get_service().receive( this->get_implementation(), buffers, 0, ec); asio::detail::throw_error(ec, "receive"); return s; } /// Receive some data on a connected socket. /** * This function is used to receive data on the raw socket. The function * call will block until data has been received successfully or an error * occurs. * * @param buffers One or more buffers into which the data will be received. * * @param flags Flags specifying how the receive call is to be made. * * @returns The number of bytes received. * * @throws asio::system_error Thrown on failure. * * @note The receive operation can only be used with a connected socket. Use * the receive_from function to receive data on an unconnected raw * socket. */ template std::size_t receive(const MutableBufferSequence& buffers, socket_base::message_flags flags) { asio::error_code ec; std::size_t s = this->get_service().receive( this->get_implementation(), buffers, flags, ec); asio::detail::throw_error(ec, "receive"); return s; } /// Receive some data on a connected socket. /** * This function is used to receive data on the raw socket. The function * call will block until data has been received successfully or an error * occurs. * * @param buffers One or more buffers into which the data will be received. * * @param flags Flags specifying how the receive call is to be made. * * @param ec Set to indicate what error occurred, if any. 
* * @returns The number of bytes received. * * @note The receive operation can only be used with a connected socket. Use * the receive_from function to receive data on an unconnected raw * socket. */ template std::size_t receive(const MutableBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { return this->get_service().receive( this->get_implementation(), buffers, flags, ec); } /// Start an asynchronous receive on a connected socket. /** * This function is used to asynchronously receive data from the raw * socket. The function call always returns immediately. * * @param buffers One or more buffers into which the data will be received. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param handler The handler to be called when the receive operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes received. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @note The async_receive operation can only be used with a connected socket. * Use the async_receive_from function to receive data on an unconnected * raw socket. * * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code * socket.async_receive(asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. 
*/ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive(const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ReadHandler. ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; return this->get_service().async_receive(this->get_implementation(), buffers, 0, ASIO_MOVE_CAST(ReadHandler)(handler)); } /// Start an asynchronous receive on a connected socket. /** * This function is used to asynchronously receive data from the raw * socket. The function call always returns immediately. * * @param buffers One or more buffers into which the data will be received. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param flags Flags specifying how the receive call is to be made. * * @param handler The handler to be called when the receive operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes received. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @note The async_receive operation can only be used with a connected socket. * Use the async_receive_from function to receive data on an unconnected * raw socket. 
*/ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive(const MutableBufferSequence& buffers, socket_base::message_flags flags, ASIO_MOVE_ARG(ReadHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ReadHandler. ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; return this->get_service().async_receive(this->get_implementation(), buffers, flags, ASIO_MOVE_CAST(ReadHandler)(handler)); } /// Receive raw data with the endpoint of the sender. /** * This function is used to receive raw data. The function call will block * until data has been received successfully or an error occurs. * * @param buffers One or more buffers into which the data will be received. * * @param sender_endpoint An endpoint object that receives the endpoint of * the remote sender of the data. * * @returns The number of bytes received. * * @throws asio::system_error Thrown on failure. * * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code * asio::ip::udp::endpoint sender_endpoint; * socket.receive_from( * asio::buffer(data, size), sender_endpoint); * @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t receive_from(const MutableBufferSequence& buffers, endpoint_type& sender_endpoint) { asio::error_code ec; std::size_t s = this->get_service().receive_from( this->get_implementation(), buffers, sender_endpoint, 0, ec); asio::detail::throw_error(ec, "receive_from"); return s; } /// Receive raw data with the endpoint of the sender. /** * This function is used to receive raw data. The function call will block * until data has been received successfully or an error occurs. * * @param buffers One or more buffers into which the data will be received. 
* * @param sender_endpoint An endpoint object that receives the endpoint of * the remote sender of the data. * * @param flags Flags specifying how the receive call is to be made. * * @returns The number of bytes received. * * @throws asio::system_error Thrown on failure. */ template std::size_t receive_from(const MutableBufferSequence& buffers, endpoint_type& sender_endpoint, socket_base::message_flags flags) { asio::error_code ec; std::size_t s = this->get_service().receive_from( this->get_implementation(), buffers, sender_endpoint, flags, ec); asio::detail::throw_error(ec, "receive_from"); return s; } /// Receive raw data with the endpoint of the sender. /** * This function is used to receive raw data. The function call will block * until data has been received successfully or an error occurs. * * @param buffers One or more buffers into which the data will be received. * * @param sender_endpoint An endpoint object that receives the endpoint of * the remote sender of the data. * * @param flags Flags specifying how the receive call is to be made. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes received. */ template std::size_t receive_from(const MutableBufferSequence& buffers, endpoint_type& sender_endpoint, socket_base::message_flags flags, asio::error_code& ec) { return this->get_service().receive_from(this->get_implementation(), buffers, sender_endpoint, flags, ec); } /// Start an asynchronous receive. /** * This function is used to asynchronously receive raw data. The function * call always returns immediately. * * @param buffers One or more buffers into which the data will be received. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param sender_endpoint An endpoint object that receives the endpoint of * the remote sender of the data. 
Ownership of the sender_endpoint object * is retained by the caller, which must guarantee that it is valid until the * handler is called. * * @param handler The handler to be called when the receive operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes received. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code socket.async_receive_from( * asio::buffer(data, size), 0, sender_endpoint, handler); @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive_from(const MutableBufferSequence& buffers, endpoint_type& sender_endpoint, ASIO_MOVE_ARG(ReadHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ReadHandler. ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; return this->get_service().async_receive_from( this->get_implementation(), buffers, sender_endpoint, 0, ASIO_MOVE_CAST(ReadHandler)(handler)); } /// Start an asynchronous receive. /** * This function is used to asynchronously receive raw data. The function * call always returns immediately. * * @param buffers One or more buffers into which the data will be received. 
* Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param sender_endpoint An endpoint object that receives the endpoint of * the remote sender of the data. Ownership of the sender_endpoint object * is retained by the caller, which must guarantee that it is valid until the * handler is called. * * @param flags Flags specifying how the receive call is to be made. * * @param handler The handler to be called when the receive operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes received. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive_from(const MutableBufferSequence& buffers, endpoint_type& sender_endpoint, socket_base::message_flags flags, ASIO_MOVE_ARG(ReadHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ReadHandler. 
ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; return this->get_service().async_receive_from( this->get_implementation(), buffers, sender_endpoint, flags, ASIO_MOVE_CAST(ReadHandler)(handler)); } }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_BASIC_RAW_SOCKET_HPP galera-26.4.3/asio/asio/basic_datagram_socket.hpp0000664000177500017540000011200113540715002020241 0ustar dbartmy// // basic_datagram_socket.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BASIC_DATAGRAM_SOCKET_HPP #define ASIO_BASIC_DATAGRAM_SOCKET_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/basic_socket.hpp" #include "asio/datagram_socket_service.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/type_traits.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Provides datagram-oriented socket functionality. /** * The basic_datagram_socket class template provides asynchronous and blocking * datagram-oriented socket functionality. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ template > class basic_datagram_socket : public basic_socket { public: /// (Deprecated: Use native_handle_type.) The native representation of a /// socket. typedef typename DatagramSocketService::native_handle_type native_type; /// The native representation of a socket. typedef typename DatagramSocketService::native_handle_type native_handle_type; /// The protocol type. typedef Protocol protocol_type; /// The endpoint type. 
typedef typename Protocol::endpoint endpoint_type; /// Construct a basic_datagram_socket without opening it. /** * This constructor creates a datagram socket without opening it. The open() * function must be called before data can be sent or received on the socket. * * @param io_service The io_service object that the datagram socket will use * to dispatch handlers for any asynchronous operations performed on the * socket. */ explicit basic_datagram_socket(asio::io_service& io_service) : basic_socket(io_service) { } /// Construct and open a basic_datagram_socket. /** * This constructor creates and opens a datagram socket. * * @param io_service The io_service object that the datagram socket will use * to dispatch handlers for any asynchronous operations performed on the * socket. * * @param protocol An object specifying protocol parameters to be used. * * @throws asio::system_error Thrown on failure. */ basic_datagram_socket(asio::io_service& io_service, const protocol_type& protocol) : basic_socket(io_service, protocol) { } /// Construct a basic_datagram_socket, opening it and binding it to the given /// local endpoint. /** * This constructor creates a datagram socket and automatically opens it bound * to the specified endpoint on the local machine. The protocol used is the * protocol associated with the given endpoint. * * @param io_service The io_service object that the datagram socket will use * to dispatch handlers for any asynchronous operations performed on the * socket. * * @param endpoint An endpoint on the local machine to which the datagram * socket will be bound. * * @throws asio::system_error Thrown on failure. */ basic_datagram_socket(asio::io_service& io_service, const endpoint_type& endpoint) : basic_socket(io_service, endpoint) { } /// Construct a basic_datagram_socket on an existing native socket. /** * This constructor creates a datagram socket object to hold an existing * native socket. 
* * @param io_service The io_service object that the datagram socket will use * to dispatch handlers for any asynchronous operations performed on the * socket. * * @param protocol An object specifying protocol parameters to be used. * * @param native_socket The new underlying socket implementation. * * @throws asio::system_error Thrown on failure. */ basic_datagram_socket(asio::io_service& io_service, const protocol_type& protocol, const native_handle_type& native_socket) : basic_socket( io_service, protocol, native_socket) { } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a basic_datagram_socket from another. /** * This constructor moves a datagram socket from one object to another. * * @param other The other basic_datagram_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_datagram_socket(io_service&) constructor. */ basic_datagram_socket(basic_datagram_socket&& other) : basic_socket( ASIO_MOVE_CAST(basic_datagram_socket)(other)) { } /// Move-assign a basic_datagram_socket from another. /** * This assignment operator moves a datagram socket from one object to * another. * * @param other The other basic_datagram_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_datagram_socket(io_service&) constructor. */ basic_datagram_socket& operator=(basic_datagram_socket&& other) { basic_socket::operator=( ASIO_MOVE_CAST(basic_datagram_socket)(other)); return *this; } /// Move-construct a basic_datagram_socket from a socket of another protocol /// type. /** * This constructor moves a datagram socket from one object to another. * * @param other The other basic_datagram_socket object from which the move * will occur. 
* * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_datagram_socket(io_service&) constructor. */ template basic_datagram_socket( basic_datagram_socket&& other, typename enable_if::value>::type* = 0) : basic_socket( ASIO_MOVE_CAST2(basic_datagram_socket< Protocol1, DatagramSocketService1>)(other)) { } /// Move-assign a basic_datagram_socket from a socket of another protocol /// type. /** * This assignment operator moves a datagram socket from one object to * another. * * @param other The other basic_datagram_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_datagram_socket(io_service&) constructor. */ template typename enable_if::value, basic_datagram_socket>::type& operator=( basic_datagram_socket&& other) { basic_socket::operator=( ASIO_MOVE_CAST2(basic_datagram_socket< Protocol1, DatagramSocketService1>)(other)); return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Send some data on a connected socket. /** * This function is used to send data on the datagram socket. The function * call will block until the data has been sent successfully or an error * occurs. * * @param buffers One ore more data buffers to be sent on the socket. * * @returns The number of bytes sent. * * @throws asio::system_error Thrown on failure. * * @note The send operation can only be used with a connected socket. Use * the send_to function to send data on an unconnected datagram socket. * * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code socket.send(asio::buffer(data, size)); @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. 
*/ template std::size_t send(const ConstBufferSequence& buffers) { asio::error_code ec; std::size_t s = this->get_service().send( this->get_implementation(), buffers, 0, ec); asio::detail::throw_error(ec, "send"); return s; } /// Send some data on a connected socket. /** * This function is used to send data on the datagram socket. The function * call will block until the data has been sent successfully or an error * occurs. * * @param buffers One ore more data buffers to be sent on the socket. * * @param flags Flags specifying how the send call is to be made. * * @returns The number of bytes sent. * * @throws asio::system_error Thrown on failure. * * @note The send operation can only be used with a connected socket. Use * the send_to function to send data on an unconnected datagram socket. */ template std::size_t send(const ConstBufferSequence& buffers, socket_base::message_flags flags) { asio::error_code ec; std::size_t s = this->get_service().send( this->get_implementation(), buffers, flags, ec); asio::detail::throw_error(ec, "send"); return s; } /// Send some data on a connected socket. /** * This function is used to send data on the datagram socket. The function * call will block until the data has been sent successfully or an error * occurs. * * @param buffers One or more data buffers to be sent on the socket. * * @param flags Flags specifying how the send call is to be made. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes sent. * * @note The send operation can only be used with a connected socket. Use * the send_to function to send data on an unconnected datagram socket. */ template std::size_t send(const ConstBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { return this->get_service().send( this->get_implementation(), buffers, flags, ec); } /// Start an asynchronous send on a connected socket. /** * This function is used to asynchronously send data on the datagram socket. 
* The function call always returns immediately. * * @param buffers One or more data buffers to be sent on the socket. Although * the buffers object may be copied as necessary, ownership of the underlying * memory blocks is retained by the caller, which must guarantee that they * remain valid until the handler is called. * * @param handler The handler to be called when the send operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes sent. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @note The async_send operation can only be used with a connected socket. * Use the async_send_to function to send data on an unconnected datagram * socket. * * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code * socket.async_send(asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_send(const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a WriteHandler. ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; return this->get_service().async_send(this->get_implementation(), buffers, 0, ASIO_MOVE_CAST(WriteHandler)(handler)); } /// Start an asynchronous send on a connected socket. 
/** * This function is used to asynchronously send data on the datagram socket. * The function call always returns immediately. * * @param buffers One or more data buffers to be sent on the socket. Although * the buffers object may be copied as necessary, ownership of the underlying * memory blocks is retained by the caller, which must guarantee that they * remain valid until the handler is called. * * @param flags Flags specifying how the send call is to be made. * * @param handler The handler to be called when the send operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes sent. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @note The async_send operation can only be used with a connected socket. * Use the async_send_to function to send data on an unconnected datagram * socket. */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_send(const ConstBufferSequence& buffers, socket_base::message_flags flags, ASIO_MOVE_ARG(WriteHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a WriteHandler. ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; return this->get_service().async_send(this->get_implementation(), buffers, flags, ASIO_MOVE_CAST(WriteHandler)(handler)); } /// Send a datagram to the specified endpoint. /** * This function is used to send a datagram to the specified remote endpoint. * The function call will block until the data has been sent successfully or * an error occurs. 
* * @param buffers One or more data buffers to be sent to the remote endpoint. * * @param destination The remote endpoint to which the data will be sent. * * @returns The number of bytes sent. * * @throws asio::system_error Thrown on failure. * * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code * asio::ip::udp::endpoint destination( * asio::ip::address::from_string("1.2.3.4"), 12345); * socket.send_to(asio::buffer(data, size), destination); * @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t send_to(const ConstBufferSequence& buffers, const endpoint_type& destination) { asio::error_code ec; std::size_t s = this->get_service().send_to( this->get_implementation(), buffers, destination, 0, ec); asio::detail::throw_error(ec, "send_to"); return s; } /// Send a datagram to the specified endpoint. /** * This function is used to send a datagram to the specified remote endpoint. * The function call will block until the data has been sent successfully or * an error occurs. * * @param buffers One or more data buffers to be sent to the remote endpoint. * * @param destination The remote endpoint to which the data will be sent. * * @param flags Flags specifying how the send call is to be made. * * @returns The number of bytes sent. * * @throws asio::system_error Thrown on failure. */ template std::size_t send_to(const ConstBufferSequence& buffers, const endpoint_type& destination, socket_base::message_flags flags) { asio::error_code ec; std::size_t s = this->get_service().send_to( this->get_implementation(), buffers, destination, flags, ec); asio::detail::throw_error(ec, "send_to"); return s; } /// Send a datagram to the specified endpoint. /** * This function is used to send a datagram to the specified remote endpoint. 
* The function call will block until the data has been sent successfully or * an error occurs. * * @param buffers One or more data buffers to be sent to the remote endpoint. * * @param destination The remote endpoint to which the data will be sent. * * @param flags Flags specifying how the send call is to be made. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes sent. */ template std::size_t send_to(const ConstBufferSequence& buffers, const endpoint_type& destination, socket_base::message_flags flags, asio::error_code& ec) { return this->get_service().send_to(this->get_implementation(), buffers, destination, flags, ec); } /// Start an asynchronous send. /** * This function is used to asynchronously send a datagram to the specified * remote endpoint. The function call always returns immediately. * * @param buffers One or more data buffers to be sent to the remote endpoint. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param destination The remote endpoint to which the data will be sent. * Copies will be made of the endpoint as required. * * @param handler The handler to be called when the send operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes sent. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). 
* * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code * asio::ip::udp::endpoint destination( * asio::ip::address::from_string("1.2.3.4"), 12345); * socket.async_send_to( * asio::buffer(data, size), destination, handler); * @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_send_to(const ConstBufferSequence& buffers, const endpoint_type& destination, ASIO_MOVE_ARG(WriteHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a WriteHandler. ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; return this->get_service().async_send_to( this->get_implementation(), buffers, destination, 0, ASIO_MOVE_CAST(WriteHandler)(handler)); } /// Start an asynchronous send. /** * This function is used to asynchronously send a datagram to the specified * remote endpoint. The function call always returns immediately. * * @param buffers One or more data buffers to be sent to the remote endpoint. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param flags Flags specifying how the send call is to be made. * * @param destination The remote endpoint to which the data will be sent. * Copies will be made of the endpoint as required. * * @param handler The handler to be called when the send operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes sent. 
* ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_send_to(const ConstBufferSequence& buffers, const endpoint_type& destination, socket_base::message_flags flags, ASIO_MOVE_ARG(WriteHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a WriteHandler. ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; return this->get_service().async_send_to( this->get_implementation(), buffers, destination, flags, ASIO_MOVE_CAST(WriteHandler)(handler)); } /// Receive some data on a connected socket. /** * This function is used to receive data on the datagram socket. The function * call will block until data has been received successfully or an error * occurs. * * @param buffers One or more buffers into which the data will be received. * * @returns The number of bytes received. * * @throws asio::system_error Thrown on failure. * * @note The receive operation can only be used with a connected socket. Use * the receive_from function to receive data on an unconnected datagram * socket. * * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code socket.receive(asio::buffer(data, size)); @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t receive(const MutableBufferSequence& buffers) { asio::error_code ec; std::size_t s = this->get_service().receive( this->get_implementation(), buffers, 0, ec); asio::detail::throw_error(ec, "receive"); return s; } /// Receive some data on a connected socket. 
/** * This function is used to receive data on the datagram socket. The function * call will block until data has been received successfully or an error * occurs. * * @param buffers One or more buffers into which the data will be received. * * @param flags Flags specifying how the receive call is to be made. * * @returns The number of bytes received. * * @throws asio::system_error Thrown on failure. * * @note The receive operation can only be used with a connected socket. Use * the receive_from function to receive data on an unconnected datagram * socket. */ template std::size_t receive(const MutableBufferSequence& buffers, socket_base::message_flags flags) { asio::error_code ec; std::size_t s = this->get_service().receive( this->get_implementation(), buffers, flags, ec); asio::detail::throw_error(ec, "receive"); return s; } /// Receive some data on a connected socket. /** * This function is used to receive data on the datagram socket. The function * call will block until data has been received successfully or an error * occurs. * * @param buffers One or more buffers into which the data will be received. * * @param flags Flags specifying how the receive call is to be made. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes received. * * @note The receive operation can only be used with a connected socket. Use * the receive_from function to receive data on an unconnected datagram * socket. */ template std::size_t receive(const MutableBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { return this->get_service().receive( this->get_implementation(), buffers, flags, ec); } /// Start an asynchronous receive on a connected socket. /** * This function is used to asynchronously receive data from the datagram * socket. The function call always returns immediately. * * @param buffers One or more buffers into which the data will be received. 
* Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param handler The handler to be called when the receive operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes received. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @note The async_receive operation can only be used with a connected socket. * Use the async_receive_from function to receive data on an unconnected * datagram socket. * * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code * socket.async_receive(asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive(const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ReadHandler. ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; return this->get_service().async_receive(this->get_implementation(), buffers, 0, ASIO_MOVE_CAST(ReadHandler)(handler)); } /// Start an asynchronous receive on a connected socket. /** * This function is used to asynchronously receive data from the datagram * socket. The function call always returns immediately. 
* * @param buffers One or more buffers into which the data will be received. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param flags Flags specifying how the receive call is to be made. * * @param handler The handler to be called when the receive operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes received. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @note The async_receive operation can only be used with a connected socket. * Use the async_receive_from function to receive data on an unconnected * datagram socket. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive(const MutableBufferSequence& buffers, socket_base::message_flags flags, ASIO_MOVE_ARG(ReadHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ReadHandler. ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; return this->get_service().async_receive(this->get_implementation(), buffers, flags, ASIO_MOVE_CAST(ReadHandler)(handler)); } /// Receive a datagram with the endpoint of the sender. /** * This function is used to receive a datagram. The function call will block * until data has been received successfully or an error occurs. * * @param buffers One or more buffers into which the data will be received. 
* * @param sender_endpoint An endpoint object that receives the endpoint of * the remote sender of the datagram. * * @returns The number of bytes received. * * @throws asio::system_error Thrown on failure. * * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code * asio::ip::udp::endpoint sender_endpoint; * socket.receive_from( * asio::buffer(data, size), sender_endpoint); * @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t receive_from(const MutableBufferSequence& buffers, endpoint_type& sender_endpoint) { asio::error_code ec; std::size_t s = this->get_service().receive_from( this->get_implementation(), buffers, sender_endpoint, 0, ec); asio::detail::throw_error(ec, "receive_from"); return s; } /// Receive a datagram with the endpoint of the sender. /** * This function is used to receive a datagram. The function call will block * until data has been received successfully or an error occurs. * * @param buffers One or more buffers into which the data will be received. * * @param sender_endpoint An endpoint object that receives the endpoint of * the remote sender of the datagram. * * @param flags Flags specifying how the receive call is to be made. * * @returns The number of bytes received. * * @throws asio::system_error Thrown on failure. */ template std::size_t receive_from(const MutableBufferSequence& buffers, endpoint_type& sender_endpoint, socket_base::message_flags flags) { asio::error_code ec; std::size_t s = this->get_service().receive_from( this->get_implementation(), buffers, sender_endpoint, flags, ec); asio::detail::throw_error(ec, "receive_from"); return s; } /// Receive a datagram with the endpoint of the sender. /** * This function is used to receive a datagram. The function call will block * until data has been received successfully or an error occurs. 
* * @param buffers One or more buffers into which the data will be received. * * @param sender_endpoint An endpoint object that receives the endpoint of * the remote sender of the datagram. * * @param flags Flags specifying how the receive call is to be made. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes received. */ template std::size_t receive_from(const MutableBufferSequence& buffers, endpoint_type& sender_endpoint, socket_base::message_flags flags, asio::error_code& ec) { return this->get_service().receive_from(this->get_implementation(), buffers, sender_endpoint, flags, ec); } /// Start an asynchronous receive. /** * This function is used to asynchronously receive a datagram. The function * call always returns immediately. * * @param buffers One or more buffers into which the data will be received. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param sender_endpoint An endpoint object that receives the endpoint of * the remote sender of the datagram. Ownership of the sender_endpoint object * is retained by the caller, which must guarantee that it is valid until the * handler is called. * * @param handler The handler to be called when the receive operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes received. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). 
* * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code socket.async_receive_from( * asio::buffer(data, size), sender_endpoint, handler); @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive_from(const MutableBufferSequence& buffers, endpoint_type& sender_endpoint, ASIO_MOVE_ARG(ReadHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ReadHandler. ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; return this->get_service().async_receive_from( this->get_implementation(), buffers, sender_endpoint, 0, ASIO_MOVE_CAST(ReadHandler)(handler)); } /// Start an asynchronous receive. /** * This function is used to asynchronously receive a datagram. The function * call always returns immediately. * * @param buffers One or more buffers into which the data will be received. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param sender_endpoint An endpoint object that receives the endpoint of * the remote sender of the datagram. Ownership of the sender_endpoint object * is retained by the caller, which must guarantee that it is valid until the * handler is called. * * @param flags Flags specifying how the receive call is to be made. * * @param handler The handler to be called when the receive operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes received. 
* ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive_from(const MutableBufferSequence& buffers, endpoint_type& sender_endpoint, socket_base::message_flags flags, ASIO_MOVE_ARG(ReadHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ReadHandler. ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; return this->get_service().async_receive_from( this->get_implementation(), buffers, sender_endpoint, flags, ASIO_MOVE_CAST(ReadHandler)(handler)); } }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_BASIC_DATAGRAM_SOCKET_HPP galera-26.4.3/asio/asio/socket_base.hpp0000664000177500017540000003346713540715002016254 0ustar dbartmy// // socket_base.hpp // ~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SOCKET_BASE_HPP #define ASIO_SOCKET_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/io_control.hpp" #include "asio/detail/socket_option.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// The socket_base class is used as a base for the basic_stream_socket and /// basic_datagram_socket class templates so that we have a common place to /// define the shutdown_type and enum. class socket_base { public: /// Different ways a socket may be shutdown. 
enum shutdown_type { #if defined(GENERATING_DOCUMENTATION) /// Shutdown the receive side of the socket. shutdown_receive = implementation_defined, /// Shutdown the send side of the socket. shutdown_send = implementation_defined, /// Shutdown both send and receive on the socket. shutdown_both = implementation_defined #else shutdown_receive = ASIO_OS_DEF(SHUT_RD), shutdown_send = ASIO_OS_DEF(SHUT_WR), shutdown_both = ASIO_OS_DEF(SHUT_RDWR) #endif }; /// Bitmask type for flags that can be passed to send and receive operations. typedef int message_flags; #if defined(GENERATING_DOCUMENTATION) /// Peek at incoming data without removing it from the input queue. static const int message_peek = implementation_defined; /// Process out-of-band data. static const int message_out_of_band = implementation_defined; /// Specify that the data should not be subject to routing. static const int message_do_not_route = implementation_defined; /// Specifies that the data marks the end of a record. static const int message_end_of_record = implementation_defined; #else ASIO_STATIC_CONSTANT(int, message_peek = ASIO_OS_DEF(MSG_PEEK)); ASIO_STATIC_CONSTANT(int, message_out_of_band = ASIO_OS_DEF(MSG_OOB)); ASIO_STATIC_CONSTANT(int, message_do_not_route = ASIO_OS_DEF(MSG_DONTROUTE)); ASIO_STATIC_CONSTANT(int, message_end_of_record = ASIO_OS_DEF(MSG_EOR)); #endif /// Socket option to permit sending of broadcast messages. /** * Implements the SOL_SOCKET/SO_BROADCAST socket option. * * @par Examples * Setting the option: * @code * asio::ip::udp::socket socket(io_service); * ... * asio::socket_base::broadcast option(true); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::udp::socket socket(io_service); * ... * asio::socket_base::broadcast option; * socket.get_option(option); * bool is_set = option.value(); * @endcode * * @par Concepts: * Socket_Option, Boolean_Socket_Option. 
*/ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined broadcast; #else typedef asio::detail::socket_option::boolean< ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_BROADCAST)> broadcast; #endif /// Socket option to enable socket-level debugging. /** * Implements the SOL_SOCKET/SO_DEBUG socket option. * * @par Examples * Setting the option: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::socket_base::debug option(true); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::socket_base::debug option; * socket.get_option(option); * bool is_set = option.value(); * @endcode * * @par Concepts: * Socket_Option, Boolean_Socket_Option. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined debug; #else typedef asio::detail::socket_option::boolean< ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_DEBUG)> debug; #endif /// Socket option to prevent routing, use local interfaces only. /** * Implements the SOL_SOCKET/SO_DONTROUTE socket option. * * @par Examples * Setting the option: * @code * asio::ip::udp::socket socket(io_service); * ... * asio::socket_base::do_not_route option(true); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::udp::socket socket(io_service); * ... * asio::socket_base::do_not_route option; * socket.get_option(option); * bool is_set = option.value(); * @endcode * * @par Concepts: * Socket_Option, Boolean_Socket_Option. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined do_not_route; #else typedef asio::detail::socket_option::boolean< ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_DONTROUTE)> do_not_route; #endif /// Socket option to send keep-alives. /** * Implements the SOL_SOCKET/SO_KEEPALIVE socket option. * * @par Examples * Setting the option: * @code * asio::ip::tcp::socket socket(io_service); * ... 
* asio::socket_base::keep_alive option(true); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::socket_base::keep_alive option; * socket.get_option(option); * bool is_set = option.value(); * @endcode * * @par Concepts: * Socket_Option, Boolean_Socket_Option. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined keep_alive; #else typedef asio::detail::socket_option::boolean< ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_KEEPALIVE)> keep_alive; #endif /// Socket option for the send buffer size of a socket. /** * Implements the SOL_SOCKET/SO_SNDBUF socket option. * * @par Examples * Setting the option: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::socket_base::send_buffer_size option(8192); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::socket_base::send_buffer_size option; * socket.get_option(option); * int size = option.value(); * @endcode * * @par Concepts: * Socket_Option, Integer_Socket_Option. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined send_buffer_size; #else typedef asio::detail::socket_option::integer< ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_SNDBUF)> send_buffer_size; #endif /// Socket option for the send low watermark. /** * Implements the SOL_SOCKET/SO_SNDLOWAT socket option. * * @par Examples * Setting the option: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::socket_base::send_low_watermark option(1024); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::socket_base::send_low_watermark option; * socket.get_option(option); * int size = option.value(); * @endcode * * @par Concepts: * Socket_Option, Integer_Socket_Option. 
*/ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined send_low_watermark; #else typedef asio::detail::socket_option::integer< ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_SNDLOWAT)> send_low_watermark; #endif /// Socket option for the receive buffer size of a socket. /** * Implements the SOL_SOCKET/SO_RCVBUF socket option. * * @par Examples * Setting the option: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::socket_base::receive_buffer_size option(8192); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::socket_base::receive_buffer_size option; * socket.get_option(option); * int size = option.value(); * @endcode * * @par Concepts: * Socket_Option, Integer_Socket_Option. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined receive_buffer_size; #else typedef asio::detail::socket_option::integer< ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_RCVBUF)> receive_buffer_size; #endif /// Socket option for the receive low watermark. /** * Implements the SOL_SOCKET/SO_RCVLOWAT socket option. * * @par Examples * Setting the option: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::socket_base::receive_low_watermark option(1024); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::socket_base::receive_low_watermark option; * socket.get_option(option); * int size = option.value(); * @endcode * * @par Concepts: * Socket_Option, Integer_Socket_Option. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined receive_low_watermark; #else typedef asio::detail::socket_option::integer< ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_RCVLOWAT)> receive_low_watermark; #endif /// Socket option to allow the socket to be bound to an address that is /// already in use. /** * Implements the SOL_SOCKET/SO_REUSEADDR socket option. 
* * @par Examples * Setting the option: * @code * asio::ip::tcp::acceptor acceptor(io_service); * ... * asio::socket_base::reuse_address option(true); * acceptor.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::tcp::acceptor acceptor(io_service); * ... * asio::socket_base::reuse_address option; * acceptor.get_option(option); * bool is_set = option.value(); * @endcode * * @par Concepts: * Socket_Option, Boolean_Socket_Option. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined reuse_address; #else typedef asio::detail::socket_option::boolean< ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_REUSEADDR)> reuse_address; #endif /// Socket option to specify whether the socket lingers on close if unsent /// data is present. /** * Implements the SOL_SOCKET/SO_LINGER socket option. * * @par Examples * Setting the option: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::socket_base::linger option(true, 30); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::socket_base::linger option; * socket.get_option(option); * bool is_set = option.enabled(); * unsigned short timeout = option.timeout(); * @endcode * * @par Concepts: * Socket_Option, Linger_Socket_Option. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined linger; #else typedef asio::detail::socket_option::linger< ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_LINGER)> linger; #endif /// Socket option to report aborted connections on accept. /** * Implements a custom socket option that determines whether or not an accept * operation is permitted to fail with asio::error::connection_aborted. * By default the option is false. * * @par Examples * Setting the option: * @code * asio::ip::tcp::acceptor acceptor(io_service); * ... 
* asio::socket_base::enable_connection_aborted option(true); * acceptor.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::tcp::acceptor acceptor(io_service); * ... * asio::socket_base::enable_connection_aborted option; * acceptor.get_option(option); * bool is_set = option.value(); * @endcode * * @par Concepts: * Socket_Option, Boolean_Socket_Option. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined enable_connection_aborted; #else typedef asio::detail::socket_option::boolean< asio::detail::custom_socket_option_level, asio::detail::enable_connection_aborted_option> enable_connection_aborted; #endif /// (Deprecated: Use non_blocking().) IO control command to /// set the blocking mode of the socket. /** * Implements the FIONBIO IO control command. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::socket_base::non_blocking_io command(true); * socket.io_control(command); * @endcode * * @par Concepts: * IO_Control_Command, Boolean_IO_Control_Command. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined non_blocking_io; #else typedef asio::detail::io_control::non_blocking_io non_blocking_io; #endif /// IO control command to get the amount of data that can be read without /// blocking. /** * Implements the FIONREAD IO control command. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::socket_base::bytes_readable command(true); * socket.io_control(command); * std::size_t bytes_readable = command.get(); * @endcode * * @par Concepts: * IO_Control_Command, Size_IO_Control_Command. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined bytes_readable; #else typedef asio::detail::io_control::bytes_readable bytes_readable; #endif /// The maximum length of the queue of pending incoming connections. 
#if defined(GENERATING_DOCUMENTATION) static const int max_connections = implementation_defined; #else ASIO_STATIC_CONSTANT(int, max_connections = ASIO_OS_DEF(SOMAXCONN)); #endif protected: /// Protected destructor to prevent deletion through this type. ~socket_base() { } }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SOCKET_BASE_HPP galera-26.4.3/asio/asio/coroutine.hpp0000664000177500017540000002267413540715002015773 0ustar dbartmy// // coroutine.hpp // ~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_COROUTINE_HPP #define ASIO_COROUTINE_HPP namespace asio { namespace detail { class coroutine_ref; } // namespace detail /// Provides support for implementing stackless coroutines. /** * The @c coroutine class may be used to implement stackless coroutines. The * class itself is used to store the current state of the coroutine. * * Coroutines are copy-constructible and assignable, and the space overhead is * a single int. They can be used as a base class: * * @code class session : coroutine * { * ... * }; @endcode * * or as a data member: * * @code class session * { * ... * coroutine coro_; * }; @endcode * * or even bound in as a function argument using lambdas or @c bind(). The * important thing is that the application maintains a copy of the object * for as long as the coroutine must be kept alive. * * @par Pseudo-keywords * * A coroutine is used in conjunction with certain "pseudo-keywords", which * are implemented as macros. These macros are defined by a header file: * * @code #include <asio/yield.hpp> @endcode * * and may conversely be undefined as follows: * * @code #include <asio/unyield.hpp> @endcode * * reenter * * The @c reenter macro is used to define the body of a coroutine.
It takes a * single argument: a pointer or reference to a coroutine object. For example, * if the base class is a coroutine object you may write: * * @code reenter (this) * { * ... coroutine body ... * } @endcode * * and if a data member or other variable you can write: * * @code reenter (coro_) * { * ... coroutine body ... * } @endcode * * When @c reenter is executed at runtime, control jumps to the location of the * last @c yield or @c fork. * * The coroutine body may also be a single statement, such as: * * @code reenter (this) for (;;) * { * ... * } @endcode * * @b Limitation: The @c reenter macro is implemented using a switch. This * means that you must take care when using local variables within the * coroutine body. The local variable is not allowed in a position where * reentering the coroutine could bypass the variable definition. * * yield statement * * This form of the @c yield keyword is often used with asynchronous operations: * * @code yield socket_->async_read_some(buffer(*buffer_), *this); @endcode * * This divides into four logical steps: * * @li @c yield saves the current state of the coroutine. * @li The statement initiates the asynchronous operation. * @li The resume point is defined immediately following the statement. * @li Control is transferred to the end of the coroutine body. * * When the asynchronous operation completes, the function object is invoked * and @c reenter causes control to transfer to the resume point. It is * important to remember to carry the coroutine state forward with the * asynchronous operation. In the above snippet, the current class is a * function object with a coroutine object as base class or data member.
* * The statement may also be a compound statement, and this permits us to * define local variables with limited scope: * * @code yield * { * mutable_buffers_1 b = buffer(*buffer_); * socket_->async_read_some(b, *this); * } @endcode * * yield return expression ; * * This form of @c yield is often used in generators or coroutine-based parsers. * For example, the function object: * * @code struct interleave : coroutine * { * istream& is1; * istream& is2; * char operator()(char c) * { * reenter (this) for (;;) * { * yield return is1.get(); * yield return is2.get(); * } * } * }; @endcode * * defines a trivial coroutine that interleaves the characters from two input * streams. * * This type of @c yield divides into three logical steps: * * @li @c yield saves the current state of the coroutine. * @li The resume point is defined immediately following the semicolon. * @li The value of the expression is returned from the function. * * yield ; * * This form of @c yield is equivalent to the following steps: * * @li @c yield saves the current state of the coroutine. * @li The resume point is defined immediately following the semicolon. * @li Control is transferred to the end of the coroutine body. * * This form might be applied when coroutines are used for cooperative * threading and scheduling is explicitly managed. For example: * * @code struct task : coroutine * { * ... * void operator()() * { * reenter (this) * { * while (... not finished ...) * { * ... do something ... * yield; * ... do some more ... * yield; * } * } * } * ... * }; * ... * task t1, t2; * for (;;) * { * t1(); * t2(); * } @endcode * * yield break ; * * The final form of @c yield is used to explicitly terminate the coroutine. * This form is comprised of two steps: * * @li @c yield sets the coroutine state to indicate termination. * @li Control is transferred to the end of the coroutine body. * * Once terminated, calls to is_complete() return true and the coroutine cannot * be reentered. 
* * Note that a coroutine may also be implicitly terminated if the coroutine * body is exited without a yield, e.g. by return, throw or by running to the * end of the body. * * fork statement * * The @c fork pseudo-keyword is used when "forking" a coroutine, i.e. splitting * it into two (or more) copies. One use of @c fork is in a server, where a new * coroutine is created to handle each client connection: * * @code reenter (this) * { * do * { * socket_.reset(new tcp::socket(io_service_)); * yield acceptor->async_accept(*socket_, *this); * fork server(*this)(); * } while (is_parent()); * ... client-specific handling follows ... * } @endcode * * The logical steps involved in a @c fork are: * * @li @c fork saves the current state of the coroutine. * @li The statement creates a copy of the coroutine and either executes it * immediately or schedules it for later execution. * @li The resume point is defined immediately following the semicolon. * @li For the "parent", control immediately continues from the next line. * * The functions is_parent() and is_child() can be used to differentiate * between parent and child. You would use these functions to alter subsequent * control flow. * * Note that @c fork doesn't do the actual forking by itself. It is the * application's responsibility to create a clone of the coroutine and call it. * The clone can be called immediately, as above, or scheduled for delayed * execution using something like io_service::post(). * * @par Alternate macro names * * If preferred, an application can use macro names that follow a more typical * naming convention, rather than the pseudo-keywords. These are: * * @li @c ASIO_CORO_REENTER instead of @c reenter * @li @c ASIO_CORO_YIELD instead of @c yield * @li @c ASIO_CORO_FORK instead of @c fork */ class coroutine { public: /// Constructs a coroutine in its initial state. coroutine() : value_(0) {} /// Returns true if the coroutine is the child of a fork. 
bool is_child() const { return value_ < 0; } /// Returns true if the coroutine is the parent of a fork. bool is_parent() const { return !is_child(); } /// Returns true if the coroutine has reached its terminal state. bool is_complete() const { return value_ == -1; } private: friend class detail::coroutine_ref; int value_; }; namespace detail { class coroutine_ref { public: coroutine_ref(coroutine& c) : value_(c.value_), modified_(false) {} coroutine_ref(coroutine* c) : value_(c->value_), modified_(false) {} ~coroutine_ref() { if (!modified_) value_ = -1; } operator int() const { return value_; } int& operator=(int v) { modified_ = true; return value_ = v; } private: void operator=(const coroutine_ref&); int& value_; bool modified_; }; } // namespace detail } // namespace asio #define ASIO_CORO_REENTER(c) \ switch (::asio::detail::coroutine_ref _coro_value = c) \ case -1: if (_coro_value) \ { \ goto terminate_coroutine; \ terminate_coroutine: \ _coro_value = -1; \ goto bail_out_of_coroutine; \ bail_out_of_coroutine: \ break; \ } \ else case 0: #define ASIO_CORO_YIELD_IMPL(n) \ for (_coro_value = (n);;) \ if (_coro_value == 0) \ { \ case (n): ; \ break; \ } \ else \ switch (_coro_value ? 
0 : 1) \ for (;;) \ case -1: if (_coro_value) \ goto terminate_coroutine; \ else for (;;) \ case 1: if (_coro_value) \ goto bail_out_of_coroutine; \ else case 0: #define ASIO_CORO_FORK_IMPL(n) \ for (_coro_value = -(n);; _coro_value = (n)) \ if (_coro_value == (n)) \ { \ case -(n): ; \ break; \ } \ else #if defined(_MSC_VER) # define ASIO_CORO_YIELD ASIO_CORO_YIELD_IMPL(__COUNTER__ + 1) # define ASIO_CORO_FORK ASIO_CORO_FORK_IMPL(__COUNTER__ + 1) #else // defined(_MSC_VER) # define ASIO_CORO_YIELD ASIO_CORO_YIELD_IMPL(__LINE__) # define ASIO_CORO_FORK ASIO_CORO_FORK_IMPL(__LINE__) #endif // defined(_MSC_VER) #endif // ASIO_COROUTINE_HPP galera-26.4.3/asio/asio/read_at.hpp0000664000177500017540000006176613540715002015374 0ustar dbartmy// // read_at.hpp // ~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_READ_AT_HPP #define ASIO_READ_AT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/async_result.hpp" #include "asio/basic_streambuf_fwd.hpp" #include "asio/detail/cstdint.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { /** * @defgroup read_at asio::read_at * * @brief Attempt to read a certain amount of data at the specified offset * before returning. */ /*@{*/ /// Attempt to read a certain amount of data at the specified offset before /// returning. /** * This function is used to read a certain number of bytes of data from a * random access device at the specified offset. The call will block until one * of the following conditions is true: * * @li The supplied buffers are full. That is, the bytes transferred is equal to * the sum of the buffer sizes. * * @li An error occurred. 
* * This operation is implemented in terms of zero or more calls to the device's * read_some_at function. * * @param d The device from which the data is to be read. The type must support * the SyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param buffers One or more buffers into which the data will be read. The sum * of the buffer sizes indicates the maximum number of bytes to read from the * device. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. * * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code asio::read_at(d, 42, asio::buffer(data, size)); @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. * * @note This overload is equivalent to calling: * @code asio::read_at( * d, 42, buffers, * asio::transfer_all()); @endcode */ template std::size_t read_at(SyncRandomAccessReadDevice& d, uint64_t offset, const MutableBufferSequence& buffers); /// Attempt to read a certain amount of data at the specified offset before /// returning. /** * This function is used to read a certain number of bytes of data from a * random access device at the specified offset. The call will block until one * of the following conditions is true: * * @li The supplied buffers are full. That is, the bytes transferred is equal to * the sum of the buffer sizes. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the device's * read_some_at function. * * @param d The device from which the data is to be read. The type must support * the SyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param buffers One or more buffers into which the data will be read. The sum * of the buffer sizes indicates the maximum number of bytes to read from the * device. 
* * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes transferred. * * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code asio::read_at(d, 42, * asio::buffer(data, size), ec); @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. * * @note This overload is equivalent to calling: * @code asio::read_at( * d, 42, buffers, * asio::transfer_all(), ec); @endcode */ template std::size_t read_at(SyncRandomAccessReadDevice& d, uint64_t offset, const MutableBufferSequence& buffers, asio::error_code& ec); /// Attempt to read a certain amount of data at the specified offset before /// returning. /** * This function is used to read a certain number of bytes of data from a * random access device at the specified offset. The call will block until one * of the following conditions is true: * * @li The supplied buffers are full. That is, the bytes transferred is equal to * the sum of the buffer sizes. * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the device's * read_some_at function. * * @param d The device from which the data is to be read. The type must support * the SyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param buffers One or more buffers into which the data will be read. The sum * of the buffer sizes indicates the maximum number of bytes to read from the * device. * * @param completion_condition The function object to be called to determine * whether the read operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest read_some_at operation. * const asio::error_code& error, * * // Number of bytes transferred so far. 
* std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the read operation is complete. A non-zero * return value indicates the maximum number of bytes to be read on the next * call to the device's read_some_at function. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. * * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code asio::read_at(d, 42, asio::buffer(data, size), * asio::transfer_at_least(32)); @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t read_at(SyncRandomAccessReadDevice& d, uint64_t offset, const MutableBufferSequence& buffers, CompletionCondition completion_condition); /// Attempt to read a certain amount of data at the specified offset before /// returning. /** * This function is used to read a certain number of bytes of data from a * random access device at the specified offset. The call will block until one * of the following conditions is true: * * @li The supplied buffers are full. That is, the bytes transferred is equal to * the sum of the buffer sizes. * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the device's * read_some_at function. * * @param d The device from which the data is to be read. The type must support * the SyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param buffers One or more buffers into which the data will be read. The sum * of the buffer sizes indicates the maximum number of bytes to read from the * device. * * @param completion_condition The function object to be called to determine * whether the read operation is complete. 
The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest read_some_at operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the read operation is complete. A non-zero * return value indicates the maximum number of bytes to be read on the next * call to the device's read_some_at function. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes read. If an error occurs, returns the total * number of bytes successfully transferred prior to the error. */ template std::size_t read_at(SyncRandomAccessReadDevice& d, uint64_t offset, const MutableBufferSequence& buffers, CompletionCondition completion_condition, asio::error_code& ec); #if !defined(ASIO_NO_IOSTREAM) /// Attempt to read a certain amount of data at the specified offset before /// returning. /** * This function is used to read a certain number of bytes of data from a * random access device at the specified offset. The call will block until one * of the following conditions is true: * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the device's * read_some_at function. * * @param d The device from which the data is to be read. The type must support * the SyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param b The basic_streambuf object into which the data will be read. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. * * @note This overload is equivalent to calling: * @code asio::read_at( * d, 42, b, * asio::transfer_all()); @endcode */ template std::size_t read_at(SyncRandomAccessReadDevice& d, uint64_t offset, basic_streambuf& b); /// Attempt to read a certain amount of data at the specified offset before /// returning. 
/** * This function is used to read a certain number of bytes of data from a * random access device at the specified offset. The call will block until one * of the following conditions is true: * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the device's * read_some_at function. * * @param d The device from which the data is to be read. The type must support * the SyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param b The basic_streambuf object into which the data will be read. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes transferred. * * @note This overload is equivalent to calling: * @code asio::read_at( * d, 42, b, * asio::transfer_all(), ec); @endcode */ template std::size_t read_at(SyncRandomAccessReadDevice& d, uint64_t offset, basic_streambuf& b, asio::error_code& ec); /// Attempt to read a certain amount of data at the specified offset before /// returning. /** * This function is used to read a certain number of bytes of data from a * random access device at the specified offset. The call will block until one * of the following conditions is true: * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the device's * read_some_at function. * * @param d The device from which the data is to be read. The type must support * the SyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param b The basic_streambuf object into which the data will be read. * * @param completion_condition The function object to be called to determine * whether the read operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest read_some_at operation. * const asio::error_code& error, * * // Number of bytes transferred so far. 
* std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the read operation is complete. A non-zero * return value indicates the maximum number of bytes to be read on the next * call to the device's read_some_at function. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. */ template std::size_t read_at(SyncRandomAccessReadDevice& d, uint64_t offset, basic_streambuf& b, CompletionCondition completion_condition); /// Attempt to read a certain amount of data at the specified offset before /// returning. /** * This function is used to read a certain number of bytes of data from a * random access device at the specified offset. The call will block until one * of the following conditions is true: * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the device's * read_some_at function. * * @param d The device from which the data is to be read. The type must support * the SyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param b The basic_streambuf object into which the data will be read. * * @param completion_condition The function object to be called to determine * whether the read operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest read_some_at operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the read operation is complete. A non-zero * return value indicates the maximum number of bytes to be read on the next * call to the device's read_some_at function. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes read. If an error occurs, returns the total * number of bytes successfully transferred prior to the error. 
*/ template std::size_t read_at(SyncRandomAccessReadDevice& d, uint64_t offset, basic_streambuf& b, CompletionCondition completion_condition, asio::error_code& ec); #endif // !defined(ASIO_NO_IOSTREAM) /*@}*/ /** * @defgroup async_read_at asio::async_read_at * * @brief Start an asynchronous operation to read a certain amount of data at * the specified offset. */ /*@{*/ /// Start an asynchronous operation to read a certain amount of data at the /// specified offset. /** * This function is used to asynchronously read a certain number of bytes of * data from a random access device at the specified offset. The function call * always returns immediately. The asynchronous operation will continue until * one of the following conditions is true: * * @li The supplied buffers are full. That is, the bytes transferred is equal to * the sum of the buffer sizes. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the device's * async_read_some_at function. * * @param d The device from which the data is to be read. The type must support * the AsyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param buffers One or more buffers into which the data will be read. The sum * of the buffer sizes indicates the maximum number of bytes to read from the * device. Although the buffers object may be copied as necessary, ownership of * the underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * // Result of operation. * const asio::error_code& error, * * // Number of bytes copied into the buffers. If an error * // occurred, this will be the number of bytes successfully * // transferred prior to the error. 
* std::size_t bytes_transferred * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation of * the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code * asio::async_read_at(d, 42, asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. * * @note This overload is equivalent to calling: * @code asio::async_read_at( * d, 42, buffers, * asio::transfer_all(), * handler); @endcode */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_at(AsyncRandomAccessReadDevice& d, uint64_t offset, const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler); /// Start an asynchronous operation to read a certain amount of data at the /// specified offset. /** * This function is used to asynchronously read a certain number of bytes of * data from a random access device at the specified offset. The function call * always returns immediately. The asynchronous operation will continue until * one of the following conditions is true: * * @li The supplied buffers are full. That is, the bytes transferred is equal to * the sum of the buffer sizes. * * @li The completion_condition function object returns 0. * * @param d The device from which the data is to be read. The type must support * the AsyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param buffers One or more buffers into which the data will be read. The sum * of the buffer sizes indicates the maximum number of bytes to read from the * device. 
Although the buffers object may be copied as necessary, ownership of * the underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param completion_condition The function object to be called to determine * whether the read operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest async_read_some_at operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the read operation is complete. A non-zero * return value indicates the maximum number of bytes to be read on the next * call to the device's async_read_some_at function. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * // Result of operation. * const asio::error_code& error, * * // Number of bytes copied into the buffers. If an error * // occurred, this will be the number of bytes successfully * // transferred prior to the error. * std::size_t bytes_transferred * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation of * the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code asio::async_read_at(d, 42, * asio::buffer(data, size), * asio::transfer_at_least(32), * handler); @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. 
*/ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_at(AsyncRandomAccessReadDevice& d, uint64_t offset, const MutableBufferSequence& buffers, CompletionCondition completion_condition, ASIO_MOVE_ARG(ReadHandler) handler); #if !defined(ASIO_NO_IOSTREAM) /// Start an asynchronous operation to read a certain amount of data at the /// specified offset. /** * This function is used to asynchronously read a certain number of bytes of * data from a random access device at the specified offset. The function call * always returns immediately. The asynchronous operation will continue until * one of the following conditions is true: * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the device's * async_read_some_at function. * * @param d The device from which the data is to be read. The type must support * the AsyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param b A basic_streambuf object into which the data will be read. Ownership * of the streambuf is retained by the caller, which must guarantee that it * remains valid until the handler is called. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * // Result of operation. * const asio::error_code& error, * * // Number of bytes copied into the buffers. If an error * // occurred, this will be the number of bytes successfully * // transferred prior to the error. * std::size_t bytes_transferred * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation of * the handler will be performed in a manner equivalent to using * asio::io_service::post(). 
* * @note This overload is equivalent to calling: * @code asio::async_read_at( * d, 42, b, * asio::transfer_all(), * handler); @endcode */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_at(AsyncRandomAccessReadDevice& d, uint64_t offset, basic_streambuf& b, ASIO_MOVE_ARG(ReadHandler) handler); /// Start an asynchronous operation to read a certain amount of data at the /// specified offset. /** * This function is used to asynchronously read a certain number of bytes of * data from a random access device at the specified offset. The function call * always returns immediately. The asynchronous operation will continue until * one of the following conditions is true: * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the device's * async_read_some_at function. * * @param d The device from which the data is to be read. The type must support * the AsyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param b A basic_streambuf object into which the data will be read. Ownership * of the streambuf is retained by the caller, which must guarantee that it * remains valid until the handler is called. * * @param completion_condition The function object to be called to determine * whether the read operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest async_read_some_at operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the read operation is complete. A non-zero * return value indicates the maximum number of bytes to be read on the next * call to the device's async_read_some_at function. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. 
The function signature of the * handler must be: * @code void handler( * // Result of operation. * const asio::error_code& error, * * // Number of bytes copied into the buffers. If an error * // occurred, this will be the number of bytes successfully * // transferred prior to the error. * std::size_t bytes_transferred * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation of * the handler will be performed in a manner equivalent to using * asio::io_service::post(). */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_at(AsyncRandomAccessReadDevice& d, uint64_t offset, basic_streambuf& b, CompletionCondition completion_condition, ASIO_MOVE_ARG(ReadHandler) handler); #endif // !defined(ASIO_NO_IOSTREAM) /*@}*/ } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/read_at.hpp" #endif // ASIO_READ_AT_HPP galera-26.4.3/asio/asio/io_service.hpp0000664000177500017540000006623213540715002016115 0ustar dbartmy// // io_service.hpp // ~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IO_SERVICE_HPP #define ASIO_IO_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include #include "asio/async_result.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/wrapped_handler.hpp" #include "asio/error_code.hpp" #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) # include "asio/detail/winsock_init.hpp" #elif defined(__sun) || defined(__QNX__) || defined(__hpux) || defined(_AIX) \ || defined(__osf__) # include "asio/detail/signal_init.hpp" #endif #include "asio/detail/push_options.hpp" namespace asio { class io_service; template Service& use_service(io_service& ios); template void add_service(io_service& ios, Service* svc); template bool has_service(io_service& ios); namespace detail { #if defined(ASIO_HAS_IOCP) typedef class win_iocp_io_service io_service_impl; class win_iocp_overlapped_ptr; #else typedef class task_io_service io_service_impl; #endif class service_registry; } // namespace detail /// Provides core I/O functionality. /** * The io_service class provides the core I/O functionality for users of the * asynchronous I/O objects, including: * * @li asio::ip::tcp::socket * @li asio::ip::tcp::acceptor * @li asio::ip::udp::socket * @li asio::deadline_timer. * * The io_service class also includes facilities intended for developers of * custom asynchronous services. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Safe, with the specific exceptions of the reset() and * notify_fork() functions. Calling reset() while there are unfinished run(), * run_one(), poll() or poll_one() calls results in undefined behaviour. 
The * notify_fork() function should not be called while any io_service function, * or any function on an I/O object that is associated with the io_service, is * being called in another thread. * * @par Concepts: * Dispatcher. * * @par Synchronous and asynchronous operations * * Synchronous operations on I/O objects implicitly run the io_service object * for an individual operation. The io_service functions run(), run_one(), * poll() or poll_one() must be called for the io_service to perform * asynchronous operations on behalf of a C++ program. Notification that an * asynchronous operation has completed is delivered by invocation of the * associated handler. Handlers are invoked only by a thread that is currently * calling any overload of run(), run_one(), poll() or poll_one() for the * io_service. * * @par Effect of exceptions thrown from handlers * * If an exception is thrown from a handler, the exception is allowed to * propagate through the throwing thread's invocation of run(), run_one(), * poll() or poll_one(). No other threads that are calling any of these * functions are affected. It is then the responsibility of the application to * catch the exception. * * After the exception has been caught, the run(), run_one(), poll() or * poll_one() call may be restarted @em without the need for an intervening * call to reset(). This allows the thread to rejoin the io_service object's * thread pool without impacting any other threads in the pool. * * For example: * * @code * asio::io_service io_service; * ... * for (;;) * { * try * { * io_service.run(); * break; // run() exited normally * } * catch (my_exception& e) * { * // Deal with exception as appropriate. * } * } * @endcode * * @par Stopping the io_service from running out of work * * Some applications may need to prevent an io_service object's run() call from * returning when there is no more work to do. 
For example, the io_service may * be being run in a background thread that is launched prior to the * application's asynchronous operations. The run() call may be kept running by * creating an object of type asio::io_service::work: * * @code asio::io_service io_service; * asio::io_service::work work(io_service); * ... @endcode * * To effect a shutdown, the application will then need to call the io_service * object's stop() member function. This will cause the io_service run() call * to return as soon as possible, abandoning unfinished operations and without * permitting ready handlers to be dispatched. * * Alternatively, if the application requires that all operations and handlers * be allowed to finish normally, the work object may be explicitly destroyed. * * @code asio::io_service io_service; * auto_ptr work( * new asio::io_service::work(io_service)); * ... * work.reset(); // Allow run() to exit. @endcode * * @par The io_service class and I/O services * * Class io_service implements an extensible, type-safe, polymorphic set of I/O * services, indexed by service type. An object of class io_service must be * initialised before I/O objects such as sockets, resolvers and timers can be * used. These I/O objects are distinguished by having constructors that accept * an @c io_service& parameter. * * I/O services exist to manage the logical interface to the operating system on * behalf of the I/O objects. In particular, there are resources that are shared * across a class of I/O objects. For example, timers may be implemented in * terms of a single timer queue. The I/O services manage these shared * resources. * * Access to the services of an io_service is via three function templates, * use_service(), add_service() and has_service(). * * In a call to @c use_service(), the type argument chooses a service, * making available all members of the named type. 
If @c Service is not present * in an io_service, an object of type @c Service is created and added to the * io_service. A C++ program can check if an io_service implements a * particular service with the function template @c has_service(). * * Service objects may be explicitly added to an io_service using the function * template @c add_service(). If the @c Service is already present, the * service_already_exists exception is thrown. If the owner of the service is * not the same object as the io_service parameter, the invalid_service_owner * exception is thrown. * * Once a service reference is obtained from an io_service object by calling * use_service(), that reference remains usable as long as the owning io_service * object exists. * * All I/O service implementations have io_service::service as a public base * class. Custom I/O services may be implemented by deriving from this class and * then added to an io_service using the facilities described above. */ class io_service : private noncopyable { private: typedef detail::io_service_impl impl_type; #if defined(ASIO_HAS_IOCP) friend class detail::win_iocp_overlapped_ptr; #endif public: class work; friend class work; class id; class service; class strand; /// Constructor. ASIO_DECL io_service(); /// Constructor. /** * Construct with a hint about the required level of concurrency. * * @param concurrency_hint A suggestion to the implementation on how many * threads it should allow to run simultaneously. */ ASIO_DECL explicit io_service(std::size_t concurrency_hint); /// Destructor. /** * On destruction, the io_service performs the following sequence of * operations: * * @li For each service object @c svc in the io_service set, in reverse order * of the beginning of service object lifetime, performs * @c svc->shutdown_service(). * * @li Uninvoked handler objects that were scheduled for deferred invocation * on the io_service, or any associated strand, are destroyed. 
* * @li For each service object @c svc in the io_service set, in reverse order * of the beginning of service object lifetime, performs * delete static_cast(svc). * * @note The destruction sequence described above permits programs to * simplify their resource management by using @c shared_ptr<>. Where an * object's lifetime is tied to the lifetime of a connection (or some other * sequence of asynchronous operations), a @c shared_ptr to the object would * be bound into the handlers for all asynchronous operations associated with * it. This works as follows: * * @li When a single connection ends, all associated asynchronous operations * complete. The corresponding handler objects are destroyed, and all * @c shared_ptr references to the objects are destroyed. * * @li To shut down the whole program, the io_service function stop() is * called to terminate any run() calls as soon as possible. The io_service * destructor defined above destroys all handlers, causing all @c shared_ptr * references to all connection objects to be destroyed. */ ASIO_DECL ~io_service(); /// Run the io_service object's event processing loop. /** * The run() function blocks until all work has finished and there are no * more handlers to be dispatched, or until the io_service has been stopped. * * Multiple threads may call the run() function to set up a pool of threads * from which the io_service may execute handlers. All threads that are * waiting in the pool are equivalent and the io_service may choose any one * of them to invoke a handler. * * A normal exit from the run() function implies that the io_service object * is stopped (the stopped() function returns @c true). Subsequent calls to * run(), run_one(), poll() or poll_one() will return immediately unless there * is a prior call to reset(). * * @return The number of handlers that were executed. * * @throws asio::system_error Thrown on failure. 
* * @note The run() function must not be called from a thread that is currently * calling one of run(), run_one(), poll() or poll_one() on the same * io_service object. * * The poll() function may also be used to dispatch ready handlers, but * without blocking. */ ASIO_DECL std::size_t run(); /// Run the io_service object's event processing loop. /** * The run() function blocks until all work has finished and there are no * more handlers to be dispatched, or until the io_service has been stopped. * * Multiple threads may call the run() function to set up a pool of threads * from which the io_service may execute handlers. All threads that are * waiting in the pool are equivalent and the io_service may choose any one * of them to invoke a handler. * * A normal exit from the run() function implies that the io_service object * is stopped (the stopped() function returns @c true). Subsequent calls to * run(), run_one(), poll() or poll_one() will return immediately unless there * is a prior call to reset(). * * @param ec Set to indicate what error occurred, if any. * * @return The number of handlers that were executed. * * @note The run() function must not be called from a thread that is currently * calling one of run(), run_one(), poll() or poll_one() on the same * io_service object. * * The poll() function may also be used to dispatch ready handlers, but * without blocking. */ ASIO_DECL std::size_t run(asio::error_code& ec); /// Run the io_service object's event processing loop to execute at most one /// handler. /** * The run_one() function blocks until one handler has been dispatched, or * until the io_service has been stopped. * * @return The number of handlers that were executed. A zero return value * implies that the io_service object is stopped (the stopped() function * returns @c true). Subsequent calls to run(), run_one(), poll() or * poll_one() will return immediately unless there is a prior call to * reset(). * * @throws asio::system_error Thrown on failure. 
*/ ASIO_DECL std::size_t run_one(); /// Run the io_service object's event processing loop to execute at most one /// handler. /** * The run_one() function blocks until one handler has been dispatched, or * until the io_service has been stopped. * * @return The number of handlers that were executed. A zero return value * implies that the io_service object is stopped (the stopped() function * returns @c true). Subsequent calls to run(), run_one(), poll() or * poll_one() will return immediately unless there is a prior call to * reset(). * * @return The number of handlers that were executed. */ ASIO_DECL std::size_t run_one(asio::error_code& ec); /// Run the io_service object's event processing loop to execute ready /// handlers. /** * The poll() function runs handlers that are ready to run, without blocking, * until the io_service has been stopped or there are no more ready handlers. * * @return The number of handlers that were executed. * * @throws asio::system_error Thrown on failure. */ ASIO_DECL std::size_t poll(); /// Run the io_service object's event processing loop to execute ready /// handlers. /** * The poll() function runs handlers that are ready to run, without blocking, * until the io_service has been stopped or there are no more ready handlers. * * @param ec Set to indicate what error occurred, if any. * * @return The number of handlers that were executed. */ ASIO_DECL std::size_t poll(asio::error_code& ec); /// Run the io_service object's event processing loop to execute one ready /// handler. /** * The poll_one() function runs at most one handler that is ready to run, * without blocking. * * @return The number of handlers that were executed. * * @throws asio::system_error Thrown on failure. */ ASIO_DECL std::size_t poll_one(); /// Run the io_service object's event processing loop to execute one ready /// handler. /** * The poll_one() function runs at most one handler that is ready to run, * without blocking. 
* * @param ec Set to indicate what error occurred, if any. * * @return The number of handlers that were executed. */ ASIO_DECL std::size_t poll_one(asio::error_code& ec); /// Stop the io_service object's event processing loop. /** * This function does not block, but instead simply signals the io_service to * stop. All invocations of its run() or run_one() member functions should * return as soon as possible. Subsequent calls to run(), run_one(), poll() * or poll_one() will return immediately until reset() is called. */ ASIO_DECL void stop(); /// Determine whether the io_service object has been stopped. /** * This function is used to determine whether an io_service object has been * stopped, either through an explicit call to stop(), or due to running out * of work. When an io_service object is stopped, calls to run(), run_one(), * poll() or poll_one() will return immediately without invoking any * handlers. * * @return @c true if the io_service object is stopped, otherwise @c false. */ ASIO_DECL bool stopped() const; /// Reset the io_service in preparation for a subsequent run() invocation. /** * This function must be called prior to any second or later set of * invocations of the run(), run_one(), poll() or poll_one() functions when a * previous invocation of these functions returned due to the io_service * being stopped or running out of work. After a call to reset(), the * io_service object's stopped() function will return @c false. * * This function must not be called while there are any unfinished calls to * the run(), run_one(), poll() or poll_one() functions. */ ASIO_DECL void reset(); /// Request the io_service to invoke the given handler. /** * This function is used to ask the io_service to execute the given handler. * * The io_service guarantees that the handler will only be called in a thread * in which the run(), run_one(), poll() or poll_one() member functions is * currently being invoked. 
The handler may be executed inside this function * if the guarantee can be met. * * @param handler The handler to be called. The io_service will make * a copy of the handler object as required. The function signature of the * handler must be: @code void handler(); @endcode * * @note This function throws an exception only if: * * @li the handler's @c asio_handler_allocate function; or * * @li the handler's copy constructor * * throws an exception. */ template ASIO_INITFN_RESULT_TYPE(CompletionHandler, void ()) dispatch(ASIO_MOVE_ARG(CompletionHandler) handler); /// Request the io_service to invoke the given handler and return immediately. /** * This function is used to ask the io_service to execute the given handler, * but without allowing the io_service to call the handler from inside this * function. * * The io_service guarantees that the handler will only be called in a thread * in which the run(), run_one(), poll() or poll_one() member functions is * currently being invoked. * * @param handler The handler to be called. The io_service will make * a copy of the handler object as required. The function signature of the * handler must be: @code void handler(); @endcode * * @note This function throws an exception only if: * * @li the handler's @c asio_handler_allocate function; or * * @li the handler's copy constructor * * throws an exception. */ template ASIO_INITFN_RESULT_TYPE(CompletionHandler, void ()) post(ASIO_MOVE_ARG(CompletionHandler) handler); /// Create a new handler that automatically dispatches the wrapped handler /// on the io_service. /** * This function is used to create a new handler function object that, when * invoked, will automatically pass the wrapped handler to the io_service * object's dispatch function. * * @param handler The handler to be wrapped. The io_service will make a copy * of the handler object as required. The function signature of the handler * must be: @code void handler(A1 a1, ... 
An an); @endcode * * @return A function object that, when invoked, passes the wrapped handler to * the io_service object's dispatch function. Given a function object with the * signature: * @code R f(A1 a1, ... An an); @endcode * If this function object is passed to the wrap function like so: * @code io_service.wrap(f); @endcode * then the return value is a function object with the signature * @code void g(A1 a1, ... An an); @endcode * that, when invoked, executes code equivalent to: * @code io_service.dispatch(boost::bind(f, a1, ... an)); @endcode */ template #if defined(GENERATING_DOCUMENTATION) unspecified #else detail::wrapped_handler #endif wrap(Handler handler); /// Fork-related event notifications. enum fork_event { /// Notify the io_service that the process is about to fork. fork_prepare, /// Notify the io_service that the process has forked and is the parent. fork_parent, /// Notify the io_service that the process has forked and is the child. fork_child }; /// Notify the io_service of a fork-related event. /** * This function is used to inform the io_service that the process is about * to fork, or has just forked. This allows the io_service, and the services * it contains, to perform any necessary housekeeping to ensure correct * operation following a fork. * * This function must not be called while any other io_service function, or * any function on an I/O object associated with the io_service, is being * called in another thread. It is, however, safe to call this function from * within a completion handler, provided no other thread is accessing the * io_service. * * @param event A fork-related event. * * @throws asio::system_error Thrown on failure. If the notification * fails the io_service object should no longer be used and should be * destroyed. 
* * @par Example * The following code illustrates how to incorporate the notify_fork() * function: * @code my_io_service.notify_fork(asio::io_service::fork_prepare); * if (fork() == 0) * { * // This is the child process. * my_io_service.notify_fork(asio::io_service::fork_child); * } * else * { * // This is the parent process. * my_io_service.notify_fork(asio::io_service::fork_parent); * } @endcode * * @note For each service object @c svc in the io_service set, performs * svc->fork_service();. When processing the fork_prepare event, * services are visited in reverse order of the beginning of service object * lifetime. Otherwise, services are visited in order of the beginning of * service object lifetime. */ ASIO_DECL void notify_fork(asio::io_service::fork_event event); /// Obtain the service object corresponding to the given type. /** * This function is used to locate a service object that corresponds to * the given service type. If there is no existing implementation of the * service, then the io_service will create a new instance of the service. * * @param ios The io_service object that owns the service. * * @return The service interface implementing the specified service type. * Ownership of the service interface is not transferred to the caller. */ template friend Service& use_service(io_service& ios); /// Add a service object to the io_service. /** * This function is used to add a service to the io_service. * * @param ios The io_service object that owns the service. * * @param svc The service object. On success, ownership of the service object * is transferred to the io_service. When the io_service object is destroyed, * it will destroy the service object by performing: * @code delete static_cast(svc) @endcode * * @throws asio::service_already_exists Thrown if a service of the * given type is already present in the io_service. 
* * @throws asio::invalid_service_owner Thrown if the service's owning * io_service is not the io_service object specified by the ios parameter. */ template friend void add_service(io_service& ios, Service* svc); /// Determine if an io_service contains a specified service type. /** * This function is used to determine whether the io_service contains a * service object corresponding to the given service type. * * @param ios The io_service object that owns the service. * * @return A boolean indicating whether the io_service contains the service. */ template friend bool has_service(io_service& ios); private: #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) detail::winsock_init<> init_; #elif defined(__sun) || defined(__QNX__) || defined(__hpux) || defined(_AIX) \ || defined(__osf__) detail::signal_init<> init_; #endif // The service registry. asio::detail::service_registry* service_registry_; // The implementation. impl_type& impl_; }; /// Class to inform the io_service when it has work to do. /** * The work class is used to inform the io_service when work starts and * finishes. This ensures that the io_service object's run() function will not * exit while work is underway, and that it does exit when there is no * unfinished work remaining. * * The work class is copy-constructible so that it may be used as a data member * in a handler class. It is not assignable. */ class io_service::work { public: /// Constructor notifies the io_service that work is starting. /** * The constructor is used to inform the io_service that some work has begun. * This ensures that the io_service object's run() function will not exit * while the work is underway. */ explicit work(asio::io_service& io_service); /// Copy constructor notifies the io_service that work is starting. /** * The constructor is used to inform the io_service that some work has begun. * This ensures that the io_service object's run() function will not exit * while the work is underway. 
*/ work(const work& other); /// Destructor notifies the io_service that the work is complete. /** * The destructor is used to inform the io_service that some work has * finished. Once the count of unfinished work reaches zero, the io_service * object's run() function is permitted to exit. */ ~work(); /// Get the io_service associated with the work. asio::io_service& get_io_service(); private: // Prevent assignment. void operator=(const work& other); // The io_service implementation. detail::io_service_impl& io_service_impl_; }; /// Class used to uniquely identify a service. class io_service::id : private noncopyable { public: /// Constructor. id() {} }; /// Base class for all io_service services. class io_service::service : private noncopyable { public: /// Get the io_service object that owns the service. asio::io_service& get_io_service(); protected: /// Constructor. /** * @param owner The io_service object that owns the service. */ ASIO_DECL service(asio::io_service& owner); /// Destructor. ASIO_DECL virtual ~service(); private: /// Destroy all user-defined handler objects owned by the service. virtual void shutdown_service() = 0; /// Handle notification of a fork-related event to perform any necessary /// housekeeping. /** * This function is not a pure virtual so that services only have to * implement it if necessary. The default implementation does nothing. */ ASIO_DECL virtual void fork_service( asio::io_service::fork_event event); friend class asio::detail::service_registry; struct key { key() : type_info_(0), id_(0) {} const std::type_info* type_info_; const asio::io_service::id* id_; } key_; asio::io_service& owner_; service* next_; }; /// Exception thrown when trying to add a duplicate service to an io_service. class service_already_exists : public std::logic_error { public: ASIO_DECL service_already_exists(); }; /// Exception thrown when trying to add a service object to an io_service where /// the service has a different owner. 
class invalid_service_owner : public std::logic_error { public: ASIO_DECL invalid_service_owner(); }; namespace detail { // Special derived service id type to keep classes header-file only. template class service_id : public asio::io_service::id { }; // Special service base class to keep classes header-file only. template class service_base : public asio::io_service::service { public: static asio::detail::service_id id; // Constructor. service_base(asio::io_service& io_service) : asio::io_service::service(io_service) { } }; template asio::detail::service_id service_base::id; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/io_service.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/impl/io_service.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_IO_SERVICE_HPP galera-26.4.3/asio/asio/buffered_stream_fwd.hpp0000664000177500017540000000114413540715002017752 0ustar dbartmy// // buffered_stream_fwd.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BUFFERED_STREAM_FWD_HPP #define ASIO_BUFFERED_STREAM_FWD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) namespace asio { template class buffered_stream; } // namespace asio #endif // ASIO_BUFFERED_STREAM_FWD_HPP galera-26.4.3/asio/asio/basic_socket.hpp0000664000177500017540000014265113540715002016417 0ustar dbartmy// // basic_socket.hpp // ~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BASIC_SOCKET_HPP #define ASIO_BASIC_SOCKET_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/async_result.hpp" #include "asio/basic_io_object.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/type_traits.hpp" #include "asio/error.hpp" #include "asio/socket_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Provides socket functionality. /** * The basic_socket class template provides functionality that is common to both * stream-oriented and datagram-oriented sockets. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ template class basic_socket : public basic_io_object, public socket_base { public: /// (Deprecated: Use native_handle_type.) The native representation of a /// socket. typedef typename SocketService::native_handle_type native_type; /// The native representation of a socket. typedef typename SocketService::native_handle_type native_handle_type; /// The protocol type. typedef Protocol protocol_type; /// The endpoint type. typedef typename Protocol::endpoint endpoint_type; /// A basic_socket is always the lowest layer. typedef basic_socket lowest_layer_type; /// Construct a basic_socket without opening it. /** * This constructor creates a socket without opening it. * * @param io_service The io_service object that the socket will use to * dispatch handlers for any asynchronous operations performed on the socket. */ explicit basic_socket(asio::io_service& io_service) : basic_io_object(io_service) { } /// Construct and open a basic_socket. /** * This constructor creates and opens a socket. 
* * @param io_service The io_service object that the socket will use to * dispatch handlers for any asynchronous operations performed on the socket. * * @param protocol An object specifying protocol parameters to be used. * * @throws asio::system_error Thrown on failure. */ basic_socket(asio::io_service& io_service, const protocol_type& protocol) : basic_io_object(io_service) { asio::error_code ec; this->get_service().open(this->get_implementation(), protocol, ec); asio::detail::throw_error(ec, "open"); } /// Construct a basic_socket, opening it and binding it to the given local /// endpoint. /** * This constructor creates a socket and automatically opens it bound to the * specified endpoint on the local machine. The protocol used is the protocol * associated with the given endpoint. * * @param io_service The io_service object that the socket will use to * dispatch handlers for any asynchronous operations performed on the socket. * * @param endpoint An endpoint on the local machine to which the socket will * be bound. * * @throws asio::system_error Thrown on failure. */ basic_socket(asio::io_service& io_service, const endpoint_type& endpoint) : basic_io_object(io_service) { asio::error_code ec; const protocol_type protocol = endpoint.protocol(); this->get_service().open(this->get_implementation(), protocol, ec); asio::detail::throw_error(ec, "open"); this->get_service().bind(this->get_implementation(), endpoint, ec); asio::detail::throw_error(ec, "bind"); } /// Construct a basic_socket on an existing native socket. /** * This constructor creates a socket object to hold an existing native socket. * * @param io_service The io_service object that the socket will use to * dispatch handlers for any asynchronous operations performed on the socket. * * @param protocol An object specifying protocol parameters to be used. * * @param native_socket A native socket. * * @throws asio::system_error Thrown on failure. 
*/ basic_socket(asio::io_service& io_service, const protocol_type& protocol, const native_handle_type& native_socket) : basic_io_object(io_service) { asio::error_code ec; this->get_service().assign(this->get_implementation(), protocol, native_socket, ec); asio::detail::throw_error(ec, "assign"); } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a basic_socket from another. /** * This constructor moves a socket from one object to another. * * @param other The other basic_socket object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_socket(io_service&) constructor. */ basic_socket(basic_socket&& other) : basic_io_object( ASIO_MOVE_CAST(basic_socket)(other)) { } /// Move-assign a basic_socket from another. /** * This assignment operator moves a socket from one object to another. * * @param other The other basic_socket object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_socket(io_service&) constructor. */ basic_socket& operator=(basic_socket&& other) { basic_io_object::operator=( ASIO_MOVE_CAST(basic_socket)(other)); return *this; } // All sockets have access to each other's implementations. template friend class basic_socket; /// Move-construct a basic_socket from a socket of another protocol type. /** * This constructor moves a socket from one object to another. * * @param other The other basic_socket object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_socket(io_service&) constructor. 
*/ template basic_socket(basic_socket&& other, typename enable_if::value>::type* = 0) : basic_io_object(other.get_io_service()) { this->get_service().template converting_move_construct( this->get_implementation(), other.get_implementation()); } /// Move-assign a basic_socket from a socket of another protocol type. /** * This assignment operator moves a socket from one object to another. * * @param other The other basic_socket object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_socket(io_service&) constructor. */ template typename enable_if::value, basic_socket>::type& operator=( basic_socket&& other) { basic_socket tmp(ASIO_MOVE_CAST2(basic_socket< Protocol1, SocketService1>)(other)); basic_io_object::operator=( ASIO_MOVE_CAST(basic_socket)(tmp)); return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Get a reference to the lowest layer. /** * This function returns a reference to the lowest layer in a stack of * layers. Since a basic_socket cannot contain any further layers, it simply * returns a reference to itself. * * @return A reference to the lowest layer in the stack of layers. Ownership * is not transferred to the caller. */ lowest_layer_type& lowest_layer() { return *this; } /// Get a const reference to the lowest layer. /** * This function returns a const reference to the lowest layer in a stack of * layers. Since a basic_socket cannot contain any further layers, it simply * returns a reference to itself. * * @return A const reference to the lowest layer in the stack of layers. * Ownership is not transferred to the caller. */ const lowest_layer_type& lowest_layer() const { return *this; } /// Open the socket using the specified protocol. /** * This function opens the socket so that it will use the specified protocol. * * @param protocol An object specifying protocol parameters to be used. 
* * @throws asio::system_error Thrown on failure. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * socket.open(asio::ip::tcp::v4()); * @endcode */ void open(const protocol_type& protocol = protocol_type()) { asio::error_code ec; this->get_service().open(this->get_implementation(), protocol, ec); asio::detail::throw_error(ec, "open"); } /// Open the socket using the specified protocol. /** * This function opens the socket so that it will use the specified protocol. * * @param protocol An object specifying which protocol is to be used. * * @param ec Set to indicate what error occurred, if any. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * asio::error_code ec; * socket.open(asio::ip::tcp::v4(), ec); * if (ec) * { * // An error occurred. * } * @endcode */ asio::error_code open(const protocol_type& protocol, asio::error_code& ec) { return this->get_service().open(this->get_implementation(), protocol, ec); } /// Assign an existing native socket to the socket. /* * This function opens the socket to hold an existing native socket. * * @param protocol An object specifying which protocol is to be used. * * @param native_socket A native socket. * * @throws asio::system_error Thrown on failure. */ void assign(const protocol_type& protocol, const native_handle_type& native_socket) { asio::error_code ec; this->get_service().assign(this->get_implementation(), protocol, native_socket, ec); asio::detail::throw_error(ec, "assign"); } /// Assign an existing native socket to the socket. /* * This function opens the socket to hold an existing native socket. * * @param protocol An object specifying which protocol is to be used. * * @param native_socket A native socket. * * @param ec Set to indicate what error occurred, if any. 
*/ asio::error_code assign(const protocol_type& protocol, const native_handle_type& native_socket, asio::error_code& ec) { return this->get_service().assign(this->get_implementation(), protocol, native_socket, ec); } /// Determine whether the socket is open. bool is_open() const { return this->get_service().is_open(this->get_implementation()); } /// Close the socket. /** * This function is used to close the socket. Any asynchronous send, receive * or connect operations will be cancelled immediately, and will complete * with the asio::error::operation_aborted error. * * @throws asio::system_error Thrown on failure. Note that, even if * the function indicates an error, the underlying descriptor is closed. * * @note For portable behaviour with respect to graceful closure of a * connected socket, call shutdown() before closing the socket. */ void close() { asio::error_code ec; this->get_service().close(this->get_implementation(), ec); asio::detail::throw_error(ec, "close"); } /// Close the socket. /** * This function is used to close the socket. Any asynchronous send, receive * or connect operations will be cancelled immediately, and will complete * with the asio::error::operation_aborted error. * * @param ec Set to indicate what error occurred, if any. Note that, even if * the function indicates an error, the underlying descriptor is closed. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::error_code ec; * socket.close(ec); * if (ec) * { * // An error occurred. * } * @endcode * * @note For portable behaviour with respect to graceful closure of a * connected socket, call shutdown() before closing the socket. */ asio::error_code close(asio::error_code& ec) { return this->get_service().close(this->get_implementation(), ec); } /// (Deprecated: Use native_handle().) Get the native socket representation. /** * This function may be used to obtain the underlying representation of the * socket. 
This is intended to allow access to native socket functionality * that is not otherwise provided. */ native_type native() { return this->get_service().native_handle(this->get_implementation()); } /// Get the native socket representation. /** * This function may be used to obtain the underlying representation of the * socket. This is intended to allow access to native socket functionality * that is not otherwise provided. */ native_handle_type native_handle() { return this->get_service().native_handle(this->get_implementation()); } /// Cancel all asynchronous operations associated with the socket. /** * This function causes all outstanding asynchronous connect, send and receive * operations to finish immediately, and the handlers for cancelled operations * will be passed the asio::error::operation_aborted error. * * @throws asio::system_error Thrown on failure. * * @note Calls to cancel() will always fail with * asio::error::operation_not_supported when run on Windows XP, Windows * Server 2003, and earlier versions of Windows, unless * ASIO_ENABLE_CANCELIO is defined. However, the CancelIo function has * two issues that should be considered before enabling its use: * * @li It will only cancel asynchronous operations that were initiated in the * current thread. * * @li It can appear to complete without error, but the request to cancel the * unfinished operations may be silently ignored by the operating system. * Whether it works or not seems to depend on the drivers that are installed. * * For portable cancellation, consider using one of the following * alternatives: * * @li Disable asio's I/O completion port backend by defining * ASIO_DISABLE_IOCP. * * @li Use the close() function to simultaneously cancel the outstanding * operations and close the socket. * * When running on Windows Vista, Windows Server 2008, and later, the * CancelIoEx function is always used. This function does not have the * problems described above. 
*/ #if defined(ASIO_MSVC) && (ASIO_MSVC >= 1400) \ && (!defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0600) \ && !defined(ASIO_ENABLE_CANCELIO) __declspec(deprecated("By default, this function always fails with " "operation_not_supported when used on Windows XP, Windows Server 2003, " "or earlier. Consult documentation for details.")) #endif void cancel() { asio::error_code ec; this->get_service().cancel(this->get_implementation(), ec); asio::detail::throw_error(ec, "cancel"); } /// Cancel all asynchronous operations associated with the socket. /** * This function causes all outstanding asynchronous connect, send and receive * operations to finish immediately, and the handlers for cancelled operations * will be passed the asio::error::operation_aborted error. * * @param ec Set to indicate what error occurred, if any. * * @note Calls to cancel() will always fail with * asio::error::operation_not_supported when run on Windows XP, Windows * Server 2003, and earlier versions of Windows, unless * ASIO_ENABLE_CANCELIO is defined. However, the CancelIo function has * two issues that should be considered before enabling its use: * * @li It will only cancel asynchronous operations that were initiated in the * current thread. * * @li It can appear to complete without error, but the request to cancel the * unfinished operations may be silently ignored by the operating system. * Whether it works or not seems to depend on the drivers that are installed. * * For portable cancellation, consider using one of the following * alternatives: * * @li Disable asio's I/O completion port backend by defining * ASIO_DISABLE_IOCP. * * @li Use the close() function to simultaneously cancel the outstanding * operations and close the socket. * * When running on Windows Vista, Windows Server 2008, and later, the * CancelIoEx function is always used. This function does not have the * problems described above. 
*/ #if defined(ASIO_MSVC) && (ASIO_MSVC >= 1400) \ && (!defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0600) \ && !defined(ASIO_ENABLE_CANCELIO) __declspec(deprecated("By default, this function always fails with " "operation_not_supported when used on Windows XP, Windows Server 2003, " "or earlier. Consult documentation for details.")) #endif asio::error_code cancel(asio::error_code& ec) { return this->get_service().cancel(this->get_implementation(), ec); } /// Determine whether the socket is at the out-of-band data mark. /** * This function is used to check whether the socket input is currently * positioned at the out-of-band data mark. * * @return A bool indicating whether the socket is at the out-of-band data * mark. * * @throws asio::system_error Thrown on failure. */ bool at_mark() const { asio::error_code ec; bool b = this->get_service().at_mark(this->get_implementation(), ec); asio::detail::throw_error(ec, "at_mark"); return b; } /// Determine whether the socket is at the out-of-band data mark. /** * This function is used to check whether the socket input is currently * positioned at the out-of-band data mark. * * @param ec Set to indicate what error occurred, if any. * * @return A bool indicating whether the socket is at the out-of-band data * mark. */ bool at_mark(asio::error_code& ec) const { return this->get_service().at_mark(this->get_implementation(), ec); } /// Determine the number of bytes available for reading. /** * This function is used to determine the number of bytes that may be read * without blocking. * * @return The number of bytes that may be read without blocking, or 0 if an * error occurs. * * @throws asio::system_error Thrown on failure. */ std::size_t available() const { asio::error_code ec; std::size_t s = this->get_service().available( this->get_implementation(), ec); asio::detail::throw_error(ec, "available"); return s; } /// Determine the number of bytes available for reading. 
/** * This function is used to determine the number of bytes that may be read * without blocking. * * @param ec Set to indicate what error occurred, if any. * * @return The number of bytes that may be read without blocking, or 0 if an * error occurs. */ std::size_t available(asio::error_code& ec) const { return this->get_service().available(this->get_implementation(), ec); } /// Bind the socket to the given local endpoint. /** * This function binds the socket to the specified endpoint on the local * machine. * * @param endpoint An endpoint on the local machine to which the socket will * be bound. * * @throws asio::system_error Thrown on failure. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * socket.open(asio::ip::tcp::v4()); * socket.bind(asio::ip::tcp::endpoint( * asio::ip::tcp::v4(), 12345)); * @endcode */ void bind(const endpoint_type& endpoint) { asio::error_code ec; this->get_service().bind(this->get_implementation(), endpoint, ec); asio::detail::throw_error(ec, "bind"); } /// Bind the socket to the given local endpoint. /** * This function binds the socket to the specified endpoint on the local * machine. * * @param endpoint An endpoint on the local machine to which the socket will * be bound. * * @param ec Set to indicate what error occurred, if any. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * socket.open(asio::ip::tcp::v4()); * asio::error_code ec; * socket.bind(asio::ip::tcp::endpoint( * asio::ip::tcp::v4(), 12345), ec); * if (ec) * { * // An error occurred. * } * @endcode */ asio::error_code bind(const endpoint_type& endpoint, asio::error_code& ec) { return this->get_service().bind(this->get_implementation(), endpoint, ec); } /// Connect the socket to the specified endpoint. /** * This function is used to connect a socket to the specified remote endpoint. * The function call will block until the connection is successfully made or * an error occurs. 
* * The socket is automatically opened if it is not already open. If the * connect fails, and the socket was automatically opened, the socket is * not returned to the closed state. * * @param peer_endpoint The remote endpoint to which the socket will be * connected. * * @throws asio::system_error Thrown on failure. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * asio::ip::tcp::endpoint endpoint( * asio::ip::address::from_string("1.2.3.4"), 12345); * socket.connect(endpoint); * @endcode */ void connect(const endpoint_type& peer_endpoint) { asio::error_code ec; if (!is_open()) { this->get_service().open(this->get_implementation(), peer_endpoint.protocol(), ec); asio::detail::throw_error(ec, "connect"); } this->get_service().connect(this->get_implementation(), peer_endpoint, ec); asio::detail::throw_error(ec, "connect"); } /// Connect the socket to the specified endpoint. /** * This function is used to connect a socket to the specified remote endpoint. * The function call will block until the connection is successfully made or * an error occurs. * * The socket is automatically opened if it is not already open. If the * connect fails, and the socket was automatically opened, the socket is * not returned to the closed state. * * @param peer_endpoint The remote endpoint to which the socket will be * connected. * * @param ec Set to indicate what error occurred, if any. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * asio::ip::tcp::endpoint endpoint( * asio::ip::address::from_string("1.2.3.4"), 12345); * asio::error_code ec; * socket.connect(endpoint, ec); * if (ec) * { * // An error occurred. 
* } * @endcode */ asio::error_code connect(const endpoint_type& peer_endpoint, asio::error_code& ec) { if (!is_open()) { if (this->get_service().open(this->get_implementation(), peer_endpoint.protocol(), ec)) { return ec; } } return this->get_service().connect( this->get_implementation(), peer_endpoint, ec); } /// Start an asynchronous connect. /** * This function is used to asynchronously connect a socket to the specified * remote endpoint. The function call always returns immediately. * * The socket is automatically opened if it is not already open. If the * connect fails, and the socket was automatically opened, the socket is * not returned to the closed state. * * @param peer_endpoint The remote endpoint to which the socket will be * connected. Copies will be made of the endpoint object as required. * * @param handler The handler to be called when the connection operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error // Result of operation * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @par Example * @code * void connect_handler(const asio::error_code& error) * { * if (!error) * { * // Connect succeeded. * } * } * * ... * * asio::ip::tcp::socket socket(io_service); * asio::ip::tcp::endpoint endpoint( * asio::ip::address::from_string("1.2.3.4"), 12345); * socket.async_connect(endpoint, connect_handler); * @endcode */ template ASIO_INITFN_RESULT_TYPE(ConnectHandler, void (asio::error_code)) async_connect(const endpoint_type& peer_endpoint, ASIO_MOVE_ARG(ConnectHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ConnectHandler. 
ASIO_CONNECT_HANDLER_CHECK(ConnectHandler, handler) type_check; if (!is_open()) { asio::error_code ec; const protocol_type protocol = peer_endpoint.protocol(); if (this->get_service().open(this->get_implementation(), protocol, ec)) { detail::async_result_init< ConnectHandler, void (asio::error_code)> init( ASIO_MOVE_CAST(ConnectHandler)(handler)); this->get_io_service().post( asio::detail::bind_handler( ASIO_MOVE_CAST(ASIO_HANDLER_TYPE( ConnectHandler, void (asio::error_code)))( init.handler), ec)); return init.result.get(); } } return this->get_service().async_connect(this->get_implementation(), peer_endpoint, ASIO_MOVE_CAST(ConnectHandler)(handler)); } /// Set an option on the socket. /** * This function is used to set an option on the socket. * * @param option The new option value to be set on the socket. * * @throws asio::system_error Thrown on failure. * * @sa SettableSocketOption @n * asio::socket_base::broadcast @n * asio::socket_base::do_not_route @n * asio::socket_base::keep_alive @n * asio::socket_base::linger @n * asio::socket_base::receive_buffer_size @n * asio::socket_base::receive_low_watermark @n * asio::socket_base::reuse_address @n * asio::socket_base::send_buffer_size @n * asio::socket_base::send_low_watermark @n * asio::ip::multicast::join_group @n * asio::ip::multicast::leave_group @n * asio::ip::multicast::enable_loopback @n * asio::ip::multicast::outbound_interface @n * asio::ip::multicast::hops @n * asio::ip::tcp::no_delay * * @par Example * Setting the IPPROTO_TCP/TCP_NODELAY option: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::ip::tcp::no_delay option(true); * socket.set_option(option); * @endcode */ template void set_option(const SettableSocketOption& option) { asio::error_code ec; this->get_service().set_option(this->get_implementation(), option, ec); asio::detail::throw_error(ec, "set_option"); } /// Set an option on the socket. /** * This function is used to set an option on the socket. 
* * @param option The new option value to be set on the socket. * * @param ec Set to indicate what error occurred, if any. * * @sa SettableSocketOption @n * asio::socket_base::broadcast @n * asio::socket_base::do_not_route @n * asio::socket_base::keep_alive @n * asio::socket_base::linger @n * asio::socket_base::receive_buffer_size @n * asio::socket_base::receive_low_watermark @n * asio::socket_base::reuse_address @n * asio::socket_base::send_buffer_size @n * asio::socket_base::send_low_watermark @n * asio::ip::multicast::join_group @n * asio::ip::multicast::leave_group @n * asio::ip::multicast::enable_loopback @n * asio::ip::multicast::outbound_interface @n * asio::ip::multicast::hops @n * asio::ip::tcp::no_delay * * @par Example * Setting the IPPROTO_TCP/TCP_NODELAY option: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::ip::tcp::no_delay option(true); * asio::error_code ec; * socket.set_option(option, ec); * if (ec) * { * // An error occurred. * } * @endcode */ template asio::error_code set_option(const SettableSocketOption& option, asio::error_code& ec) { return this->get_service().set_option( this->get_implementation(), option, ec); } /// Get an option from the socket. /** * This function is used to get the current value of an option on the socket. * * @param option The option value to be obtained from the socket. * * @throws asio::system_error Thrown on failure. 
* * @sa GettableSocketOption @n * asio::socket_base::broadcast @n * asio::socket_base::do_not_route @n * asio::socket_base::keep_alive @n * asio::socket_base::linger @n * asio::socket_base::receive_buffer_size @n * asio::socket_base::receive_low_watermark @n * asio::socket_base::reuse_address @n * asio::socket_base::send_buffer_size @n * asio::socket_base::send_low_watermark @n * asio::ip::multicast::join_group @n * asio::ip::multicast::leave_group @n * asio::ip::multicast::enable_loopback @n * asio::ip::multicast::outbound_interface @n * asio::ip::multicast::hops @n * asio::ip::tcp::no_delay * * @par Example * Getting the value of the SOL_SOCKET/SO_KEEPALIVE option: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::ip::tcp::socket::keep_alive option; * socket.get_option(option); * bool is_set = option.value(); * @endcode */ template void get_option(GettableSocketOption& option) const { asio::error_code ec; this->get_service().get_option(this->get_implementation(), option, ec); asio::detail::throw_error(ec, "get_option"); } /// Get an option from the socket. /** * This function is used to get the current value of an option on the socket. * * @param option The option value to be obtained from the socket. * * @param ec Set to indicate what error occurred, if any. 
* * @sa GettableSocketOption @n * asio::socket_base::broadcast @n * asio::socket_base::do_not_route @n * asio::socket_base::keep_alive @n * asio::socket_base::linger @n * asio::socket_base::receive_buffer_size @n * asio::socket_base::receive_low_watermark @n * asio::socket_base::reuse_address @n * asio::socket_base::send_buffer_size @n * asio::socket_base::send_low_watermark @n * asio::ip::multicast::join_group @n * asio::ip::multicast::leave_group @n * asio::ip::multicast::enable_loopback @n * asio::ip::multicast::outbound_interface @n * asio::ip::multicast::hops @n * asio::ip::tcp::no_delay * * @par Example * Getting the value of the SOL_SOCKET/SO_KEEPALIVE option: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::ip::tcp::socket::keep_alive option; * asio::error_code ec; * socket.get_option(option, ec); * if (ec) * { * // An error occurred. * } * bool is_set = option.value(); * @endcode */ template asio::error_code get_option(GettableSocketOption& option, asio::error_code& ec) const { return this->get_service().get_option( this->get_implementation(), option, ec); } /// Perform an IO control command on the socket. /** * This function is used to execute an IO control command on the socket. * * @param command The IO control command to be performed on the socket. * * @throws asio::system_error Thrown on failure. * * @sa IoControlCommand @n * asio::socket_base::bytes_readable @n * asio::socket_base::non_blocking_io * * @par Example * Getting the number of bytes ready to read: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::ip::tcp::socket::bytes_readable command; * socket.io_control(command); * std::size_t bytes_readable = command.get(); * @endcode */ template void io_control(IoControlCommand& command) { asio::error_code ec; this->get_service().io_control(this->get_implementation(), command, ec); asio::detail::throw_error(ec, "io_control"); } /// Perform an IO control command on the socket. 
/** * This function is used to execute an IO control command on the socket. * * @param command The IO control command to be performed on the socket. * * @param ec Set to indicate what error occurred, if any. * * @sa IoControlCommand @n * asio::socket_base::bytes_readable @n * asio::socket_base::non_blocking_io * * @par Example * Getting the number of bytes ready to read: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::ip::tcp::socket::bytes_readable command; * asio::error_code ec; * socket.io_control(command, ec); * if (ec) * { * // An error occurred. * } * std::size_t bytes_readable = command.get(); * @endcode */ template asio::error_code io_control(IoControlCommand& command, asio::error_code& ec) { return this->get_service().io_control( this->get_implementation(), command, ec); } /// Gets the non-blocking mode of the socket. /** * @returns @c true if the socket's synchronous operations will fail with * asio::error::would_block if they are unable to perform the requested * operation immediately. If @c false, synchronous operations will block * until complete. * * @note The non-blocking mode has no effect on the behaviour of asynchronous * operations. Asynchronous operations will never fail with the error * asio::error::would_block. */ bool non_blocking() const { return this->get_service().non_blocking(this->get_implementation()); } /// Sets the non-blocking mode of the socket. /** * @param mode If @c true, the socket's synchronous operations will fail with * asio::error::would_block if they are unable to perform the requested * operation immediately. If @c false, synchronous operations will block * until complete. * * @throws asio::system_error Thrown on failure. * * @note The non-blocking mode has no effect on the behaviour of asynchronous * operations. Asynchronous operations will never fail with the error * asio::error::would_block. 
*/ void non_blocking(bool mode) { asio::error_code ec; this->get_service().non_blocking(this->get_implementation(), mode, ec); asio::detail::throw_error(ec, "non_blocking"); } /// Sets the non-blocking mode of the socket. /** * @param mode If @c true, the socket's synchronous operations will fail with * asio::error::would_block if they are unable to perform the requested * operation immediately. If @c false, synchronous operations will block * until complete. * * @param ec Set to indicate what error occurred, if any. * * @note The non-blocking mode has no effect on the behaviour of asynchronous * operations. Asynchronous operations will never fail with the error * asio::error::would_block. */ asio::error_code non_blocking( bool mode, asio::error_code& ec) { return this->get_service().non_blocking( this->get_implementation(), mode, ec); } /// Gets the non-blocking mode of the native socket implementation. /** * This function is used to retrieve the non-blocking mode of the underlying * native socket. This mode has no effect on the behaviour of the socket * object's synchronous operations. * * @returns @c true if the underlying socket is in non-blocking mode and * direct system calls may fail with asio::error::would_block (or the * equivalent system error). * * @note The current non-blocking mode is cached by the socket object. * Consequently, the return value may be incorrect if the non-blocking mode * was set directly on the native socket. * * @par Example * This function is intended to allow the encapsulation of arbitrary * non-blocking system calls as asynchronous operations, in a way that is * transparent to the user of the socket object. The following example * illustrates how Linux's @c sendfile system call might be encapsulated: * @code template * struct sendfile_op * { * tcp::socket& sock_; * int fd_; * Handler handler_; * off_t offset_; * std::size_t total_bytes_transferred_; * * // Function call operator meeting WriteHandler requirements. 
* // Used as the handler for the async_write_some operation. * void operator()(asio::error_code ec, std::size_t) * { * // Put the underlying socket into non-blocking mode. * if (!ec) * if (!sock_.native_non_blocking()) * sock_.native_non_blocking(true, ec); * * if (!ec) * { * for (;;) * { * // Try the system call. * errno = 0; * int n = ::sendfile(sock_.native_handle(), fd_, &offset_, 65536); * ec = asio::error_code(n < 0 ? errno : 0, * asio::error::get_system_category()); * total_bytes_transferred_ += ec ? 0 : n; * * // Retry operation immediately if interrupted by signal. * if (ec == asio::error::interrupted) * continue; * * // Check if we need to run the operation again. * if (ec == asio::error::would_block * || ec == asio::error::try_again) * { * // We have to wait for the socket to become ready again. * sock_.async_write_some(asio::null_buffers(), *this); * return; * } * * if (ec || n == 0) * { * // An error occurred, or we have reached the end of the file. * // Either way we must exit the loop so we can call the handler. * break; * } * * // Loop around to try calling sendfile again. * } * } * * // Pass result back to user's handler. * handler_(ec, total_bytes_transferred_); * } * }; * * template * void async_sendfile(tcp::socket& sock, int fd, Handler h) * { * sendfile_op op = { sock, fd, h, 0, 0 }; * sock.async_write_some(asio::null_buffers(), op); * } @endcode */ bool native_non_blocking() const { return this->get_service().native_non_blocking(this->get_implementation()); } /// Sets the non-blocking mode of the native socket implementation. /** * This function is used to modify the non-blocking mode of the underlying * native socket. It has no effect on the behaviour of the socket object's * synchronous operations. * * @param mode If @c true, the underlying socket is put into non-blocking * mode and direct system calls may fail with asio::error::would_block * (or the equivalent system error). * * @throws asio::system_error Thrown on failure. 
If the @c mode is * @c false, but the current value of @c non_blocking() is @c true, this * function fails with asio::error::invalid_argument, as the * combination does not make sense. * * @par Example * This function is intended to allow the encapsulation of arbitrary * non-blocking system calls as asynchronous operations, in a way that is * transparent to the user of the socket object. The following example * illustrates how Linux's @c sendfile system call might be encapsulated: * @code template * struct sendfile_op * { * tcp::socket& sock_; * int fd_; * Handler handler_; * off_t offset_; * std::size_t total_bytes_transferred_; * * // Function call operator meeting WriteHandler requirements. * // Used as the handler for the async_write_some operation. * void operator()(asio::error_code ec, std::size_t) * { * // Put the underlying socket into non-blocking mode. * if (!ec) * if (!sock_.native_non_blocking()) * sock_.native_non_blocking(true, ec); * * if (!ec) * { * for (;;) * { * // Try the system call. * errno = 0; * int n = ::sendfile(sock_.native_handle(), fd_, &offset_, 65536); * ec = asio::error_code(n < 0 ? errno : 0, * asio::error::get_system_category()); * total_bytes_transferred_ += ec ? 0 : n; * * // Retry operation immediately if interrupted by signal. * if (ec == asio::error::interrupted) * continue; * * // Check if we need to run the operation again. * if (ec == asio::error::would_block * || ec == asio::error::try_again) * { * // We have to wait for the socket to become ready again. * sock_.async_write_some(asio::null_buffers(), *this); * return; * } * * if (ec || n == 0) * { * // An error occurred, or we have reached the end of the file. * // Either way we must exit the loop so we can call the handler. * break; * } * * // Loop around to try calling sendfile again. * } * } * * // Pass result back to user's handler. 
* handler_(ec, total_bytes_transferred_); * } * }; * * template * void async_sendfile(tcp::socket& sock, int fd, Handler h) * { * sendfile_op op = { sock, fd, h, 0, 0 }; * sock.async_write_some(asio::null_buffers(), op); * } @endcode */ void native_non_blocking(bool mode) { asio::error_code ec; this->get_service().native_non_blocking( this->get_implementation(), mode, ec); asio::detail::throw_error(ec, "native_non_blocking"); } /// Sets the non-blocking mode of the native socket implementation. /** * This function is used to modify the non-blocking mode of the underlying * native socket. It has no effect on the behaviour of the socket object's * synchronous operations. * * @param mode If @c true, the underlying socket is put into non-blocking * mode and direct system calls may fail with asio::error::would_block * (or the equivalent system error). * * @param ec Set to indicate what error occurred, if any. If the @c mode is * @c false, but the current value of @c non_blocking() is @c true, this * function fails with asio::error::invalid_argument, as the * combination does not make sense. * * @par Example * This function is intended to allow the encapsulation of arbitrary * non-blocking system calls as asynchronous operations, in a way that is * transparent to the user of the socket object. The following example * illustrates how Linux's @c sendfile system call might be encapsulated: * @code template * struct sendfile_op * { * tcp::socket& sock_; * int fd_; * Handler handler_; * off_t offset_; * std::size_t total_bytes_transferred_; * * // Function call operator meeting WriteHandler requirements. * // Used as the handler for the async_write_some operation. * void operator()(asio::error_code ec, std::size_t) * { * // Put the underlying socket into non-blocking mode. * if (!ec) * if (!sock_.native_non_blocking()) * sock_.native_non_blocking(true, ec); * * if (!ec) * { * for (;;) * { * // Try the system call. 
* errno = 0; * int n = ::sendfile(sock_.native_handle(), fd_, &offset_, 65536); * ec = asio::error_code(n < 0 ? errno : 0, * asio::error::get_system_category()); * total_bytes_transferred_ += ec ? 0 : n; * * // Retry operation immediately if interrupted by signal. * if (ec == asio::error::interrupted) * continue; * * // Check if we need to run the operation again. * if (ec == asio::error::would_block * || ec == asio::error::try_again) * { * // We have to wait for the socket to become ready again. * sock_.async_write_some(asio::null_buffers(), *this); * return; * } * * if (ec || n == 0) * { * // An error occurred, or we have reached the end of the file. * // Either way we must exit the loop so we can call the handler. * break; * } * * // Loop around to try calling sendfile again. * } * } * * // Pass result back to user's handler. * handler_(ec, total_bytes_transferred_); * } * }; * * template * void async_sendfile(tcp::socket& sock, int fd, Handler h) * { * sendfile_op op = { sock, fd, h, 0, 0 }; * sock.async_write_some(asio::null_buffers(), op); * } @endcode */ asio::error_code native_non_blocking( bool mode, asio::error_code& ec) { return this->get_service().native_non_blocking( this->get_implementation(), mode, ec); } /// Get the local endpoint of the socket. /** * This function is used to obtain the locally bound endpoint of the socket. * * @returns An object that represents the local endpoint of the socket. * * @throws asio::system_error Thrown on failure. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::ip::tcp::endpoint endpoint = socket.local_endpoint(); * @endcode */ endpoint_type local_endpoint() const { asio::error_code ec; endpoint_type ep = this->get_service().local_endpoint( this->get_implementation(), ec); asio::detail::throw_error(ec, "local_endpoint"); return ep; } /// Get the local endpoint of the socket. /** * This function is used to obtain the locally bound endpoint of the socket. 
* * @param ec Set to indicate what error occurred, if any. * * @returns An object that represents the local endpoint of the socket. * Returns a default-constructed endpoint object if an error occurred. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::error_code ec; * asio::ip::tcp::endpoint endpoint = socket.local_endpoint(ec); * if (ec) * { * // An error occurred. * } * @endcode */ endpoint_type local_endpoint(asio::error_code& ec) const { return this->get_service().local_endpoint(this->get_implementation(), ec); } /// Get the remote endpoint of the socket. /** * This function is used to obtain the remote endpoint of the socket. * * @returns An object that represents the remote endpoint of the socket. * * @throws asio::system_error Thrown on failure. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::ip::tcp::endpoint endpoint = socket.remote_endpoint(); * @endcode */ endpoint_type remote_endpoint() const { asio::error_code ec; endpoint_type ep = this->get_service().remote_endpoint( this->get_implementation(), ec); asio::detail::throw_error(ec, "remote_endpoint"); return ep; } /// Get the remote endpoint of the socket. /** * This function is used to obtain the remote endpoint of the socket. * * @param ec Set to indicate what error occurred, if any. * * @returns An object that represents the remote endpoint of the socket. * Returns a default-constructed endpoint object if an error occurred. * * @par Example * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::error_code ec; * asio::ip::tcp::endpoint endpoint = socket.remote_endpoint(ec); * if (ec) * { * // An error occurred. * } * @endcode */ endpoint_type remote_endpoint(asio::error_code& ec) const { return this->get_service().remote_endpoint(this->get_implementation(), ec); } /// Disable sends or receives on the socket. /** * This function is used to disable send operations, receive operations, or * both. 
* * @param what Determines what types of operation will no longer be allowed. * * @throws asio::system_error Thrown on failure. * * @par Example * Shutting down the send side of the socket: * @code * asio::ip::tcp::socket socket(io_service); * ... * socket.shutdown(asio::ip::tcp::socket::shutdown_send); * @endcode */ void shutdown(shutdown_type what) { asio::error_code ec; this->get_service().shutdown(this->get_implementation(), what, ec); asio::detail::throw_error(ec, "shutdown"); } /// Disable sends or receives on the socket. /** * This function is used to disable send operations, receive operations, or * both. * * @param what Determines what types of operation will no longer be allowed. * * @param ec Set to indicate what error occurred, if any. * * @par Example * Shutting down the send side of the socket: * @code * asio::ip::tcp::socket socket(io_service); * ... * asio::error_code ec; * socket.shutdown(asio::ip::tcp::socket::shutdown_send, ec); * if (ec) * { * // An error occurred. * } * @endcode */ asio::error_code shutdown(shutdown_type what, asio::error_code& ec) { return this->get_service().shutdown(this->get_implementation(), what, ec); } protected: /// Protected destructor to prevent deletion through this type. ~basic_socket() { } }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_BASIC_SOCKET_HPP galera-26.4.3/asio/asio/buffered_stream.hpp0000664000177500017540000001711713540715002017121 0ustar dbartmy// // buffered_stream.hpp // ~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BUFFERED_STREAM_HPP #define ASIO_BUFFERED_STREAM_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/async_result.hpp" #include "asio/buffered_read_stream.hpp" #include "asio/buffered_write_stream.hpp" #include "asio/buffered_stream_fwd.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Adds buffering to the read- and write-related operations of a stream. /** * The buffered_stream class template can be used to add buffering to the * synchronous and asynchronous read and write operations of a stream. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. * * @par Concepts: * AsyncReadStream, AsyncWriteStream, Stream, SyncReadStream, SyncWriteStream. */ template class buffered_stream : private noncopyable { public: /// The type of the next layer. typedef typename remove_reference::type next_layer_type; /// The type of the lowest layer. typedef typename next_layer_type::lowest_layer_type lowest_layer_type; /// Construct, passing the specified argument to initialise the next layer. template explicit buffered_stream(Arg& a) : inner_stream_impl_(a), stream_impl_(inner_stream_impl_) { } /// Construct, passing the specified argument to initialise the next layer. template explicit buffered_stream(Arg& a, std::size_t read_buffer_size, std::size_t write_buffer_size) : inner_stream_impl_(a, write_buffer_size), stream_impl_(inner_stream_impl_, read_buffer_size) { } /// Get a reference to the next layer. next_layer_type& next_layer() { return stream_impl_.next_layer().next_layer(); } /// Get a reference to the lowest layer. 
lowest_layer_type& lowest_layer() { return stream_impl_.lowest_layer(); } /// Get a const reference to the lowest layer. const lowest_layer_type& lowest_layer() const { return stream_impl_.lowest_layer(); } /// Get the io_service associated with the object. asio::io_service& get_io_service() { return stream_impl_.get_io_service(); } /// Close the stream. void close() { stream_impl_.close(); } /// Close the stream. asio::error_code close(asio::error_code& ec) { return stream_impl_.close(ec); } /// Flush all data from the buffer to the next layer. Returns the number of /// bytes written to the next layer on the last write operation. Throws an /// exception on failure. std::size_t flush() { return stream_impl_.next_layer().flush(); } /// Flush all data from the buffer to the next layer. Returns the number of /// bytes written to the next layer on the last write operation, or 0 if an /// error occurred. std::size_t flush(asio::error_code& ec) { return stream_impl_.next_layer().flush(ec); } /// Start an asynchronous flush. template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_flush(ASIO_MOVE_ARG(WriteHandler) handler) { return stream_impl_.next_layer().async_flush( ASIO_MOVE_CAST(WriteHandler)(handler)); } /// Write the given data to the stream. Returns the number of bytes written. /// Throws an exception on failure. template std::size_t write_some(const ConstBufferSequence& buffers) { return stream_impl_.write_some(buffers); } /// Write the given data to the stream. Returns the number of bytes written, /// or 0 if an error occurred. template std::size_t write_some(const ConstBufferSequence& buffers, asio::error_code& ec) { return stream_impl_.write_some(buffers, ec); } /// Start an asynchronous write. The data being written must be valid for the /// lifetime of the asynchronous operation. 
template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write_some(const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler) { return stream_impl_.async_write_some(buffers, ASIO_MOVE_CAST(WriteHandler)(handler)); } /// Fill the buffer with some data. Returns the number of bytes placed in the /// buffer as a result of the operation. Throws an exception on failure. std::size_t fill() { return stream_impl_.fill(); } /// Fill the buffer with some data. Returns the number of bytes placed in the /// buffer as a result of the operation, or 0 if an error occurred. std::size_t fill(asio::error_code& ec) { return stream_impl_.fill(ec); } /// Start an asynchronous fill. template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_fill(ASIO_MOVE_ARG(ReadHandler) handler) { return stream_impl_.async_fill(ASIO_MOVE_CAST(ReadHandler)(handler)); } /// Read some data from the stream. Returns the number of bytes read. Throws /// an exception on failure. template std::size_t read_some(const MutableBufferSequence& buffers) { return stream_impl_.read_some(buffers); } /// Read some data from the stream. Returns the number of bytes read or 0 if /// an error occurred. template std::size_t read_some(const MutableBufferSequence& buffers, asio::error_code& ec) { return stream_impl_.read_some(buffers, ec); } /// Start an asynchronous read. The buffer into which the data will be read /// must be valid for the lifetime of the asynchronous operation. template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_some(const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler) { return stream_impl_.async_read_some(buffers, ASIO_MOVE_CAST(ReadHandler)(handler)); } /// Peek at the incoming data on the stream. Returns the number of bytes read. /// Throws an exception on failure. 
template std::size_t peek(const MutableBufferSequence& buffers) { return stream_impl_.peek(buffers); } /// Peek at the incoming data on the stream. Returns the number of bytes read, /// or 0 if an error occurred. template std::size_t peek(const MutableBufferSequence& buffers, asio::error_code& ec) { return stream_impl_.peek(buffers, ec); } /// Determine the amount of data that may be read without blocking. std::size_t in_avail() { return stream_impl_.in_avail(); } /// Determine the amount of data that may be read without blocking. std::size_t in_avail(asio::error_code& ec) { return stream_impl_.in_avail(ec); } private: // The buffered write stream. typedef buffered_write_stream write_stream_type; write_stream_type inner_stream_impl_; // The buffered read stream. typedef buffered_read_stream read_stream_type; read_stream_type stream_impl_; }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_BUFFERED_STREAM_HPP galera-26.4.3/asio/asio/basic_stream_socket.hpp0000664000177500017540000010251013540715002017760 0ustar dbartmy// // basic_stream_socket.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BASIC_STREAM_SOCKET_HPP #define ASIO_BASIC_STREAM_SOCKET_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/async_result.hpp" #include "asio/basic_socket.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/stream_socket_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Provides stream-oriented socket functionality. 
/** * The basic_stream_socket class template provides asynchronous and blocking * stream-oriented socket functionality. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. * * @par Concepts: * AsyncReadStream, AsyncWriteStream, Stream, SyncReadStream, SyncWriteStream. */ template > class basic_stream_socket : public basic_socket { public: /// (Deprecated: Use native_handle_type.) The native representation of a /// socket. typedef typename StreamSocketService::native_handle_type native_type; /// The native representation of a socket. typedef typename StreamSocketService::native_handle_type native_handle_type; /// The protocol type. typedef Protocol protocol_type; /// The endpoint type. typedef typename Protocol::endpoint endpoint_type; /// Construct a basic_stream_socket without opening it. /** * This constructor creates a stream socket without opening it. The socket * needs to be opened and then connected or accepted before data can be sent * or received on it. * * @param io_service The io_service object that the stream socket will use to * dispatch handlers for any asynchronous operations performed on the socket. */ explicit basic_stream_socket(asio::io_service& io_service) : basic_socket(io_service) { } /// Construct and open a basic_stream_socket. /** * This constructor creates and opens a stream socket. The socket needs to be * connected or accepted before data can be sent or received on it. * * @param io_service The io_service object that the stream socket will use to * dispatch handlers for any asynchronous operations performed on the socket. * * @param protocol An object specifying protocol parameters to be used. * * @throws asio::system_error Thrown on failure. */ basic_stream_socket(asio::io_service& io_service, const protocol_type& protocol) : basic_socket(io_service, protocol) { } /// Construct a basic_stream_socket, opening it and binding it to the given /// local endpoint. 
/** * This constructor creates a stream socket and automatically opens it bound * to the specified endpoint on the local machine. The protocol used is the * protocol associated with the given endpoint. * * @param io_service The io_service object that the stream socket will use to * dispatch handlers for any asynchronous operations performed on the socket. * * @param endpoint An endpoint on the local machine to which the stream * socket will be bound. * * @throws asio::system_error Thrown on failure. */ basic_stream_socket(asio::io_service& io_service, const endpoint_type& endpoint) : basic_socket(io_service, endpoint) { } /// Construct a basic_stream_socket on an existing native socket. /** * This constructor creates a stream socket object to hold an existing native * socket. * * @param io_service The io_service object that the stream socket will use to * dispatch handlers for any asynchronous operations performed on the socket. * * @param protocol An object specifying protocol parameters to be used. * * @param native_socket The new underlying socket implementation. * * @throws asio::system_error Thrown on failure. */ basic_stream_socket(asio::io_service& io_service, const protocol_type& protocol, const native_handle_type& native_socket) : basic_socket( io_service, protocol, native_socket) { } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a basic_stream_socket from another. /** * This constructor moves a stream socket from one object to another. * * @param other The other basic_stream_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_stream_socket(io_service&) constructor. */ basic_stream_socket(basic_stream_socket&& other) : basic_socket( ASIO_MOVE_CAST(basic_stream_socket)(other)) { } /// Move-assign a basic_stream_socket from another. /** * This assignment operator moves a stream socket from one object to another. 
* * @param other The other basic_stream_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_stream_socket(io_service&) constructor. */ basic_stream_socket& operator=(basic_stream_socket&& other) { basic_socket::operator=( ASIO_MOVE_CAST(basic_stream_socket)(other)); return *this; } /// Move-construct a basic_stream_socket from a socket of another protocol /// type. /** * This constructor moves a stream socket from one object to another. * * @param other The other basic_stream_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_stream_socket(io_service&) constructor. */ template basic_stream_socket( basic_stream_socket&& other, typename enable_if::value>::type* = 0) : basic_socket( ASIO_MOVE_CAST2(basic_stream_socket< Protocol1, StreamSocketService1>)(other)) { } /// Move-assign a basic_stream_socket from a socket of another protocol type. /** * This assignment operator moves a stream socket from one object to another. * * @param other The other basic_stream_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_stream_socket(io_service&) constructor. */ template typename enable_if::value, basic_stream_socket>::type& operator=( basic_stream_socket&& other) { basic_socket::operator=( ASIO_MOVE_CAST2(basic_stream_socket< Protocol1, StreamSocketService1>)(other)); return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Send some data on the socket. /** * This function is used to send data on the stream socket. The function * call will block until one or more bytes of the data has been sent * successfully, or an until error occurs. * * @param buffers One or more data buffers to be sent on the socket. 
* * @returns The number of bytes sent. * * @throws asio::system_error Thrown on failure. * * @note The send operation may not transmit all of the data to the peer. * Consider using the @ref write function if you need to ensure that all data * is written before the blocking operation completes. * * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code * socket.send(asio::buffer(data, size)); * @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t send(const ConstBufferSequence& buffers) { asio::error_code ec; std::size_t s = this->get_service().send( this->get_implementation(), buffers, 0, ec); asio::detail::throw_error(ec, "send"); return s; } /// Send some data on the socket. /** * This function is used to send data on the stream socket. The function * call will block until one or more bytes of the data has been sent * successfully, or an until error occurs. * * @param buffers One or more data buffers to be sent on the socket. * * @param flags Flags specifying how the send call is to be made. * * @returns The number of bytes sent. * * @throws asio::system_error Thrown on failure. * * @note The send operation may not transmit all of the data to the peer. * Consider using the @ref write function if you need to ensure that all data * is written before the blocking operation completes. * * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code * socket.send(asio::buffer(data, size), 0); * @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. 
*/ template std::size_t send(const ConstBufferSequence& buffers, socket_base::message_flags flags) { asio::error_code ec; std::size_t s = this->get_service().send( this->get_implementation(), buffers, flags, ec); asio::detail::throw_error(ec, "send"); return s; } /// Send some data on the socket. /** * This function is used to send data on the stream socket. The function * call will block until one or more bytes of the data has been sent * successfully, or an until error occurs. * * @param buffers One or more data buffers to be sent on the socket. * * @param flags Flags specifying how the send call is to be made. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes sent. Returns 0 if an error occurred. * * @note The send operation may not transmit all of the data to the peer. * Consider using the @ref write function if you need to ensure that all data * is written before the blocking operation completes. */ template std::size_t send(const ConstBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { return this->get_service().send( this->get_implementation(), buffers, flags, ec); } /// Start an asynchronous send. /** * This function is used to asynchronously send data on the stream socket. * The function call always returns immediately. * * @param buffers One or more data buffers to be sent on the socket. Although * the buffers object may be copied as necessary, ownership of the underlying * memory blocks is retained by the caller, which must guarantee that they * remain valid until the handler is called. * * @param handler The handler to be called when the send operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes sent. 
* ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @note The send operation may not transmit all of the data to the peer. * Consider using the @ref async_write function if you need to ensure that all * data is written before the asynchronous operation completes. * * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code * socket.async_send(asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_send(const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a WriteHandler. ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; return this->get_service().async_send( this->get_implementation(), buffers, 0, ASIO_MOVE_CAST(WriteHandler)(handler)); } /// Start an asynchronous send. /** * This function is used to asynchronously send data on the stream socket. * The function call always returns immediately. * * @param buffers One or more data buffers to be sent on the socket. Although * the buffers object may be copied as necessary, ownership of the underlying * memory blocks is retained by the caller, which must guarantee that they * remain valid until the handler is called. * * @param flags Flags specifying how the send call is to be made. * * @param handler The handler to be called when the send operation completes. * Copies will be made of the handler as required. 
The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes sent. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @note The send operation may not transmit all of the data to the peer. * Consider using the @ref async_write function if you need to ensure that all * data is written before the asynchronous operation completes. * * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code * socket.async_send(asio::buffer(data, size), 0, handler); * @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_send(const ConstBufferSequence& buffers, socket_base::message_flags flags, ASIO_MOVE_ARG(WriteHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a WriteHandler. ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; return this->get_service().async_send( this->get_implementation(), buffers, flags, ASIO_MOVE_CAST(WriteHandler)(handler)); } /// Receive some data on the socket. /** * This function is used to receive data on the stream socket. The function * call will block until one or more bytes of data has been received * successfully, or until an error occurs. * * @param buffers One or more buffers into which the data will be received. * * @returns The number of bytes received. * * @throws asio::system_error Thrown on failure. 
An error code of * asio::error::eof indicates that the connection was closed by the * peer. * * @note The receive operation may not receive all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that the * requested amount of data is read before the blocking operation completes. * * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code * socket.receive(asio::buffer(data, size)); * @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t receive(const MutableBufferSequence& buffers) { asio::error_code ec; std::size_t s = this->get_service().receive( this->get_implementation(), buffers, 0, ec); asio::detail::throw_error(ec, "receive"); return s; } /// Receive some data on the socket. /** * This function is used to receive data on the stream socket. The function * call will block until one or more bytes of data has been received * successfully, or until an error occurs. * * @param buffers One or more buffers into which the data will be received. * * @param flags Flags specifying how the receive call is to be made. * * @returns The number of bytes received. * * @throws asio::system_error Thrown on failure. An error code of * asio::error::eof indicates that the connection was closed by the * peer. * * @note The receive operation may not receive all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that the * requested amount of data is read before the blocking operation completes. 
* * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code * socket.receive(asio::buffer(data, size), 0); * @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t receive(const MutableBufferSequence& buffers, socket_base::message_flags flags) { asio::error_code ec; std::size_t s = this->get_service().receive( this->get_implementation(), buffers, flags, ec); asio::detail::throw_error(ec, "receive"); return s; } /// Receive some data on a connected socket. /** * This function is used to receive data on the stream socket. The function * call will block until one or more bytes of data has been received * successfully, or until an error occurs. * * @param buffers One or more buffers into which the data will be received. * * @param flags Flags specifying how the receive call is to be made. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes received. Returns 0 if an error occurred. * * @note The receive operation may not receive all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that the * requested amount of data is read before the blocking operation completes. */ template std::size_t receive(const MutableBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { return this->get_service().receive( this->get_implementation(), buffers, flags, ec); } /// Start an asynchronous receive. /** * This function is used to asynchronously receive data from the stream * socket. The function call always returns immediately. * * @param buffers One or more buffers into which the data will be received. 
* Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param handler The handler to be called when the receive operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes received. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @note The receive operation may not receive all of the requested number of * bytes. Consider using the @ref async_read function if you need to ensure * that the requested amount of data is received before the asynchronous * operation completes. * * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code * socket.async_receive(asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive(const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ReadHandler. ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; return this->get_service().async_receive(this->get_implementation(), buffers, 0, ASIO_MOVE_CAST(ReadHandler)(handler)); } /// Start an asynchronous receive. /** * This function is used to asynchronously receive data from the stream * socket. 
The function call always returns immediately. * * @param buffers One or more buffers into which the data will be received. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param flags Flags specifying how the receive call is to be made. * * @param handler The handler to be called when the receive operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes received. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @note The receive operation may not receive all of the requested number of * bytes. Consider using the @ref async_read function if you need to ensure * that the requested amount of data is received before the asynchronous * operation completes. * * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code * socket.async_receive(asio::buffer(data, size), 0, handler); * @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive(const MutableBufferSequence& buffers, socket_base::message_flags flags, ASIO_MOVE_ARG(ReadHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ReadHandler. 
ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; return this->get_service().async_receive(this->get_implementation(), buffers, flags, ASIO_MOVE_CAST(ReadHandler)(handler)); } /// Write some data to the socket. /** * This function is used to write data to the stream socket. The function call * will block until one or more bytes of the data has been written * successfully, or until an error occurs. * * @param buffers One or more data buffers to be written to the socket. * * @returns The number of bytes written. * * @throws asio::system_error Thrown on failure. An error code of * asio::error::eof indicates that the connection was closed by the * peer. * * @note The write_some operation may not transmit all of the data to the * peer. Consider using the @ref write function if you need to ensure that * all data is written before the blocking operation completes. * * @par Example * To write a single data buffer use the @ref buffer function as follows: * @code * socket.write_some(asio::buffer(data, size)); * @endcode * See the @ref buffer documentation for information on writing multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t write_some(const ConstBufferSequence& buffers) { asio::error_code ec; std::size_t s = this->get_service().send( this->get_implementation(), buffers, 0, ec); asio::detail::throw_error(ec, "write_some"); return s; } /// Write some data to the socket. /** * This function is used to write data to the stream socket. The function call * will block until one or more bytes of the data has been written * successfully, or until an error occurs. * * @param buffers One or more data buffers to be written to the socket. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes written. Returns 0 if an error occurred. * * @note The write_some operation may not transmit all of the data to the * peer. 
Consider using the @ref write function if you need to ensure that * all data is written before the blocking operation completes. */ template std::size_t write_some(const ConstBufferSequence& buffers, asio::error_code& ec) { return this->get_service().send(this->get_implementation(), buffers, 0, ec); } /// Start an asynchronous write. /** * This function is used to asynchronously write data to the stream socket. * The function call always returns immediately. * * @param buffers One or more data buffers to be written to the socket. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param handler The handler to be called when the write operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes written. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @note The write operation may not transmit all of the data to the peer. * Consider using the @ref async_write function if you need to ensure that all * data is written before the asynchronous operation completes. * * @par Example * To write a single data buffer use the @ref buffer function as follows: * @code * socket.async_write_some(asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on writing multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. 
*/ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write_some(const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a WriteHandler. ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; return this->get_service().async_send(this->get_implementation(), buffers, 0, ASIO_MOVE_CAST(WriteHandler)(handler)); } /// Read some data from the socket. /** * This function is used to read data from the stream socket. The function * call will block until one or more bytes of data has been read successfully, * or until an error occurs. * * @param buffers One or more buffers into which the data will be read. * * @returns The number of bytes read. * * @throws asio::system_error Thrown on failure. An error code of * asio::error::eof indicates that the connection was closed by the * peer. * * @note The read_some operation may not read all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that * the requested amount of data is read before the blocking operation * completes. * * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code * socket.read_some(asio::buffer(data, size)); * @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t read_some(const MutableBufferSequence& buffers) { asio::error_code ec; std::size_t s = this->get_service().receive( this->get_implementation(), buffers, 0, ec); asio::detail::throw_error(ec, "read_some"); return s; } /// Read some data from the socket. /** * This function is used to read data from the stream socket. The function * call will block until one or more bytes of data has been read successfully, * or until an error occurs. 
* * @param buffers One or more buffers into which the data will be read. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes read. Returns 0 if an error occurred. * * @note The read_some operation may not read all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that * the requested amount of data is read before the blocking operation * completes. */ template std::size_t read_some(const MutableBufferSequence& buffers, asio::error_code& ec) { return this->get_service().receive( this->get_implementation(), buffers, 0, ec); } /// Start an asynchronous read. /** * This function is used to asynchronously read data from the stream socket. * The function call always returns immediately. * * @param buffers One or more buffers into which the data will be read. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes read. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @note The read operation may not read all of the requested number of bytes. * Consider using the @ref async_read function if you need to ensure that the * requested amount of data is read before the asynchronous operation * completes. 
* * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code * socket.async_read_some(asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_some(const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ReadHandler. ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; return this->get_service().async_receive(this->get_implementation(), buffers, 0, ASIO_MOVE_CAST(ReadHandler)(handler)); } }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_BASIC_STREAM_SOCKET_HPP galera-26.4.3/asio/asio/signal_set_service.hpp0000664000177500017540000000675113540715002017636 0ustar dbartmy// // signal_set_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SIGNAL_SET_SERVICE_HPP #define ASIO_SIGNAL_SET_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/async_result.hpp" #include "asio/detail/signal_set_service.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Default service implementation for a signal set. 
class signal_set_service #if defined(GENERATING_DOCUMENTATION) : public asio::io_service::service #else : public asio::detail::service_base #endif { public: #if defined(GENERATING_DOCUMENTATION) /// The unique service identifier. static asio::io_service::id id; #endif public: /// The type of a signal set implementation. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined implementation_type; #else typedef detail::signal_set_service::implementation_type implementation_type; #endif /// Construct a new signal set service for the specified io_service. explicit signal_set_service(asio::io_service& io_service) : asio::detail::service_base(io_service), service_impl_(io_service) { } /// Construct a new signal set implementation. void construct(implementation_type& impl) { service_impl_.construct(impl); } /// Destroy a signal set implementation. void destroy(implementation_type& impl) { service_impl_.destroy(impl); } /// Add a signal to a signal_set. asio::error_code add(implementation_type& impl, int signal_number, asio::error_code& ec) { return service_impl_.add(impl, signal_number, ec); } /// Remove a signal to a signal_set. asio::error_code remove(implementation_type& impl, int signal_number, asio::error_code& ec) { return service_impl_.remove(impl, signal_number, ec); } /// Remove all signals from a signal_set. asio::error_code clear(implementation_type& impl, asio::error_code& ec) { return service_impl_.clear(impl, ec); } /// Cancel all operations associated with the signal set. asio::error_code cancel(implementation_type& impl, asio::error_code& ec) { return service_impl_.cancel(impl, ec); } // Start an asynchronous operation to wait for a signal to be delivered. 
template ASIO_INITFN_RESULT_TYPE(SignalHandler, void (asio::error_code, int)) async_wait(implementation_type& impl, ASIO_MOVE_ARG(SignalHandler) handler) { detail::async_result_init< SignalHandler, void (asio::error_code, int)> init( ASIO_MOVE_CAST(SignalHandler)(handler)); service_impl_.async_wait(impl, init.handler); return init.result.get(); } private: // Destroy all user-defined handler objects owned by the service. void shutdown_service() { service_impl_.shutdown_service(); } // Perform any fork-related housekeeping. void fork_service(asio::io_service::fork_event event) { service_impl_.fork_service(event); } // The platform-specific implementation. detail::signal_set_service service_impl_; }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SIGNAL_SET_SERVICE_HPP galera-26.4.3/asio/asio/basic_deadline_timer.hpp0000664000177500017540000004261613540715002020074 0ustar dbartmy// // basic_deadline_timer.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BASIC_DEADLINE_TIMER_HPP #define ASIO_BASIC_DEADLINE_TIMER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_BOOST_DATE_TIME) \ || defined(GENERATING_DOCUMENTATION) #include #include "asio/basic_io_object.hpp" #include "asio/deadline_timer_service.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Provides waitable timer functionality. /** * The basic_deadline_timer class template provides the ability to perform a * blocking or asynchronous wait for a timer to expire. 
* * A deadline timer is always in one of two states: "expired" or "not expired". * If the wait() or async_wait() function is called on an expired timer, the * wait operation will complete immediately. * * Most applications will use the asio::deadline_timer typedef. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. * * @par Examples * Performing a blocking wait: * @code * // Construct a timer without setting an expiry time. * asio::deadline_timer timer(io_service); * * // Set an expiry time relative to now. * timer.expires_from_now(boost::posix_time::seconds(5)); * * // Wait for the timer to expire. * timer.wait(); * @endcode * * @par * Performing an asynchronous wait: * @code * void handler(const asio::error_code& error) * { * if (!error) * { * // Timer expired. * } * } * * ... * * // Construct a timer with an absolute expiry time. * asio::deadline_timer timer(io_service, * boost::posix_time::time_from_string("2005-12-07 23:59:59.000")); * * // Start an asynchronous wait. * timer.async_wait(handler); * @endcode * * @par Changing an active deadline_timer's expiry time * * Changing the expiry time of a timer while there are pending asynchronous * waits causes those wait operations to be cancelled. To ensure that the action * associated with the timer is performed only once, use something like this: * used: * * @code * void on_some_event() * { * if (my_timer.expires_from_now(seconds(5)) > 0) * { * // We managed to cancel the timer. Start new asynchronous wait. * my_timer.async_wait(on_timeout); * } * else * { * // Too late, timer has already expired! * } * } * * void on_timeout(const asio::error_code& e) * { * if (e != asio::error::operation_aborted) * { * // Timer was not cancelled, take necessary action. * } * } * @endcode * * @li The asio::basic_deadline_timer::expires_from_now() function * cancels any pending asynchronous waits, and returns the number of * asynchronous waits that were cancelled. 
If it returns 0 then you were too * late and the wait handler has already been executed, or will soon be * executed. If it returns 1 then the wait handler was successfully cancelled. * * @li If a wait handler is cancelled, the asio::error_code passed to * it contains the value asio::error::operation_aborted. */ template , typename TimerService = deadline_timer_service > class basic_deadline_timer : public basic_io_object { public: /// The time traits type. typedef TimeTraits traits_type; /// The time type. typedef typename traits_type::time_type time_type; /// The duration type. typedef typename traits_type::duration_type duration_type; /// Constructor. /** * This constructor creates a timer without setting an expiry time. The * expires_at() or expires_from_now() functions must be called to set an * expiry time before the timer can be waited on. * * @param io_service The io_service object that the timer will use to dispatch * handlers for any asynchronous operations performed on the timer. */ explicit basic_deadline_timer(asio::io_service& io_service) : basic_io_object(io_service) { } /// Constructor to set a particular expiry time as an absolute time. /** * This constructor creates a timer and sets the expiry time. * * @param io_service The io_service object that the timer will use to dispatch * handlers for any asynchronous operations performed on the timer. * * @param expiry_time The expiry time to be used for the timer, expressed * as an absolute time. */ basic_deadline_timer(asio::io_service& io_service, const time_type& expiry_time) : basic_io_object(io_service) { asio::error_code ec; this->service.expires_at(this->implementation, expiry_time, ec); asio::detail::throw_error(ec, "expires_at"); } /// Constructor to set a particular expiry time relative to now. /** * This constructor creates a timer and sets the expiry time. 
* * @param io_service The io_service object that the timer will use to dispatch * handlers for any asynchronous operations performed on the timer. * * @param expiry_time The expiry time to be used for the timer, relative to * now. */ basic_deadline_timer(asio::io_service& io_service, const duration_type& expiry_time) : basic_io_object(io_service) { asio::error_code ec; this->service.expires_from_now(this->implementation, expiry_time, ec); asio::detail::throw_error(ec, "expires_from_now"); } /// Cancel any asynchronous operations that are waiting on the timer. /** * This function forces the completion of any pending asynchronous wait * operations against the timer. The handler for each cancelled operation will * be invoked with the asio::error::operation_aborted error code. * * Cancelling the timer does not change the expiry time. * * @return The number of asynchronous operations that were cancelled. * * @throws asio::system_error Thrown on failure. * * @note If the timer has already expired when cancel() is called, then the * handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. */ std::size_t cancel() { asio::error_code ec; std::size_t s = this->service.cancel(this->implementation, ec); asio::detail::throw_error(ec, "cancel"); return s; } /// Cancel any asynchronous operations that are waiting on the timer. /** * This function forces the completion of any pending asynchronous wait * operations against the timer. The handler for each cancelled operation will * be invoked with the asio::error::operation_aborted error code. * * Cancelling the timer does not change the expiry time. * * @param ec Set to indicate what error occurred, if any. * * @return The number of asynchronous operations that were cancelled. 
* * @note If the timer has already expired when cancel() is called, then the * handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. */ std::size_t cancel(asio::error_code& ec) { return this->service.cancel(this->implementation, ec); } /// Cancels one asynchronous operation that is waiting on the timer. /** * This function forces the completion of one pending asynchronous wait * operation against the timer. Handlers are cancelled in FIFO order. The * handler for the cancelled operation will be invoked with the * asio::error::operation_aborted error code. * * Cancelling the timer does not change the expiry time. * * @return The number of asynchronous operations that were cancelled. That is, * either 0 or 1. * * @throws asio::system_error Thrown on failure. * * @note If the timer has already expired when cancel_one() is called, then * the handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. */ std::size_t cancel_one() { asio::error_code ec; std::size_t s = this->service.cancel_one(this->implementation, ec); asio::detail::throw_error(ec, "cancel_one"); return s; } /// Cancels one asynchronous operation that is waiting on the timer. /** * This function forces the completion of one pending asynchronous wait * operation against the timer. Handlers are cancelled in FIFO order. The * handler for the cancelled operation will be invoked with the * asio::error::operation_aborted error code. * * Cancelling the timer does not change the expiry time. 
* * @param ec Set to indicate what error occurred, if any. * * @return The number of asynchronous operations that were cancelled. That is, * either 0 or 1. * * @note If the timer has already expired when cancel_one() is called, then * the handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. */ std::size_t cancel_one(asio::error_code& ec) { return this->service.cancel_one(this->implementation, ec); } /// Get the timer's expiry time as an absolute time. /** * This function may be used to obtain the timer's current expiry time. * Whether the timer has expired or not does not affect this value. */ time_type expires_at() const { return this->service.expires_at(this->implementation); } /// Set the timer's expiry time as an absolute time. /** * This function sets the expiry time. Any pending asynchronous wait * operations will be cancelled. The handler for each cancelled operation will * be invoked with the asio::error::operation_aborted error code. * * @param expiry_time The expiry time to be used for the timer. * * @return The number of asynchronous operations that were cancelled. * * @throws asio::system_error Thrown on failure. * * @note If the timer has already expired when expires_at() is called, then * the handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. 
*/ std::size_t expires_at(const time_type& expiry_time) { asio::error_code ec; std::size_t s = this->service.expires_at( this->implementation, expiry_time, ec); asio::detail::throw_error(ec, "expires_at"); return s; } /// Set the timer's expiry time as an absolute time. /** * This function sets the expiry time. Any pending asynchronous wait * operations will be cancelled. The handler for each cancelled operation will * be invoked with the asio::error::operation_aborted error code. * * @param expiry_time The expiry time to be used for the timer. * * @param ec Set to indicate what error occurred, if any. * * @return The number of asynchronous operations that were cancelled. * * @note If the timer has already expired when expires_at() is called, then * the handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. */ std::size_t expires_at(const time_type& expiry_time, asio::error_code& ec) { return this->service.expires_at(this->implementation, expiry_time, ec); } /// Get the timer's expiry time relative to now. /** * This function may be used to obtain the timer's current expiry time. * Whether the timer has expired or not does not affect this value. */ duration_type expires_from_now() const { return this->service.expires_from_now(this->implementation); } /// Set the timer's expiry time relative to now. /** * This function sets the expiry time. Any pending asynchronous wait * operations will be cancelled. The handler for each cancelled operation will * be invoked with the asio::error::operation_aborted error code. * * @param expiry_time The expiry time to be used for the timer. * * @return The number of asynchronous operations that were cancelled. * * @throws asio::system_error Thrown on failure. 
* * @note If the timer has already expired when expires_from_now() is called, * then the handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. */ std::size_t expires_from_now(const duration_type& expiry_time) { asio::error_code ec; std::size_t s = this->service.expires_from_now( this->implementation, expiry_time, ec); asio::detail::throw_error(ec, "expires_from_now"); return s; } /// Set the timer's expiry time relative to now. /** * This function sets the expiry time. Any pending asynchronous wait * operations will be cancelled. The handler for each cancelled operation will * be invoked with the asio::error::operation_aborted error code. * * @param expiry_time The expiry time to be used for the timer. * * @param ec Set to indicate what error occurred, if any. * * @return The number of asynchronous operations that were cancelled. * * @note If the timer has already expired when expires_from_now() is called, * then the handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. */ std::size_t expires_from_now(const duration_type& expiry_time, asio::error_code& ec) { return this->service.expires_from_now( this->implementation, expiry_time, ec); } /// Perform a blocking wait on the timer. /** * This function is used to wait for the timer to expire. This function * blocks and does not return until the timer has expired. * * @throws asio::system_error Thrown on failure. 
*/ void wait() { asio::error_code ec; this->service.wait(this->implementation, ec); asio::detail::throw_error(ec, "wait"); } /// Perform a blocking wait on the timer. /** * This function is used to wait for the timer to expire. This function * blocks and does not return until the timer has expired. * * @param ec Set to indicate what error occurred, if any. */ void wait(asio::error_code& ec) { this->service.wait(this->implementation, ec); } /// Start an asynchronous wait on the timer. /** * This function may be used to initiate an asynchronous wait against the * timer. It always returns immediately. * * For each call to async_wait(), the supplied handler will be called exactly * once. The handler will be called when: * * @li The timer has expired. * * @li The timer was cancelled, in which case the handler is passed the error * code asio::error::operation_aborted. * * @param handler The handler to be called when the timer expires. Copies * will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * const asio::error_code& error // Result of operation. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). */ template ASIO_INITFN_RESULT_TYPE(WaitHandler, void (asio::error_code)) async_wait(ASIO_MOVE_ARG(WaitHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a WaitHandler. 
ASIO_WAIT_HANDLER_CHECK(WaitHandler, handler) type_check; return this->service.async_wait(this->implementation, ASIO_MOVE_CAST(WaitHandler)(handler)); } }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_BOOST_DATE_TIME) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_BASIC_DEADLINE_TIMER_HPP galera-26.4.3/asio/asio/thread.hpp0000664000177500017540000000440513540715002015227 0ustar dbartmy// // thread.hpp // ~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_THREAD_HPP #define ASIO_THREAD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/thread.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// A simple abstraction for starting threads. /** * The asio::thread class implements the smallest possible subset of the * functionality of boost::thread. It is intended to be used only for starting * a thread and waiting for it to exit. If more extensive threading * capabilities are required, you are strongly advised to use something else. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. * * @par Example * A typical use of asio::thread would be to launch a thread to run an * io_service's event processing loop: * * @par * @code asio::io_service io_service; * // ... * asio::thread t(boost::bind(&asio::io_service::run, &io_service)); * // ... * t.join(); @endcode */ class thread : private noncopyable { public: /// Start a new thread that executes the supplied function. /** * This constructor creates a new thread that will execute the given function * or function object. 
* * @param f The function or function object to be run in the thread. The * function signature must be: @code void f(); @endcode */ template explicit thread(Function f) : impl_(f) { } /// Destructor. ~thread() { } /// Wait for the thread to exit. /** * This function will block until the thread has exited. * * If this function is not called before the thread object is destroyed, the * thread itself will continue to run until completion. You will, however, * no longer have the ability to wait for it to exit. */ void join() { impl_.join(); } private: detail::thread impl_; }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_THREAD_HPP galera-26.4.3/asio/asio/strand.hpp0000664000177500017540000002104013540715002015245 0ustar dbartmy// // strand.hpp // ~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_STRAND_HPP #define ASIO_STRAND_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/async_result.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/strand_service.hpp" #include "asio/detail/wrapped_handler.hpp" #include "asio/io_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Provides serialised handler execution. /** * The io_service::strand class provides the ability to post and dispatch * handlers with the guarantee that none of those handlers will execute * concurrently. 
* * @par Order of handler invocation * Given: * * @li a strand object @c s * * @li an object @c a meeting completion handler requirements * * @li an object @c a1 which is an arbitrary copy of @c a made by the * implementation * * @li an object @c b meeting completion handler requirements * * @li an object @c b1 which is an arbitrary copy of @c b made by the * implementation * * if any of the following conditions are true: * * @li @c s.post(a) happens-before @c s.post(b) * * @li @c s.post(a) happens-before @c s.dispatch(b), where the latter is * performed outside the strand * * @li @c s.dispatch(a) happens-before @c s.post(b), where the former is * performed outside the strand * * @li @c s.dispatch(a) happens-before @c s.dispatch(b), where both are * performed outside the strand * * then @c asio_handler_invoke(a1, &a1) happens-before * @c asio_handler_invoke(b1, &b1). * * Note that in the following case: * @code async_op_1(..., s.wrap(a)); * async_op_2(..., s.wrap(b)); @endcode * the completion of the first async operation will perform @c s.dispatch(a), * and the second will perform @c s.dispatch(b), but the order in which those * are performed is unspecified. That is, you cannot state whether one * happens-before the other. Therefore none of the above conditions are met and * no ordering guarantee is made. * * @note The implementation makes no guarantee that handlers posted or * dispatched through different @c strand objects will be invoked concurrently. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Safe. * * @par Concepts: * Dispatcher. */ class io_service::strand { public: /// Constructor. /** * Constructs the strand. * * @param io_service The io_service object that the strand will use to * dispatch handlers that are ready to be run. */ explicit strand(asio::io_service& io_service) : service_(asio::use_service< asio::detail::strand_service>(io_service)) { service_.construct(impl_); } /// Destructor. /** * Destroys a strand. 
* * Handlers posted through the strand that have not yet been invoked will * still be dispatched in a way that meets the guarantee of non-concurrency. */ ~strand() { } /// Get the io_service associated with the strand. /** * This function may be used to obtain the io_service object that the strand * uses to dispatch handlers for asynchronous operations. * * @return A reference to the io_service object that the strand will use to * dispatch handlers. Ownership is not transferred to the caller. */ asio::io_service& get_io_service() { return service_.get_io_service(); } /// Request the strand to invoke the given handler. /** * This function is used to ask the strand to execute the given handler. * * The strand object guarantees that handlers posted or dispatched through * the strand will not be executed concurrently. The handler may be executed * inside this function if the guarantee can be met. If this function is * called from within a handler that was posted or dispatched through the same * strand, then the new handler will be executed immediately. * * The strand's guarantee is in addition to the guarantee provided by the * underlying io_service. The io_service guarantees that the handler will only * be called in a thread in which the io_service's run member function is * currently being invoked. * * @param handler The handler to be called. The strand will make a copy of the * handler object as required. The function signature of the handler must be: * @code void handler(); @endcode */ template ASIO_INITFN_RESULT_TYPE(CompletionHandler, void ()) dispatch(ASIO_MOVE_ARG(CompletionHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a CompletionHandler. 
ASIO_COMPLETION_HANDLER_CHECK(CompletionHandler, handler) type_check; detail::async_result_init< CompletionHandler, void ()> init( ASIO_MOVE_CAST(CompletionHandler)(handler)); service_.dispatch(impl_, init.handler); return init.result.get(); } /// Request the strand to invoke the given handler and return /// immediately. /** * This function is used to ask the strand to execute the given handler, but * without allowing the strand to call the handler from inside this function. * * The strand object guarantees that handlers posted or dispatched through * the strand will not be executed concurrently. The strand's guarantee is in * addition to the guarantee provided by the underlying io_service. The * io_service guarantees that the handler will only be called in a thread in * which the io_service's run member function is currently being invoked. * * @param handler The handler to be called. The strand will make a copy of the * handler object as required. The function signature of the handler must be: * @code void handler(); @endcode */ template ASIO_INITFN_RESULT_TYPE(CompletionHandler, void ()) post(ASIO_MOVE_ARG(CompletionHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a CompletionHandler. ASIO_COMPLETION_HANDLER_CHECK(CompletionHandler, handler) type_check; detail::async_result_init< CompletionHandler, void ()> init( ASIO_MOVE_CAST(CompletionHandler)(handler)); service_.post(impl_, init.handler); return init.result.get(); } /// Create a new handler that automatically dispatches the wrapped handler /// on the strand. /** * This function is used to create a new handler function object that, when * invoked, will automatically pass the wrapped handler to the strand's * dispatch function. * * @param handler The handler to be wrapped. The strand will make a copy of * the handler object as required. The function signature of the handler must * be: @code void handler(A1 a1, ... 
An an); @endcode * * @return A function object that, when invoked, passes the wrapped handler to * the strand's dispatch function. Given a function object with the signature: * @code R f(A1 a1, ... An an); @endcode * If this function object is passed to the wrap function like so: * @code strand.wrap(f); @endcode * then the return value is a function object with the signature * @code void g(A1 a1, ... An an); @endcode * that, when invoked, executes code equivalent to: * @code strand.dispatch(boost::bind(f, a1, ... an)); @endcode */ template #if defined(GENERATING_DOCUMENTATION) unspecified #else detail::wrapped_handler #endif wrap(Handler handler) { return detail::wrapped_handler(*this, handler); } /// Determine whether the strand is running in the current thread. /** * @return @c true if the current thread is executing a handler that was * submitted to the strand using post(), dispatch() or wrap(). Otherwise * returns @c false. */ bool running_in_this_thread() const { return service_.running_in_this_thread(impl_); } private: asio::detail::strand_service& service_; asio::detail::strand_service::implementation_type impl_; }; /// (Deprecated: Use asio::io_service::strand.) Typedef for backwards /// compatibility. typedef asio::io_service::strand strand; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_STRAND_HPP galera-26.4.3/asio/asio/basic_signal_set.hpp0000664000177500017540000003140413540715002017250 0ustar dbartmy// // basic_signal_set.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BASIC_SIGNAL_SET_HPP #define ASIO_BASIC_SIGNAL_SET_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/basic_io_object.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/signal_set_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Provides signal functionality. /** * The basic_signal_set class template provides the ability to perform an * asynchronous wait for one or more signals to occur. * * Most applications will use the asio::signal_set typedef. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. * * @par Example * Performing an asynchronous wait: * @code * void handler( * const asio::error_code& error, * int signal_number) * { * if (!error) * { * // A signal occurred. * } * } * * ... * * // Construct a signal set registered for process termination. * asio::signal_set signals(io_service, SIGINT, SIGTERM); * * // Start an asynchronous wait for one of the signals to occur. * signals.async_wait(handler); * @endcode * * @par Queueing of signal notifications * * If a signal is registered with a signal_set, and the signal occurs when * there are no waiting handlers, then the signal notification is queued. The * next async_wait operation on that signal_set will dequeue the notification. * If multiple notifications are queued, subsequent async_wait operations * dequeue them one at a time. Signal notifications are dequeued in order of * ascending signal number. * * If a signal number is removed from a signal_set (using the @c remove or @c * erase member functions) then any queued notifications for that signal are * discarded. 
* * @par Multiple registration of signals * * The same signal number may be registered with different signal_set objects. * When the signal occurs, one handler is called for each signal_set object. * * Note that multiple registration only works for signals that are registered * using Asio. The application must not also register a signal handler using * functions such as @c signal() or @c sigaction(). * * @par Signal masking on POSIX platforms * * POSIX allows signals to be blocked using functions such as @c sigprocmask() * and @c pthread_sigmask(). For signals to be delivered, programs must ensure * that any signals registered using signal_set objects are unblocked in at * least one thread. */ template class basic_signal_set : public basic_io_object { public: /// Construct a signal set without adding any signals. /** * This constructor creates a signal set without registering for any signals. * * @param io_service The io_service object that the signal set will use to * dispatch handlers for any asynchronous operations performed on the set. */ explicit basic_signal_set(asio::io_service& io_service) : basic_io_object(io_service) { } /// Construct a signal set and add one signal. /** * This constructor creates a signal set and registers for one signal. * * @param io_service The io_service object that the signal set will use to * dispatch handlers for any asynchronous operations performed on the set. * * @param signal_number_1 The signal number to be added. * * @note This constructor is equivalent to performing: * @code asio::signal_set signals(io_service); * signals.add(signal_number_1); @endcode */ basic_signal_set(asio::io_service& io_service, int signal_number_1) : basic_io_object(io_service) { asio::error_code ec; this->service.add(this->implementation, signal_number_1, ec); asio::detail::throw_error(ec, "add"); } /// Construct a signal set and add two signals. /** * This constructor creates a signal set and registers for two signals. 
* * @param io_service The io_service object that the signal set will use to * dispatch handlers for any asynchronous operations performed on the set. * * @param signal_number_1 The first signal number to be added. * * @param signal_number_2 The second signal number to be added. * * @note This constructor is equivalent to performing: * @code asio::signal_set signals(io_service); * signals.add(signal_number_1); * signals.add(signal_number_2); @endcode */ basic_signal_set(asio::io_service& io_service, int signal_number_1, int signal_number_2) : basic_io_object(io_service) { asio::error_code ec; this->service.add(this->implementation, signal_number_1, ec); asio::detail::throw_error(ec, "add"); this->service.add(this->implementation, signal_number_2, ec); asio::detail::throw_error(ec, "add"); } /// Construct a signal set and add three signals. /** * This constructor creates a signal set and registers for three signals. * * @param io_service The io_service object that the signal set will use to * dispatch handlers for any asynchronous operations performed on the set. * * @param signal_number_1 The first signal number to be added. * * @param signal_number_2 The second signal number to be added. * * @param signal_number_3 The third signal number to be added. * * @note This constructor is equivalent to performing: * @code asio::signal_set signals(io_service); * signals.add(signal_number_1); * signals.add(signal_number_2); * signals.add(signal_number_3); @endcode */ basic_signal_set(asio::io_service& io_service, int signal_number_1, int signal_number_2, int signal_number_3) : basic_io_object(io_service) { asio::error_code ec; this->service.add(this->implementation, signal_number_1, ec); asio::detail::throw_error(ec, "add"); this->service.add(this->implementation, signal_number_2, ec); asio::detail::throw_error(ec, "add"); this->service.add(this->implementation, signal_number_3, ec); asio::detail::throw_error(ec, "add"); } /// Add a signal to a signal_set. 
/** * This function adds the specified signal to the set. It has no effect if the * signal is already in the set. * * @param signal_number The signal to be added to the set. * * @throws asio::system_error Thrown on failure. */ void add(int signal_number) { asio::error_code ec; this->service.add(this->implementation, signal_number, ec); asio::detail::throw_error(ec, "add"); } /// Add a signal to a signal_set. /** * This function adds the specified signal to the set. It has no effect if the * signal is already in the set. * * @param signal_number The signal to be added to the set. * * @param ec Set to indicate what error occurred, if any. */ asio::error_code add(int signal_number, asio::error_code& ec) { return this->service.add(this->implementation, signal_number, ec); } /// Remove a signal from a signal_set. /** * This function removes the specified signal from the set. It has no effect * if the signal is not in the set. * * @param signal_number The signal to be removed from the set. * * @throws asio::system_error Thrown on failure. * * @note Removes any notifications that have been queued for the specified * signal number. */ void remove(int signal_number) { asio::error_code ec; this->service.remove(this->implementation, signal_number, ec); asio::detail::throw_error(ec, "remove"); } /// Remove a signal from a signal_set. /** * This function removes the specified signal from the set. It has no effect * if the signal is not in the set. * * @param signal_number The signal to be removed from the set. * * @param ec Set to indicate what error occurred, if any. * * @note Removes any notifications that have been queued for the specified * signal number. */ asio::error_code remove(int signal_number, asio::error_code& ec) { return this->service.remove(this->implementation, signal_number, ec); } /// Remove all signals from a signal_set. /** * This function removes all signals from the set. It has no effect if the set * is already empty. 
* * @throws asio::system_error Thrown on failure. * * @note Removes all queued notifications. */ void clear() { asio::error_code ec; this->service.clear(this->implementation, ec); asio::detail::throw_error(ec, "clear"); } /// Remove all signals from a signal_set. /** * This function removes all signals from the set. It has no effect if the set * is already empty. * * @param ec Set to indicate what error occurred, if any. * * @note Removes all queued notifications. */ asio::error_code clear(asio::error_code& ec) { return this->service.clear(this->implementation, ec); } /// Cancel all operations associated with the signal set. /** * This function forces the completion of any pending asynchronous wait * operations against the signal set. The handler for each cancelled * operation will be invoked with the asio::error::operation_aborted * error code. * * Cancellation does not alter the set of registered signals. * * @throws asio::system_error Thrown on failure. * * @note If a registered signal occurred before cancel() is called, then the * handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. */ void cancel() { asio::error_code ec; this->service.cancel(this->implementation, ec); asio::detail::throw_error(ec, "cancel"); } /// Cancel all operations associated with the signal set. /** * This function forces the completion of any pending asynchronous wait * operations against the signal set. The handler for each cancelled * operation will be invoked with the asio::error::operation_aborted * error code. * * Cancellation does not alter the set of registered signals. * * @param ec Set to indicate what error occurred, if any. 
* * @note If a registered signal occurred before cancel() is called, then the * handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. */ asio::error_code cancel(asio::error_code& ec) { return this->service.cancel(this->implementation, ec); } /// Start an asynchronous operation to wait for a signal to be delivered. /** * This function may be used to initiate an asynchronous wait against the * signal set. It always returns immediately. * * For each call to async_wait(), the supplied handler will be called exactly * once. The handler will be called when: * * @li One of the registered signals in the signal set occurs; or * * @li The signal set was cancelled, in which case the handler is passed the * error code asio::error::operation_aborted. * * @param handler The handler to be called when the signal occurs. Copies * will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * int signal_number // Indicates which signal occurred. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). */ template ASIO_INITFN_RESULT_TYPE(SignalHandler, void (asio::error_code, int)) async_wait(ASIO_MOVE_ARG(SignalHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a SignalHandler. 
ASIO_SIGNAL_HANDLER_CHECK(SignalHandler, handler) type_check; return this->service.async_wait(this->implementation, ASIO_MOVE_CAST(SignalHandler)(handler)); } }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_BASIC_SIGNAL_SET_HPP galera-26.4.3/asio/asio/connect.hpp0000664000177500017540000007356313540715002015424 0ustar dbartmy// // connect.hpp // ~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_CONNECT_HPP #define ASIO_CONNECT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/async_result.hpp" #include "asio/basic_socket.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { /** * @defgroup connect asio::connect * * @brief Establishes a socket connection by trying each endpoint in a sequence. */ /*@{*/ /// Establishes a socket connection by trying each endpoint in a sequence. /** * This function attempts to connect a socket to one of a sequence of * endpoints. It does this by repeated calls to the socket's @c connect member * function, once for each endpoint in the sequence, until a connection is * successfully established. * * @param s The socket to be connected. If the socket is already open, it will * be closed. * * @param begin An iterator pointing to the start of a sequence of endpoints. * * @returns On success, an iterator denoting the successfully connected * endpoint. Otherwise, the end iterator. * * @throws asio::system_error Thrown on failure. If the sequence is * empty, the associated @c error_code is asio::error::not_found. * Otherwise, contains the error from the last connection attempt. 
* * @note This overload assumes that a default constructed object of type @c * Iterator represents the end of the sequence. This is a valid assumption for * iterator types such as @c asio::ip::tcp::resolver::iterator. * * @par Example * @code tcp::resolver r(io_service); * tcp::resolver::query q("host", "service"); * tcp::socket s(io_service); * asio::connect(s, r.resolve(q)); @endcode */ template Iterator connect(basic_socket& s, Iterator begin); /// Establishes a socket connection by trying each endpoint in a sequence. /** * This function attempts to connect a socket to one of a sequence of * endpoints. It does this by repeated calls to the socket's @c connect member * function, once for each endpoint in the sequence, until a connection is * successfully established. * * @param s The socket to be connected. If the socket is already open, it will * be closed. * * @param begin An iterator pointing to the start of a sequence of endpoints. * * @param ec Set to indicate what error occurred, if any. If the sequence is * empty, set to asio::error::not_found. Otherwise, contains the error * from the last connection attempt. * * @returns On success, an iterator denoting the successfully connected * endpoint. Otherwise, the end iterator. * * @note This overload assumes that a default constructed object of type @c * Iterator represents the end of the sequence. This is a valid assumption for * iterator types such as @c asio::ip::tcp::resolver::iterator. * * @par Example * @code tcp::resolver r(io_service); * tcp::resolver::query q("host", "service"); * tcp::socket s(io_service); * asio::error_code ec; * asio::connect(s, r.resolve(q), ec); * if (ec) * { * // An error occurred. * } @endcode */ template Iterator connect(basic_socket& s, Iterator begin, asio::error_code& ec); /// Establishes a socket connection by trying each endpoint in a sequence. /** * This function attempts to connect a socket to one of a sequence of * endpoints. 
It does this by repeated calls to the socket's @c connect member * function, once for each endpoint in the sequence, until a connection is * successfully established. * * @param s The socket to be connected. If the socket is already open, it will * be closed. * * @param begin An iterator pointing to the start of a sequence of endpoints. * * @param end An iterator pointing to the end of a sequence of endpoints. * * @returns On success, an iterator denoting the successfully connected * endpoint. Otherwise, the end iterator. * * @throws asio::system_error Thrown on failure. If the sequence is * empty, the associated @c error_code is asio::error::not_found. * Otherwise, contains the error from the last connection attempt. * * @par Example * @code tcp::resolver r(io_service); * tcp::resolver::query q("host", "service"); * tcp::resolver::iterator i = r.resolve(q), end; * tcp::socket s(io_service); * asio::connect(s, i, end); @endcode */ template Iterator connect(basic_socket& s, Iterator begin, Iterator end); /// Establishes a socket connection by trying each endpoint in a sequence. /** * This function attempts to connect a socket to one of a sequence of * endpoints. It does this by repeated calls to the socket's @c connect member * function, once for each endpoint in the sequence, until a connection is * successfully established. * * @param s The socket to be connected. If the socket is already open, it will * be closed. * * @param begin An iterator pointing to the start of a sequence of endpoints. * * @param end An iterator pointing to the end of a sequence of endpoints. * * @param ec Set to indicate what error occurred, if any. If the sequence is * empty, set to asio::error::not_found. Otherwise, contains the error * from the last connection attempt. * * @returns On success, an iterator denoting the successfully connected * endpoint. Otherwise, the end iterator. 
* * @par Example * @code tcp::resolver r(io_service); * tcp::resolver::query q("host", "service"); * tcp::resolver::iterator i = r.resolve(q), end; * tcp::socket s(io_service); * asio::error_code ec; * asio::connect(s, i, end, ec); * if (ec) * { * // An error occurred. * } @endcode */ template Iterator connect(basic_socket& s, Iterator begin, Iterator end, asio::error_code& ec); /// Establishes a socket connection by trying each endpoint in a sequence. /** * This function attempts to connect a socket to one of a sequence of * endpoints. It does this by repeated calls to the socket's @c connect member * function, once for each endpoint in the sequence, until a connection is * successfully established. * * @param s The socket to be connected. If the socket is already open, it will * be closed. * * @param begin An iterator pointing to the start of a sequence of endpoints. * * @param connect_condition A function object that is called prior to each * connection attempt. The signature of the function object must be: * @code Iterator connect_condition( * const asio::error_code& ec, * Iterator next); @endcode * The @c ec parameter contains the result from the most recent connect * operation. Before the first connection attempt, @c ec is always set to * indicate success. The @c next parameter is an iterator pointing to the next * endpoint to be tried. The function object should return the next iterator, * but is permitted to return a different iterator so that endpoints may be * skipped. The implementation guarantees that the function object will never * be called with the end iterator. * * @returns On success, an iterator denoting the successfully connected * endpoint. Otherwise, the end iterator. * * @throws asio::system_error Thrown on failure. If the sequence is * empty, the associated @c error_code is asio::error::not_found. * Otherwise, contains the error from the last connection attempt. 
* * @note This overload assumes that a default constructed object of type @c * Iterator represents the end of the sequence. This is a valid assumption for * iterator types such as @c asio::ip::tcp::resolver::iterator. * * @par Example * The following connect condition function object can be used to output * information about the individual connection attempts: * @code struct my_connect_condition * { * template * Iterator operator()( * const asio::error_code& ec, * Iterator next) * { * if (ec) std::cout << "Error: " << ec.message() << std::endl; * std::cout << "Trying: " << next->endpoint() << std::endl; * return next; * } * }; @endcode * It would be used with the asio::connect function as follows: * @code tcp::resolver r(io_service); * tcp::resolver::query q("host", "service"); * tcp::socket s(io_service); * tcp::resolver::iterator i = asio::connect( * s, r.resolve(q), my_connect_condition()); * std::cout << "Connected to: " << i->endpoint() << std::endl; @endcode */ template Iterator connect(basic_socket& s, Iterator begin, ConnectCondition connect_condition); /// Establishes a socket connection by trying each endpoint in a sequence. /** * This function attempts to connect a socket to one of a sequence of * endpoints. It does this by repeated calls to the socket's @c connect member * function, once for each endpoint in the sequence, until a connection is * successfully established. * * @param s The socket to be connected. If the socket is already open, it will * be closed. * * @param begin An iterator pointing to the start of a sequence of endpoints. * * @param connect_condition A function object that is called prior to each * connection attempt. The signature of the function object must be: * @code Iterator connect_condition( * const asio::error_code& ec, * Iterator next); @endcode * The @c ec parameter contains the result from the most recent connect * operation. Before the first connection attempt, @c ec is always set to * indicate success. 
The @c next parameter is an iterator pointing to the next * endpoint to be tried. The function object should return the next iterator, * but is permitted to return a different iterator so that endpoints may be * skipped. The implementation guarantees that the function object will never * be called with the end iterator. * * @param ec Set to indicate what error occurred, if any. If the sequence is * empty, set to asio::error::not_found. Otherwise, contains the error * from the last connection attempt. * * @returns On success, an iterator denoting the successfully connected * endpoint. Otherwise, the end iterator. * * @note This overload assumes that a default constructed object of type @c * Iterator represents the end of the sequence. This is a valid assumption for * iterator types such as @c asio::ip::tcp::resolver::iterator. * * @par Example * The following connect condition function object can be used to output * information about the individual connection attempts: * @code struct my_connect_condition * { * template * Iterator operator()( * const asio::error_code& ec, * Iterator next) * { * if (ec) std::cout << "Error: " << ec.message() << std::endl; * std::cout << "Trying: " << next->endpoint() << std::endl; * return next; * } * }; @endcode * It would be used with the asio::connect function as follows: * @code tcp::resolver r(io_service); * tcp::resolver::query q("host", "service"); * tcp::socket s(io_service); * asio::error_code ec; * tcp::resolver::iterator i = asio::connect( * s, r.resolve(q), my_connect_condition(), ec); * if (ec) * { * // An error occurred. * } * else * { * std::cout << "Connected to: " << i->endpoint() << std::endl; * } @endcode */ template Iterator connect(basic_socket& s, Iterator begin, ConnectCondition connect_condition, asio::error_code& ec); /// Establishes a socket connection by trying each endpoint in a sequence. /** * This function attempts to connect a socket to one of a sequence of * endpoints. 
It does this by repeated calls to the socket's @c connect member * function, once for each endpoint in the sequence, until a connection is * successfully established. * * @param s The socket to be connected. If the socket is already open, it will * be closed. * * @param begin An iterator pointing to the start of a sequence of endpoints. * * @param end An iterator pointing to the end of a sequence of endpoints. * * @param connect_condition A function object that is called prior to each * connection attempt. The signature of the function object must be: * @code Iterator connect_condition( * const asio::error_code& ec, * Iterator next); @endcode * The @c ec parameter contains the result from the most recent connect * operation. Before the first connection attempt, @c ec is always set to * indicate success. The @c next parameter is an iterator pointing to the next * endpoint to be tried. The function object should return the next iterator, * but is permitted to return a different iterator so that endpoints may be * skipped. The implementation guarantees that the function object will never * be called with the end iterator. * * @returns On success, an iterator denoting the successfully connected * endpoint. Otherwise, the end iterator. * * @throws asio::system_error Thrown on failure. If the sequence is * empty, the associated @c error_code is asio::error::not_found. * Otherwise, contains the error from the last connection attempt. 
* * @par Example * The following connect condition function object can be used to output * information about the individual connection attempts: * @code struct my_connect_condition * { * template * Iterator operator()( * const asio::error_code& ec, * Iterator next) * { * if (ec) std::cout << "Error: " << ec.message() << std::endl; * std::cout << "Trying: " << next->endpoint() << std::endl; * return next; * } * }; @endcode * It would be used with the asio::connect function as follows: * @code tcp::resolver r(io_service); * tcp::resolver::query q("host", "service"); * tcp::resolver::iterator i = r.resolve(q), end; * tcp::socket s(io_service); * i = asio::connect(s, i, end, my_connect_condition()); * std::cout << "Connected to: " << i->endpoint() << std::endl; @endcode */ template Iterator connect(basic_socket& s, Iterator begin, Iterator end, ConnectCondition connect_condition); /// Establishes a socket connection by trying each endpoint in a sequence. /** * This function attempts to connect a socket to one of a sequence of * endpoints. It does this by repeated calls to the socket's @c connect member * function, once for each endpoint in the sequence, until a connection is * successfully established. * * @param s The socket to be connected. If the socket is already open, it will * be closed. * * @param begin An iterator pointing to the start of a sequence of endpoints. * * @param end An iterator pointing to the end of a sequence of endpoints. * * @param connect_condition A function object that is called prior to each * connection attempt. The signature of the function object must be: * @code Iterator connect_condition( * const asio::error_code& ec, * Iterator next); @endcode * The @c ec parameter contains the result from the most recent connect * operation. Before the first connection attempt, @c ec is always set to * indicate success. The @c next parameter is an iterator pointing to the next * endpoint to be tried. 
The function object should return the next iterator, * but is permitted to return a different iterator so that endpoints may be * skipped. The implementation guarantees that the function object will never * be called with the end iterator. * * @param ec Set to indicate what error occurred, if any. If the sequence is * empty, set to asio::error::not_found. Otherwise, contains the error * from the last connection attempt. * * @returns On success, an iterator denoting the successfully connected * endpoint. Otherwise, the end iterator. * * @par Example * The following connect condition function object can be used to output * information about the individual connection attempts: * @code struct my_connect_condition * { * template * Iterator operator()( * const asio::error_code& ec, * Iterator next) * { * if (ec) std::cout << "Error: " << ec.message() << std::endl; * std::cout << "Trying: " << next->endpoint() << std::endl; * return next; * } * }; @endcode * It would be used with the asio::connect function as follows: * @code tcp::resolver r(io_service); * tcp::resolver::query q("host", "service"); * tcp::resolver::iterator i = r.resolve(q), end; * tcp::socket s(io_service); * asio::error_code ec; * i = asio::connect(s, i, end, my_connect_condition(), ec); * if (ec) * { * // An error occurred. * } * else * { * std::cout << "Connected to: " << i->endpoint() << std::endl; * } @endcode */ template Iterator connect(basic_socket& s, Iterator begin, Iterator end, ConnectCondition connect_condition, asio::error_code& ec); /*@}*/ /** * @defgroup async_connect asio::async_connect * * @brief Asynchronously establishes a socket connection by trying each * endpoint in a sequence. */ /*@{*/ /// Asynchronously establishes a socket connection by trying each endpoint in a /// sequence. /** * This function attempts to connect a socket to one of a sequence of * endpoints. 
It does this by repeated calls to the socket's @c async_connect * member function, once for each endpoint in the sequence, until a connection * is successfully established. * * @param s The socket to be connected. If the socket is already open, it will * be closed. * * @param begin An iterator pointing to the start of a sequence of endpoints. * * @param handler The handler to be called when the connect operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * // Result of operation. if the sequence is empty, set to * // asio::error::not_found. Otherwise, contains the * // error from the last connection attempt. * const asio::error_code& error, * * // On success, an iterator denoting the successfully * // connected endpoint. Otherwise, the end iterator. * Iterator iterator * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @note This overload assumes that a default constructed object of type @c * Iterator represents the end of the sequence. This is a valid assumption for * iterator types such as @c asio::ip::tcp::resolver::iterator. * * @par Example * @code tcp::resolver r(io_service); * tcp::resolver::query q("host", "service"); * tcp::socket s(io_service); * * // ... * * r.async_resolve(q, resolve_handler); * * // ... * * void resolve_handler( * const asio::error_code& ec, * tcp::resolver::iterator i) * { * if (!ec) * { * asio::async_connect(s, i, connect_handler); * } * } * * // ... * * void connect_handler( * const asio::error_code& ec, * tcp::resolver::iterator i) * { * // ... 
* } @endcode */ template ASIO_INITFN_RESULT_TYPE(ComposedConnectHandler, void (asio::error_code, Iterator)) async_connect(basic_socket& s, Iterator begin, ASIO_MOVE_ARG(ComposedConnectHandler) handler); /// Asynchronously establishes a socket connection by trying each endpoint in a /// sequence. /** * This function attempts to connect a socket to one of a sequence of * endpoints. It does this by repeated calls to the socket's @c async_connect * member function, once for each endpoint in the sequence, until a connection * is successfully established. * * @param s The socket to be connected. If the socket is already open, it will * be closed. * * @param begin An iterator pointing to the start of a sequence of endpoints. * * @param end An iterator pointing to the end of a sequence of endpoints. * * @param handler The handler to be called when the connect operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * // Result of operation. if the sequence is empty, set to * // asio::error::not_found. Otherwise, contains the * // error from the last connection attempt. * const asio::error_code& error, * * // On success, an iterator denoting the successfully * // connected endpoint. Otherwise, the end iterator. * Iterator iterator * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @par Example * @code tcp::resolver r(io_service); * tcp::resolver::query q("host", "service"); * tcp::socket s(io_service); * * // ... * * r.async_resolve(q, resolve_handler); * * // ... * * void resolve_handler( * const asio::error_code& ec, * tcp::resolver::iterator i) * { * if (!ec) * { * tcp::resolver::iterator end; * asio::async_connect(s, i, end, connect_handler); * } * } * * // ... 
* * void connect_handler( * const asio::error_code& ec, * tcp::resolver::iterator i) * { * // ... * } @endcode */ template ASIO_INITFN_RESULT_TYPE(ComposedConnectHandler, void (asio::error_code, Iterator)) async_connect(basic_socket& s, Iterator begin, Iterator end, ASIO_MOVE_ARG(ComposedConnectHandler) handler); /// Asynchronously establishes a socket connection by trying each endpoint in a /// sequence. /** * This function attempts to connect a socket to one of a sequence of * endpoints. It does this by repeated calls to the socket's @c async_connect * member function, once for each endpoint in the sequence, until a connection * is successfully established. * * @param s The socket to be connected. If the socket is already open, it will * be closed. * * @param begin An iterator pointing to the start of a sequence of endpoints. * * @param connect_condition A function object that is called prior to each * connection attempt. The signature of the function object must be: * @code Iterator connect_condition( * const asio::error_code& ec, * Iterator next); @endcode * The @c ec parameter contains the result from the most recent connect * operation. Before the first connection attempt, @c ec is always set to * indicate success. The @c next parameter is an iterator pointing to the next * endpoint to be tried. The function object should return the next iterator, * but is permitted to return a different iterator so that endpoints may be * skipped. The implementation guarantees that the function object will never * be called with the end iterator. * * @param handler The handler to be called when the connect operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * // Result of operation. if the sequence is empty, set to * // asio::error::not_found. Otherwise, contains the * // error from the last connection attempt. 
* const asio::error_code& error, * * // On success, an iterator denoting the successfully * // connected endpoint. Otherwise, the end iterator. * Iterator iterator * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @note This overload assumes that a default constructed object of type @c * Iterator represents the end of the sequence. This is a valid assumption for * iterator types such as @c asio::ip::tcp::resolver::iterator. * * @par Example * The following connect condition function object can be used to output * information about the individual connection attempts: * @code struct my_connect_condition * { * template * Iterator operator()( * const asio::error_code& ec, * Iterator next) * { * if (ec) std::cout << "Error: " << ec.message() << std::endl; * std::cout << "Trying: " << next->endpoint() << std::endl; * return next; * } * }; @endcode * It would be used with the asio::connect function as follows: * @code tcp::resolver r(io_service); * tcp::resolver::query q("host", "service"); * tcp::socket s(io_service); * * // ... * * r.async_resolve(q, resolve_handler); * * // ... * * void resolve_handler( * const asio::error_code& ec, * tcp::resolver::iterator i) * { * if (!ec) * { * asio::async_connect(s, i, * my_connect_condition(), * connect_handler); * } * } * * // ... * * void connect_handler( * const asio::error_code& ec, * tcp::resolver::iterator i) * { * if (ec) * { * // An error occurred. 
* } * else * { * std::cout << "Connected to: " << i->endpoint() << std::endl; * } * } @endcode */ template ASIO_INITFN_RESULT_TYPE(ComposedConnectHandler, void (asio::error_code, Iterator)) async_connect(basic_socket& s, Iterator begin, ConnectCondition connect_condition, ASIO_MOVE_ARG(ComposedConnectHandler) handler); /// Asynchronously establishes a socket connection by trying each endpoint in a /// sequence. /** * This function attempts to connect a socket to one of a sequence of * endpoints. It does this by repeated calls to the socket's @c async_connect * member function, once for each endpoint in the sequence, until a connection * is successfully established. * * @param s The socket to be connected. If the socket is already open, it will * be closed. * * @param begin An iterator pointing to the start of a sequence of endpoints. * * @param end An iterator pointing to the end of a sequence of endpoints. * * @param connect_condition A function object that is called prior to each * connection attempt. The signature of the function object must be: * @code Iterator connect_condition( * const asio::error_code& ec, * Iterator next); @endcode * The @c ec parameter contains the result from the most recent connect * operation. Before the first connection attempt, @c ec is always set to * indicate success. The @c next parameter is an iterator pointing to the next * endpoint to be tried. The function object should return the next iterator, * but is permitted to return a different iterator so that endpoints may be * skipped. The implementation guarantees that the function object will never * be called with the end iterator. * * @param handler The handler to be called when the connect operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * // Result of operation. if the sequence is empty, set to * // asio::error::not_found. 
Otherwise, contains the * // error from the last connection attempt. * const asio::error_code& error, * * // On success, an iterator denoting the successfully * // connected endpoint. Otherwise, the end iterator. * Iterator iterator * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @par Example * The following connect condition function object can be used to output * information about the individual connection attempts: * @code struct my_connect_condition * { * template * Iterator operator()( * const asio::error_code& ec, * Iterator next) * { * if (ec) std::cout << "Error: " << ec.message() << std::endl; * std::cout << "Trying: " << next->endpoint() << std::endl; * return next; * } * }; @endcode * It would be used with the asio::connect function as follows: * @code tcp::resolver r(io_service); * tcp::resolver::query q("host", "service"); * tcp::socket s(io_service); * * // ... * * r.async_resolve(q, resolve_handler); * * // ... * * void resolve_handler( * const asio::error_code& ec, * tcp::resolver::iterator i) * { * if (!ec) * { * tcp::resolver::iterator end; * asio::async_connect(s, i, end, * my_connect_condition(), * connect_handler); * } * } * * // ... * * void connect_handler( * const asio::error_code& ec, * tcp::resolver::iterator i) * { * if (ec) * { * // An error occurred. 
* } * else * { * std::cout << "Connected to: " << i->endpoint() << std::endl; * } * } @endcode */ template ASIO_INITFN_RESULT_TYPE(ComposedConnectHandler, void (asio::error_code, Iterator)) async_connect(basic_socket& s, Iterator begin, Iterator end, ConnectCondition connect_condition, ASIO_MOVE_ARG(ComposedConnectHandler) handler); /*@}*/ } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/connect.hpp" #endif galera-26.4.3/asio/asio/basic_seq_packet_socket.hpp0000664000177500017540000005324713540715002020620 0ustar dbartmy// // basic_seq_packet_socket.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BASIC_SEQ_PACKET_SOCKET_HPP #define ASIO_BASIC_SEQ_PACKET_SOCKET_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/basic_socket.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/seq_packet_socket_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Provides sequenced packet socket functionality. /** * The basic_seq_packet_socket class template provides asynchronous and blocking * sequenced packet socket functionality. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ template > class basic_seq_packet_socket : public basic_socket { public: /// (Deprecated: Use native_handle_type.) The native representation of a /// socket. typedef typename SeqPacketSocketService::native_handle_type native_type; /// The native representation of a socket. typedef typename SeqPacketSocketService::native_handle_type native_handle_type; /// The protocol type. 
typedef Protocol protocol_type; /// The endpoint type. typedef typename Protocol::endpoint endpoint_type; /// Construct a basic_seq_packet_socket without opening it. /** * This constructor creates a sequenced packet socket without opening it. The * socket needs to be opened and then connected or accepted before data can * be sent or received on it. * * @param io_service The io_service object that the sequenced packet socket * will use to dispatch handlers for any asynchronous operations performed on * the socket. */ explicit basic_seq_packet_socket(asio::io_service& io_service) : basic_socket(io_service) { } /// Construct and open a basic_seq_packet_socket. /** * This constructor creates and opens a sequenced_packet socket. The socket * needs to be connected or accepted before data can be sent or received on * it. * * @param io_service The io_service object that the sequenced packet socket * will use to dispatch handlers for any asynchronous operations performed on * the socket. * * @param protocol An object specifying protocol parameters to be used. * * @throws asio::system_error Thrown on failure. */ basic_seq_packet_socket(asio::io_service& io_service, const protocol_type& protocol) : basic_socket(io_service, protocol) { } /// Construct a basic_seq_packet_socket, opening it and binding it to the /// given local endpoint. /** * This constructor creates a sequenced packet socket and automatically opens * it bound to the specified endpoint on the local machine. The protocol used * is the protocol associated with the given endpoint. * * @param io_service The io_service object that the sequenced packet socket * will use to dispatch handlers for any asynchronous operations performed on * the socket. * * @param endpoint An endpoint on the local machine to which the sequenced * packet socket will be bound. * * @throws asio::system_error Thrown on failure. 
*/ basic_seq_packet_socket(asio::io_service& io_service, const endpoint_type& endpoint) : basic_socket(io_service, endpoint) { } /// Construct a basic_seq_packet_socket on an existing native socket. /** * This constructor creates a sequenced packet socket object to hold an * existing native socket. * * @param io_service The io_service object that the sequenced packet socket * will use to dispatch handlers for any asynchronous operations performed on * the socket. * * @param protocol An object specifying protocol parameters to be used. * * @param native_socket The new underlying socket implementation. * * @throws asio::system_error Thrown on failure. */ basic_seq_packet_socket(asio::io_service& io_service, const protocol_type& protocol, const native_handle_type& native_socket) : basic_socket( io_service, protocol, native_socket) { } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a basic_seq_packet_socket from another. /** * This constructor moves a sequenced packet socket from one object to * another. * * @param other The other basic_seq_packet_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_seq_packet_socket(io_service&) constructor. */ basic_seq_packet_socket(basic_seq_packet_socket&& other) : basic_socket( ASIO_MOVE_CAST(basic_seq_packet_socket)(other)) { } /// Move-assign a basic_seq_packet_socket from another. /** * This assignment operator moves a sequenced packet socket from one object to * another. * * @param other The other basic_seq_packet_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_seq_packet_socket(io_service&) constructor. 
*/ basic_seq_packet_socket& operator=(basic_seq_packet_socket&& other) { basic_socket::operator=( ASIO_MOVE_CAST(basic_seq_packet_socket)(other)); return *this; } /// Move-construct a basic_seq_packet_socket from a socket of another protocol /// type. /** * This constructor moves a sequenced packet socket from one object to * another. * * @param other The other basic_seq_packet_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_seq_packet_socket(io_service&) constructor. */ template basic_seq_packet_socket( basic_seq_packet_socket&& other, typename enable_if::value>::type* = 0) : basic_socket( ASIO_MOVE_CAST2(basic_seq_packet_socket< Protocol1, SeqPacketSocketService1>)(other)) { } /// Move-assign a basic_seq_packet_socket from a socket of another protocol /// type. /** * This assignment operator moves a sequenced packet socket from one object to * another. * * @param other The other basic_seq_packet_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_seq_packet_socket(io_service&) constructor. */ template typename enable_if::value, basic_seq_packet_socket>::type& operator=( basic_seq_packet_socket&& other) { basic_socket::operator=( ASIO_MOVE_CAST2(basic_seq_packet_socket< Protocol1, SeqPacketSocketService1>)(other)); return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Send some data on the socket. /** * This function is used to send data on the sequenced packet socket. The * function call will block until the data has been sent successfully, or * until an error occurs. * * @param buffers One or more data buffers to be sent on the socket. * * @param flags Flags specifying how the send call is to be made. * * @returns The number of bytes sent. * * @throws asio::system_error Thrown on failure. 
* * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code * socket.send(asio::buffer(data, size), 0); * @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t send(const ConstBufferSequence& buffers, socket_base::message_flags flags) { asio::error_code ec; std::size_t s = this->get_service().send( this->get_implementation(), buffers, flags, ec); asio::detail::throw_error(ec, "send"); return s; } /// Send some data on the socket. /** * This function is used to send data on the sequenced packet socket. The * function call will block until the data has been sent successfully, or * until an error occurs. * * @param buffers One or more data buffers to be sent on the socket. * * @param flags Flags specifying how the send call is to be made. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes sent. Returns 0 if an error occurred. * * @note The send operation may not transmit all of the data to the peer. * Consider using the @ref write function if you need to ensure that all data * is written before the blocking operation completes. */ template std::size_t send(const ConstBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { return this->get_service().send( this->get_implementation(), buffers, flags, ec); } /// Start an asynchronous send. /** * This function is used to asynchronously send data on the sequenced packet * socket. The function call always returns immediately. * * @param buffers One or more data buffers to be sent on the socket. Although * the buffers object may be copied as necessary, ownership of the underlying * memory blocks is retained by the caller, which must guarantee that they * remain valid until the handler is called. * * @param flags Flags specifying how the send call is to be made. 
* * @param handler The handler to be called when the send operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes sent. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code * socket.async_send(asio::buffer(data, size), 0, handler); * @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_send(const ConstBufferSequence& buffers, socket_base::message_flags flags, ASIO_MOVE_ARG(WriteHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a WriteHandler. ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; return this->get_service().async_send(this->get_implementation(), buffers, flags, ASIO_MOVE_CAST(WriteHandler)(handler)); } /// Receive some data on the socket. /** * This function is used to receive data on the sequenced packet socket. The * function call will block until data has been received successfully, or * until an error occurs. * * @param buffers One or more buffers into which the data will be received. * * @param out_flags After the receive call completes, contains flags * associated with the received data. For example, if the * socket_base::message_end_of_record bit is set then the received data marks * the end of a record. * * @returns The number of bytes received. 
* * @throws asio::system_error Thrown on failure. An error code of * asio::error::eof indicates that the connection was closed by the * peer. * * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code * socket.receive(asio::buffer(data, size), out_flags); * @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t receive(const MutableBufferSequence& buffers, socket_base::message_flags& out_flags) { asio::error_code ec; std::size_t s = this->get_service().receive( this->get_implementation(), buffers, 0, out_flags, ec); asio::detail::throw_error(ec, "receive"); return s; } /// Receive some data on the socket. /** * This function is used to receive data on the sequenced packet socket. The * function call will block until data has been received successfully, or * until an error occurs. * * @param buffers One or more buffers into which the data will be received. * * @param in_flags Flags specifying how the receive call is to be made. * * @param out_flags After the receive call completes, contains flags * associated with the received data. For example, if the * socket_base::message_end_of_record bit is set then the received data marks * the end of a record. * * @returns The number of bytes received. * * @throws asio::system_error Thrown on failure. An error code of * asio::error::eof indicates that the connection was closed by the * peer. * * @note The receive operation may not receive all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that the * requested amount of data is read before the blocking operation completes. 
* * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code * socket.receive(asio::buffer(data, size), 0, out_flags); * @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t receive(const MutableBufferSequence& buffers, socket_base::message_flags in_flags, socket_base::message_flags& out_flags) { asio::error_code ec; std::size_t s = this->get_service().receive( this->get_implementation(), buffers, in_flags, out_flags, ec); asio::detail::throw_error(ec, "receive"); return s; } /// Receive some data on a connected socket. /** * This function is used to receive data on the sequenced packet socket. The * function call will block until data has been received successfully, or * until an error occurs. * * @param buffers One or more buffers into which the data will be received. * * @param in_flags Flags specifying how the receive call is to be made. * * @param out_flags After the receive call completes, contains flags * associated with the received data. For example, if the * socket_base::message_end_of_record bit is set then the received data marks * the end of a record. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes received. Returns 0 if an error occurred. * * @note The receive operation may not receive all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that the * requested amount of data is read before the blocking operation completes. */ template std::size_t receive(const MutableBufferSequence& buffers, socket_base::message_flags in_flags, socket_base::message_flags& out_flags, asio::error_code& ec) { return this->get_service().receive(this->get_implementation(), buffers, in_flags, out_flags, ec); } /// Start an asynchronous receive. 
/** * This function is used to asynchronously receive data from the sequenced * packet socket. The function call always returns immediately. * * @param buffers One or more buffers into which the data will be received. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param out_flags Once the asynchronous operation completes, contains flags * associated with the received data. For example, if the * socket_base::message_end_of_record bit is set then the received data marks * the end of a record. The caller must guarantee that the referenced * variable remains valid until the handler is called. * * @param handler The handler to be called when the receive operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes received. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). * * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code * socket.async_receive(asio::buffer(data, size), out_flags, handler); * @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. 
*/ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive(const MutableBufferSequence& buffers, socket_base::message_flags& out_flags, ASIO_MOVE_ARG(ReadHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ReadHandler. ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; return this->get_service().async_receive( this->get_implementation(), buffers, 0, out_flags, ASIO_MOVE_CAST(ReadHandler)(handler)); } /// Start an asynchronous receive. /** * This function is used to asynchronously receive data from the sequenced * data socket. The function call always returns immediately. * * @param buffers One or more buffers into which the data will be received. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param in_flags Flags specifying how the receive call is to be made. * * @param out_flags Once the asynchronous operation completes, contains flags * associated with the received data. For example, if the * socket_base::message_end_of_record bit is set then the received data marks * the end of a record. The caller must guarantee that the referenced * variable remains valid until the handler is called. * * @param handler The handler to be called when the receive operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes received. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. Invocation * of the handler will be performed in a manner equivalent to using * asio::io_service::post(). 
* * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code * socket.async_receive( * asio::buffer(data, size), * 0, out_flags, handler); * @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive(const MutableBufferSequence& buffers, socket_base::message_flags in_flags, socket_base::message_flags& out_flags, ASIO_MOVE_ARG(ReadHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ReadHandler. ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; return this->get_service().async_receive( this->get_implementation(), buffers, in_flags, out_flags, ASIO_MOVE_CAST(ReadHandler)(handler)); } }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_BASIC_SEQ_PACKET_SOCKET_HPP galera-26.4.3/asio/asio/spawn.hpp0000664000177500017540000002067613540715002015120 0ustar dbartmy// // spawn.hpp // ~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SPAWN_HPP #define ASIO_SPAWN_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/weak_ptr.hpp" #include "asio/detail/wrapped_handler.hpp" #include "asio/io_service.hpp" #include "asio/strand.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Context object the represents the currently executing coroutine. /** * The basic_yield_context class is used to represent the currently executing * stackful coroutine. 
A basic_yield_context may be passed as a handler to an * asynchronous operation. For example: * * @code template * void my_coroutine(basic_yield_context yield) * { * ... * std::size_t n = my_socket.async_read_some(buffer, yield); * ... * } @endcode * * The initiating function (async_read_some in the above example) suspends the * current coroutine. The coroutine is resumed when the asynchronous operation * completes, and the result of the operation is returned. */ template class basic_yield_context { public: /// The coroutine callee type, used by the implementation. /** * When using Boost.Coroutine v1, this type is: * @code typename coroutine @endcode * When using Boost.Coroutine v2 (unidirectional coroutines), this type is: * @code push_coroutine @endcode */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined callee_type; #elif defined(BOOST_COROUTINES_UNIDIRECT) || defined(BOOST_COROUTINES_V2) typedef boost::coroutines::push_coroutine callee_type; #else typedef boost::coroutines::coroutine callee_type; #endif /// The coroutine caller type, used by the implementation. /** * When using Boost.Coroutine v1, this type is: * @code typename coroutine::caller_type @endcode * When using Boost.Coroutine v2 (unidirectional coroutines), this type is: * @code pull_coroutine @endcode */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined caller_type; #elif defined(BOOST_COROUTINES_UNIDIRECT) || defined(BOOST_COROUTINES_V2) typedef boost::coroutines::pull_coroutine caller_type; #else typedef boost::coroutines::coroutine::caller_type caller_type; #endif /// Construct a yield context to represent the specified coroutine. /** * Most applications do not need to use this constructor. Instead, the * spawn() function passes a yield context as an argument to the coroutine * function. 
*/ basic_yield_context( const detail::weak_ptr& coro, caller_type& ca, Handler& handler) : coro_(coro), ca_(ca), handler_(handler), ec_(0) { } /// Return a yield context that sets the specified error_code. /** * By default, when a yield context is used with an asynchronous operation, a * non-success error_code is converted to system_error and thrown. This * operator may be used to specify an error_code object that should instead be * set with the asynchronous operation's result. For example: * * @code template * void my_coroutine(basic_yield_context yield) * { * ... * std::size_t n = my_socket.async_read_some(buffer, yield[ec]); * if (ec) * { * // An error occurred. * } * ... * } @endcode */ basic_yield_context operator[](asio::error_code& ec) const { basic_yield_context tmp(*this); tmp.ec_ = &ec; return tmp; } #if defined(GENERATING_DOCUMENTATION) private: #endif // defined(GENERATING_DOCUMENTATION) detail::weak_ptr coro_; caller_type& ca_; Handler& handler_; asio::error_code* ec_; }; #if defined(GENERATING_DOCUMENTATION) /// Context object that represents the currently executing coroutine. typedef basic_yield_context yield_context; #else // defined(GENERATING_DOCUMENTATION) typedef basic_yield_context< detail::wrapped_handler< io_service::strand, void(*)(), detail::is_continuation_if_running> > yield_context; #endif // defined(GENERATING_DOCUMENTATION) /** * @defgroup spawn asio::spawn * * @brief Start a new stackful coroutine. * * The spawn() function is a high-level wrapper over the Boost.Coroutine * library. This function enables programs to implement asynchronous logic in a * synchronous manner, as illustrated by the following example: * * @code asio::spawn(my_strand, do_echo); * * // ... 
* * void do_echo(asio::yield_context yield) * { * try * { * char data[128]; * for (;;) * { * std::size_t length = * my_socket.async_read_some( * asio::buffer(data), yield); * * asio::async_write(my_socket, * asio::buffer(data, length), yield); * } * } * catch (std::exception& e) * { * // ... * } * } @endcode */ /*@{*/ /// Start a new stackful coroutine, calling the specified handler when it /// completes. /** * This function is used to launch a new coroutine. * * @param handler A handler to be called when the coroutine exits. More * importantly, the handler provides an execution context (via the the handler * invocation hook) for the coroutine. The handler must have the signature: * @code void handler(); @endcode * * @param function The coroutine function. The function must have the signature: * @code void function(basic_yield_context yield); @endcode * * @param attributes Boost.Coroutine attributes used to customise the coroutine. */ template void spawn(ASIO_MOVE_ARG(Handler) handler, ASIO_MOVE_ARG(Function) function, const boost::coroutines::attributes& attributes = boost::coroutines::attributes()); /// Start a new stackful coroutine, inheriting the execution context of another. /** * This function is used to launch a new coroutine. * * @param ctx Identifies the current coroutine as a parent of the new * coroutine. This specifies that the new coroutine should inherit the * execution context of the parent. For example, if the parent coroutine is * executing in a particular strand, then the new coroutine will execute in the * same strand. * * @param function The coroutine function. The function must have the signature: * @code void function(basic_yield_context yield); @endcode * * @param attributes Boost.Coroutine attributes used to customise the coroutine. 
*/ template void spawn(basic_yield_context ctx, ASIO_MOVE_ARG(Function) function, const boost::coroutines::attributes& attributes = boost::coroutines::attributes()); /// Start a new stackful coroutine that executes in the context of a strand. /** * This function is used to launch a new coroutine. * * @param strand Identifies a strand. By starting multiple coroutines on the * same strand, the implementation ensures that none of those coroutines can * execute simultaneously. * * @param function The coroutine function. The function must have the signature: * @code void function(yield_context yield); @endcode * * @param attributes Boost.Coroutine attributes used to customise the coroutine. */ template void spawn(asio::io_service::strand strand, ASIO_MOVE_ARG(Function) function, const boost::coroutines::attributes& attributes = boost::coroutines::attributes()); /// Start a new stackful coroutine that executes on a given io_service. /** * This function is used to launch a new coroutine. * * @param io_service Identifies the io_service that will run the coroutine. The * new coroutine is implicitly given its own strand within this io_service. * * @param function The coroutine function. The function must have the signature: * @code void function(yield_context yield); @endcode * * @param attributes Boost.Coroutine attributes used to customise the coroutine. */ template void spawn(asio::io_service& io_service, ASIO_MOVE_ARG(Function) function, const boost::coroutines::attributes& attributes = boost::coroutines::attributes()); /*@}*/ } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/spawn.hpp" #endif // ASIO_SPAWN_HPP galera-26.4.3/asio/asio/serial_port_service.hpp0000664000177500017540000001634113540715002020025 0ustar dbartmy// // serial_port_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SERIAL_PORT_SERVICE_HPP #define ASIO_SERIAL_PORT_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_SERIAL_PORT) \ || defined(GENERATING_DOCUMENTATION) #include #include #include "asio/async_result.hpp" #include "asio/detail/reactive_serial_port_service.hpp" #include "asio/detail/win_iocp_serial_port_service.hpp" #include "asio/error.hpp" #include "asio/io_service.hpp" #include "asio/serial_port_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Default service implementation for a serial port. class serial_port_service #if defined(GENERATING_DOCUMENTATION) : public asio::io_service::service #else : public asio::detail::service_base #endif { public: #if defined(GENERATING_DOCUMENTATION) /// The unique service identifier. static asio::io_service::id id; #endif private: // The type of the platform-specific implementation. #if defined(ASIO_HAS_IOCP) typedef detail::win_iocp_serial_port_service service_impl_type; #else typedef detail::reactive_serial_port_service service_impl_type; #endif public: /// The type of a serial port implementation. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined implementation_type; #else typedef service_impl_type::implementation_type implementation_type; #endif /// (Deprecated: Use native_handle_type.) The native handle type. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined native_type; #else typedef service_impl_type::native_handle_type native_type; #endif /// The native handle type. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined native_handle_type; #else typedef service_impl_type::native_handle_type native_handle_type; #endif /// Construct a new serial port service for the specified io_service. 
explicit serial_port_service(asio::io_service& io_service) : asio::detail::service_base(io_service), service_impl_(io_service) { } /// Construct a new serial port implementation. void construct(implementation_type& impl) { service_impl_.construct(impl); } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a new serial port implementation. void move_construct(implementation_type& impl, implementation_type& other_impl) { service_impl_.move_construct(impl, other_impl); } /// Move-assign from another serial port implementation. void move_assign(implementation_type& impl, serial_port_service& other_service, implementation_type& other_impl) { service_impl_.move_assign(impl, other_service.service_impl_, other_impl); } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Destroy a serial port implementation. void destroy(implementation_type& impl) { service_impl_.destroy(impl); } /// Open a serial port. asio::error_code open(implementation_type& impl, const std::string& device, asio::error_code& ec) { return service_impl_.open(impl, device, ec); } /// Assign an existing native handle to a serial port. asio::error_code assign(implementation_type& impl, const native_handle_type& handle, asio::error_code& ec) { return service_impl_.assign(impl, handle, ec); } /// Determine whether the handle is open. bool is_open(const implementation_type& impl) const { return service_impl_.is_open(impl); } /// Close a serial port implementation. asio::error_code close(implementation_type& impl, asio::error_code& ec) { return service_impl_.close(impl, ec); } /// (Deprecated: Use native_handle().) Get the native handle implementation. native_type native(implementation_type& impl) { return service_impl_.native_handle(impl); } /// Get the native handle implementation. native_handle_type native_handle(implementation_type& impl) { return service_impl_.native_handle(impl); } /// Cancel all asynchronous operations associated with the handle. 
asio::error_code cancel(implementation_type& impl, asio::error_code& ec) { return service_impl_.cancel(impl, ec); } /// Set a serial port option. template asio::error_code set_option(implementation_type& impl, const SettableSerialPortOption& option, asio::error_code& ec) { return service_impl_.set_option(impl, option, ec); } /// Get a serial port option. template asio::error_code get_option(const implementation_type& impl, GettableSerialPortOption& option, asio::error_code& ec) const { return service_impl_.get_option(impl, option, ec); } /// Send a break sequence to the serial port. asio::error_code send_break(implementation_type& impl, asio::error_code& ec) { return service_impl_.send_break(impl, ec); } /// Write the given data to the stream. template std::size_t write_some(implementation_type& impl, const ConstBufferSequence& buffers, asio::error_code& ec) { return service_impl_.write_some(impl, buffers, ec); } /// Start an asynchronous write. template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write_some(implementation_type& impl, const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler) { detail::async_result_init< WriteHandler, void (asio::error_code, std::size_t)> init( ASIO_MOVE_CAST(WriteHandler)(handler)); service_impl_.async_write_some(impl, buffers, init.handler); return init.result.get(); } /// Read some data from the stream. template std::size_t read_some(implementation_type& impl, const MutableBufferSequence& buffers, asio::error_code& ec) { return service_impl_.read_some(impl, buffers, ec); } /// Start an asynchronous read. 
template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_some(implementation_type& impl, const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler) { detail::async_result_init< ReadHandler, void (asio::error_code, std::size_t)> init( ASIO_MOVE_CAST(ReadHandler)(handler)); service_impl_.async_read_some(impl, buffers, init.handler); return init.result.get(); } private: // Destroy all user-defined handler objects owned by the service. void shutdown_service() { service_impl_.shutdown_service(); } // The platform-specific implementation. service_impl_type service_impl_; }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_SERIAL_PORT) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_SERIAL_PORT_SERVICE_HPP galera-26.4.3/asio/asio/unyield.hpp0000664000177500017540000000057513540715002015435 0ustar dbartmy// // unyield.hpp // ~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifdef reenter # undef reenter #endif #ifdef yield # undef yield #endif #ifdef fork # undef fork #endif galera-26.4.3/asio/asio/serial_port.hpp0000664000177500017540000000164113540715002016302 0ustar dbartmy// // serial_port.hpp // ~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SERIAL_PORT_HPP #define ASIO_SERIAL_PORT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_SERIAL_PORT) \ || defined(GENERATING_DOCUMENTATION) #include "asio/basic_serial_port.hpp" namespace asio { /// Typedef for the typical usage of a serial port. typedef basic_serial_port<> serial_port; } // namespace asio #endif // defined(ASIO_HAS_SERIAL_PORT) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_SERIAL_PORT_HPP galera-26.4.3/asio/asio/system_error.hpp0000664000177500017540000000550113540715002016513 0ustar dbartmy// // system_error.hpp // ~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SYSTEM_ERROR_HPP #define ASIO_SYSTEM_ERROR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_SYSTEM_ERROR) # include #else // defined(ASIO_HAS_STD_SYSTEM_ERROR) # include # include # include # include "asio/error_code.hpp" # include "asio/detail/scoped_ptr.hpp" #endif // defined(ASIO_HAS_STD_SYSTEM_ERROR) #include "asio/detail/push_options.hpp" namespace asio { #if defined(ASIO_HAS_STD_SYSTEM_ERROR) typedef std::system_error system_error; #else // defined(ASIO_HAS_STD_SYSTEM_ERROR) /// The system_error class is used to represent system conditions that /// prevent the library from operating correctly. class system_error : public std::exception { public: /// Construct with an error code. system_error(const error_code& ec) : code_(ec), context_() { } /// Construct with an error code and context. 
system_error(const error_code& ec, const std::string& context) : code_(ec), context_(context) { } /// Copy constructor. system_error(const system_error& other) : std::exception(other), code_(other.code_), context_(other.context_), what_() { } /// Destructor. virtual ~system_error() throw () { } /// Assignment operator. system_error& operator=(const system_error& e) { context_ = e.context_; code_ = e.code_; what_.reset(); return *this; } /// Get a string representation of the exception. virtual const char* what() const throw () { #if !defined(ASIO_NO_EXCEPTIONS) try #endif // !defined(ASIO_NO_EXCEPTIONS) { if (!what_.get()) { std::string tmp(context_); if (tmp.length()) tmp += ": "; tmp += code_.message(); what_.reset(new std::string(tmp)); } return what_->c_str(); } #if !defined(ASIO_NO_EXCEPTIONS) catch (std::exception&) { return "system_error"; } #endif // !defined(ASIO_NO_EXCEPTIONS) } /// Get the error code associated with the exception. error_code code() const { return code_; } private: // The code associated with the error. error_code code_; // The context associated with the error. std::string context_; // The string representation of the error. mutable asio::detail::scoped_ptr what_; }; #endif // defined(ASIO_HAS_STD_SYSTEM_ERROR) } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SYSTEM_ERROR_HPP galera-26.4.3/asio/asio/time_traits.hpp0000664000177500017540000000422213540715002016301 0ustar dbartmy// // time_traits.hpp // ~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_TIME_TRAITS_HPP #define ASIO_TIME_TRAITS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/socket_types.hpp" // Must come before posix_time. 
#if defined(ASIO_HAS_BOOST_DATE_TIME) \ || defined(GENERATING_DOCUMENTATION) #include #include "asio/detail/push_options.hpp" namespace asio { /// Time traits suitable for use with the deadline timer. template struct time_traits; /// Time traits specialised for posix_time. template <> struct time_traits { /// The time type. typedef boost::posix_time::ptime time_type; /// The duration type. typedef boost::posix_time::time_duration duration_type; /// Get the current time. static time_type now() { #if defined(BOOST_DATE_TIME_HAS_HIGH_PRECISION_CLOCK) return boost::posix_time::microsec_clock::universal_time(); #else // defined(BOOST_DATE_TIME_HAS_HIGH_PRECISION_CLOCK) return boost::posix_time::second_clock::universal_time(); #endif // defined(BOOST_DATE_TIME_HAS_HIGH_PRECISION_CLOCK) } /// Add a duration to a time. static time_type add(const time_type& t, const duration_type& d) { return t + d; } /// Subtract one time from another. static duration_type subtract(const time_type& t1, const time_type& t2) { return t1 - t2; } /// Test whether one time is less than another. static bool less_than(const time_type& t1, const time_type& t2) { return t1 < t2; } /// Convert to POSIX duration type. static boost::posix_time::time_duration to_posix_duration( const duration_type& d) { return d; } }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_BOOST_DATE_TIME) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_TIME_TRAITS_HPP galera-26.4.3/asio/asio/basic_socket_iostream.hpp0000664000177500017540000002125613540715002020317 0ustar dbartmy// // basic_socket_iostream.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2016 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BASIC_SOCKET_IOSTREAM_HPP #define ASIO_BASIC_SOCKET_IOSTREAM_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_NO_IOSTREAM) #include #include #include "asio/basic_socket_streambuf.hpp" #include "asio/stream_socket_service.hpp" #if !defined(ASIO_HAS_VARIADIC_TEMPLATES) # include "asio/detail/variadic_templates.hpp" // A macro that should expand to: // template // explicit basic_socket_iostream(T1 x1, ..., Tn xn) // : std::basic_iostream( // &this->detail::socket_iostream_base< // Protocol, StreamSocketService, Time, // TimeTraits, TimerService>::streambuf_) // { // if (rdbuf()->connect(x1, ..., xn) == 0) // this->setstate(std::ios_base::failbit); // } // This macro should only persist within this file. # define ASIO_PRIVATE_CTR_DEF(n) \ template \ explicit basic_socket_iostream(ASIO_VARIADIC_PARAMS(n)) \ : std::basic_iostream( \ &this->detail::socket_iostream_base< \ Protocol, StreamSocketService, Time, \ TimeTraits, TimerService>::streambuf_) \ { \ this->setf(std::ios_base::unitbuf); \ if (rdbuf()->connect(ASIO_VARIADIC_ARGS(n)) == 0) \ this->setstate(std::ios_base::failbit); \ } \ /**/ // A macro that should expand to: // template // void connect(T1 x1, ..., Tn xn) // { // if (rdbuf()->connect(x1, ..., xn) == 0) // this->setstate(std::ios_base::failbit); // } // This macro should only persist within this file. 
# define ASIO_PRIVATE_CONNECT_DEF(n) \ template \ void connect(ASIO_VARIADIC_PARAMS(n)) \ { \ if (rdbuf()->connect(ASIO_VARIADIC_ARGS(n)) == 0) \ this->setstate(std::ios_base::failbit); \ } \ /**/ #endif // !defined(ASIO_HAS_VARIADIC_TEMPLATES) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // A separate base class is used to ensure that the streambuf is initialised // prior to the basic_socket_iostream's basic_iostream base class. template class socket_iostream_base { protected: basic_socket_streambuf streambuf_; }; } /// Iostream interface for a socket. template , #if defined(ASIO_HAS_BOOST_DATE_TIME) \ || defined(GENERATING_DOCUMENTATION) typename Time = boost::posix_time::ptime, typename TimeTraits = asio::time_traits